repo_name: bdh1011/wau
path: venv/lib/python2.7/site-packages/pandas/tests/test_categorical.py
copies: 1
size: 118271
# -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
from datetime import datetime
from pandas.compat import range, lrange, u
import os
import pickle
import re
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from pandas import Categorical, Index, Series, DataFrame, PeriodIndex, Timestamp, CategoricalIndex
from pandas.core.config import option_context
import pandas.core.common as com
import pandas.compat as compat
import pandas.util.testing as tm
class TestCategorical(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
def test_getitem(self):
self.assertEqual(self.factor[0], 'a')
self.assertEqual(self.factor[-1], 'c')
subf = self.factor[[0, 1, 2]]
tm.assert_almost_equal(subf._codes, [0, 1, 1])
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_almost_equal(subf._codes, [2, 2, 2])
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
self.assert_numpy_array_equal(result, expected)
def test_setitem(self):
# int/positional
c = self.factor.copy()
c[0] = 'b'
self.assertEqual(c[0], 'b')
c[-1] = 'a'
self.assertEqual(c[-1], 'a')
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c),dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical.from_array(['c', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assert_categorical_equal(c, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
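# (the data has five integer categories, so the -1000 appended by add_categories above
# becomes the sixth category and is therefore encoded as code 5)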
result = c.codes[np.array([100000]).astype(np.int64)]
self.assertEqual(result, np.array([5], dtype='int8'))
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical.from_array(arr, ordered=False)
self.assertFalse(factor.ordered)
# this however will raise because the values cannot be sorted
# (fixed in newer versions of numpy)
if LooseVersion(np.__version__) < "1.10":
self.assertRaises(TypeError, lambda : Categorical.from_array(arr, ordered=True))
else:
Categorical.from_array(arr, ordered=True)
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'),categories=list('abc'),ordered=False)
c2 = Categorical(list('aabca'),categories=list('cab'),ordered=False)
c3 = Categorical(list('aabca'),categories=list('cab'),ordered=True)
self.assertTrue(c1.is_dtype_equal(c1))
self.assertTrue(c2.is_dtype_equal(c2))
self.assertTrue(c3.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(c2))
self.assertFalse(c1.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(Index(list('aabca'))))
self.assertFalse(c1.is_dtype_equal(c1.astype(object)))
self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1)))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1,categories=list('cab'))))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1,ordered=True)))
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"])
c1 = Categorical(exp_arr)
self.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a","b","c"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c","b","a"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1,2], [1,2,2])
self.assertRaises(ValueError, f)
def f():
Categorical(["a","b"], ["a","b","b"])
self.assertRaises(ValueError, f)
def f():
Categorical([1,2], [1,2,np.nan, np.nan])
self.assertRaises(ValueError, f)
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
self.assertFalse(c1.ordered)
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a","b","c","d"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a","c","b"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a","c","b"])
c2 = Categorical(c1, categories=["a","b","c"])
self.assert_numpy_array_equal(c1.__array__(), c2.__array__())
self.assert_numpy_array_equal(c2.categories, np.array(["a","b","c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a","b","c","d"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a","c","b"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a","b","c","d"])
c2 = Categorical(Series(["a", "b", "c", "a"]), categories=["a","b","c","d"])
self.assertTrue(c1.equals(c2))
# This should result in integer categories, not float!
cat = pd.Categorical([1,2,3,np.nan], categories=[1,2,3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# https://github.com/pydata/pandas/issues/3678
cat = pd.Categorical([np.nan,1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# this should result in floats
cat = pd.Categorical([np.nan, 1, 2., 3 ])
self.assertTrue(com.is_float_dtype(cat.categories))
cat = pd.Categorical([np.nan, 1., 2., 3. ])
self.assertTrue(com.is_float_dtype(cat.categories))
# preserve int as far as possible by converting to object if NaN is in categories
cat = pd.Categorical([np.nan, 1, 2, 3], categories=[np.nan, 1, 2, 3])
self.assertTrue(com.is_object_dtype(cat.categories))
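# (keeping np.nan in the categories falls back to object dtype, so the integer values
# 1, 2, 3 are not cast to float)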
# This doesn't work -> this would probably need some kind of "remember the original type"
# feature to try to cast the array interface result to...
#vals = np.asarray(cat[cat.notnull()])
#self.assertTrue(com.is_integer_dtype(vals))
cat = pd.Categorical([np.nan,"a", "b", "c"], categories=[np.nan,"a", "b", "c"])
self.assertTrue(com.is_object_dtype(cat.categories))
# but don't do it for floats
cat = pd.Categorical([np.nan, 1., 2., 3.], categories=[np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# corner cases
cat = pd.Categorical([1])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical(["a"])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == "a")
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Scalars should be converted to lists
cat = pd.Categorical(1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical([1], categories=1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Catch old style constructor usage: two arrays, codes + categories
# We can only catch two cases:
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
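# For reference, the supported way to build from codes plus categories is
# Categorical.from_codes, e.g. Categorical.from_codes([0,1,2,0,1,2], categories=["a","b","c"]),
# as exercised in test_from_codes below.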
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0,1,2,0,1,2], categories=["a","b","c"])
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0,1,2,0,1,2], categories=[3,4,5])
# the next ones are from the old docs, but unfortunately they don't trigger :-(
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3])
cat = Categorical([1,2], categories=[1,2,3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical(np.array([],dtype='int64'),categories=[3,2,1],ordered=True)
def test_constructor_with_index(self):
ci = CategoricalIndex(list('aabbca'),categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(ci)))
ci = CategoricalIndex(list('aabbca'),categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(ci.astype(object),categories=ci.categories)))
def test_constructor_with_generator(self):
# This was raising an Error in isnull(single_val).any() because isnull returned a scalar
# for a generator
from pandas.compat import range as xrange
exp = Categorical([0,1,2])
cat = Categorical((x for x in [0,1,2]))
self.assertTrue(cat.equals(exp))
cat = Categorical(xrange(3))
self.assertTrue(cat.equals(exp))
# This uses xrange internally
from pandas.core.index import MultiIndex
MultiIndex.from_product([range(5), ['a', 'b', 'c']])
# check that categories accept generators and sequences
cat = pd.Categorical([0,1,2], categories=(x for x in [0,1,2]))
self.assertTrue(cat.equals(exp))
cat = pd.Categorical([0,1,2], categories=xrange(3))
self.assertTrue(cat.equals(exp))
def test_from_codes(self):
# too few categories
def f():
Categorical.from_codes([1,2], [1,2])
self.assertRaises(ValueError, f)
# no int codes
def f():
Categorical.from_codes(["a"], [1,2])
self.assertRaises(ValueError, f)
# no unique categories
def f():
Categorical.from_codes([0,1,2], ["a","a","b"])
self.assertRaises(ValueError, f)
# too negative
def f():
Categorical.from_codes([-2,1,2], ["a","b","c"])
self.assertRaises(ValueError, f)
exp = Categorical(["a","b","c"], ordered=False)
res = Categorical.from_codes([0,1,2], ["a","b","c"])
self.assertTrue(exp.equals(res))
# Not available in earlier numpy versions
if hasattr(np.random, "choice"):
codes = np.random.choice([0,1], 5, p=[0.9,0.1])
pd.Categorical.from_codes(codes, categories=["train", "test"])
def test_comparisons(self):
result = self.factor[self.factor == 'a']
expected = self.factor[np.asarray(self.factor) == 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor != 'a']
expected = self.factor[np.asarray(self.factor) != 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor < 'c']
expected = self.factor[np.asarray(self.factor) < 'c']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor > 'a']
expected = self.factor[np.asarray(self.factor) > 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor >= 'b']
expected = self.factor[np.asarray(self.factor) >= 'b']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor <= 'b']
expected = self.factor[np.asarray(self.factor) <= 'b']
self.assertTrue(result.equals(expected))
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
self.assert_numpy_array_equal(result, expected)
result = self.factor == 'd'
expected = np.repeat(False, len(self.factor))
self.assert_numpy_array_equal(result, expected)
# comparisons with categoricals
cat_rev = pd.Categorical(["a","b","c"], categories=["c","b","a"], ordered=True)
cat_rev_base = pd.Categorical(["b","b","b"], categories=["c","b","a"], ordered=True)
cat = pd.Categorical(["a","b","c"], ordered=True)
cat_base = pd.Categorical(["b","b","b"], categories=cat.categories, ordered=True)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = np.array([True, False, False])
self.assert_numpy_array_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = np.array([False, False, True])
self.assert_numpy_array_equal(res_rev, exp_rev)
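# (with categories=["c","b","a"] the order is c < b < a, so the first element "a" > "b"
# is True while "b" and "c" are not)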
res = cat > cat_base
exp = np.array([False, False, True])
self.assert_numpy_array_equal(res, exp)
# Only categories with same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
cat_rev_base2 = pd.Categorical(["b","b","b"], categories=["c","b","a","d"])
def f():
cat_rev > cat_rev_base2
self.assertRaises(TypeError, f)
# Only categories with same ordering information can be compared
cat_unordered = cat.set_ordered(False)
self.assertFalse((cat > cat).any())
def f():
cat > cat_unordered
self.assertRaises(TypeError, f)
# comparison (in both directions) with Series will raise
s = Series(["b","b","b"])
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
# comparison with numpy.array will raise in both directions, but only on newer
# numpy versions
a = np.array(["b","b","b"])
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
# The following work via '__array_priority__ = 1000'
# works only on numpy >= 1.7.1 and not on PY3.2
if LooseVersion(np.__version__) > "1.7.1" and not compat.PY3_2:
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
# Make sure that unequal comparisons take the categories order into account
cat_rev = pd.Categorical(list("abc"), categories=list("cba"), ordered=True)
exp = np.array([True, False, False])
res = cat_rev > "b"
self.assert_numpy_array_equal(res, exp)
def test_na_flags_int_categories(self):
# #1457
categories = lrange(10)
labels = np.random.randint(0, 10, 20)
labels[::5] = -1
cat = Categorical(labels, categories, fastpath=True)
repr(cat)
self.assert_numpy_array_equal(com.isnull(cat), labels == -1)
def test_categories_none(self):
factor = Categorical(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assertTrue(factor.equals(self.factor))
def test_describe(self):
# string type
desc = self.factor.describe()
expected = DataFrame.from_dict(dict(counts=[3, 2, 3],
freqs=[3/8., 2/8., 3/8.],
categories=['a', 'b', 'c'])
).set_index('categories')
tm.assert_frame_equal(desc, expected)
# check unused categories
cat = self.factor.copy()
cat.set_categories(["a","b","c","d"], inplace=True)
desc = cat.describe()
expected = DataFrame.from_dict(dict(counts=[3, 2, 3, 0],
freqs=[3/8., 2/8., 3/8., 0],
categories=['a', 'b', 'c', 'd'])
).set_index('categories')
tm.assert_frame_equal(desc, expected)
# check an integer one
desc = Categorical([1,2,3,1,2,3,3,2,1,1,1]).describe()
expected = DataFrame.from_dict(dict(counts=[5, 3, 3],
freqs=[5/11., 3/11., 3/11.],
categories=[1,2,3]
)
).set_index('categories')
tm.assert_frame_equal(desc, expected)
# https://github.com/pydata/pandas/issues/3678
# describe should work with NaN
cat = pd.Categorical([np.nan,1, 2, 2])
desc = cat.describe()
expected = DataFrame.from_dict(dict(counts=[1, 2, 1],
freqs=[1/4., 2/4., 1/4.],
categories=[1,2,np.nan]
)
).set_index('categories')
tm.assert_frame_equal(desc, expected)
# NA as a category
cat = pd.Categorical(["a","c","c",np.nan], categories=["b","a","c",np.nan])
result = cat.describe()
expected = DataFrame([[0,0],[1,0.25],[2,0.5],[1,0.25]],
columns=['counts','freqs'],
index=Index(['b','a','c',np.nan],name='categories'))
tm.assert_frame_equal(result,expected)
# NA as an unused category
cat = pd.Categorical(["a","c","c"], categories=["b","a","c",np.nan])
result = cat.describe()
expected = DataFrame([[0,0],[1,1/3.],[2,2/3.],[0,0]],
columns=['counts','freqs'],
index=Index(['b','a','c',np.nan],name='categories'))
tm.assert_frame_equal(result,expected)
def test_print(self):
expected = ["[a, b, b, a, a, c, c, c]",
"Categories (3, object): [a < b < c]"]
expected = "\n".join(expected)
actual = repr(self.factor)
self.assertEqual(actual, expected)
def test_big_print(self):
factor = Categorical([0,1,2,0,1,2]*100, ['a', 'b', 'c'], name='cat', fastpath=True)
expected = ["[a, b, c, a, b, ..., b, c, a, b, c]",
"Name: cat, Length: 600",
"Categories (3, object): [a, b, c]"]
expected = "\n".join(expected)
actual = repr(factor)
self.assertEqual(actual, expected)
def test_empty_print(self):
factor = Categorical([], ["a","b","c"], name="cat")
expected = ("[], Name: cat, Categories (3, object): [a, b, c]")
# hack because array_repr changed in numpy > 1.6.x
actual = repr(factor)
self.assertEqual(actual, expected)
factor = Categorical([], ["a","b","c"])
expected = ("[], Categories (3, object): [a, b, c]")
actual = repr(factor)
self.assertEqual(expected, actual)
factor = Categorical([], ["a","b","c"], ordered=True)
expected = ("[], Categories (3, object): [a < b < c]")
actual = repr(factor)
self.assertEqual(expected, actual)
factor = Categorical([], [])
expected = ("[], Categories (0, object): []")
self.assertEqual(expected, repr(factor))
def test_print_none_width(self):
# GH10087
a = pd.Series(pd.Categorical([1,2,3,4], name="a"))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"Name: a, dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
with option_context("display.width", None):
self.assertEqual(exp, repr(a))
def test_periodindex(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
cat1 = Categorical.from_array(idx1)
str(cat1)
exp_arr = np.array([0, 0, 1, 1, 2, 2],dtype='int64')
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat1._codes, exp_arr)
self.assertTrue(cat1.categories.equals(exp_idx))
idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
cat2 = Categorical.from_array(idx2, ordered=True)
str(cat2)
exp_arr = np.array([2, 2, 1, 0, 2, 0],dtype='int64')
exp_idx2 = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat2._codes, exp_arr)
self.assertTrue(cat2.categories.equals(exp_idx2))
idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07', '2013-05'], freq='M')
cat3 = Categorical.from_array(idx3, ordered=True)
exp_arr = np.array([6, 5, 4, 3, 2, 1, 0],dtype='int64')
exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09',
'2013-10', '2013-11', '2013-12'], freq='M')
self.assert_numpy_array_equal(cat3._codes, exp_arr)
self.assertTrue(cat3.categories.equals(exp_idx))
def test_categories_assignments(self):
s = pd.Categorical(["a","b","c","a"])
exp = np.array([1,2,3,1])
s.categories = [1,2,3]
self.assert_numpy_array_equal(s.__array__(), exp)
self.assert_numpy_array_equal(s.categories, np.array([1,2,3]))
# lengthen
def f():
s.categories = [1,2,3,4]
self.assertRaises(ValueError, f)
# shorten
def f():
s.categories = [1,2]
self.assertRaises(ValueError, f)
def test_construction_with_ordered(self):
# GH 9347, 9190
cat = Categorical([0,1,2])
self.assertFalse(cat.ordered)
cat = Categorical([0,1,2],ordered=False)
self.assertFalse(cat.ordered)
cat = Categorical([0,1,2],ordered=True)
self.assertTrue(cat.ordered)
def test_ordered_api(self):
# GH 9347
cat1 = pd.Categorical(["a","c","b"], ordered=False)
self.assertTrue(cat1.categories.equals(Index(['a','b','c'])))
self.assertFalse(cat1.ordered)
cat2 = pd.Categorical(["a","c","b"], categories=['b','c','a'], ordered=False)
self.assertTrue(cat2.categories.equals(Index(['b','c','a'])))
self.assertFalse(cat2.ordered)
cat3 = pd.Categorical(["a","c","b"], ordered=True)
self.assertTrue(cat3.categories.equals(Index(['a','b','c'])))
self.assertTrue(cat3.ordered)
cat4 = pd.Categorical(["a","c","b"], categories=['b','c','a'], ordered=True)
self.assertTrue(cat4.categories.equals(Index(['b','c','a'])))
self.assertTrue(cat4.ordered)
def test_set_ordered(self):
cat = Categorical(["a","b","c","a"], ordered=True)
cat2 = cat.as_unordered()
self.assertFalse(cat2.ordered)
cat2 = cat.as_ordered()
self.assertTrue(cat2.ordered)
cat2.as_unordered(inplace=True)
self.assertFalse(cat2.ordered)
cat2.as_ordered(inplace=True)
self.assertTrue(cat2.ordered)
self.assertTrue(cat2.set_ordered(True).ordered)
self.assertFalse(cat2.set_ordered(False).ordered)
cat2.set_ordered(True, inplace=True)
self.assertTrue(cat2.ordered)
cat2.set_ordered(False, inplace=True)
self.assertFalse(cat2.ordered)
# deprecated in v0.16.0
with tm.assert_produces_warning(FutureWarning):
cat.ordered = False
self.assertFalse(cat.ordered)
with tm.assert_produces_warning(FutureWarning):
cat.ordered = True
self.assertTrue(cat.ordered)
def test_set_categories(self):
cat = Categorical(["a","b","c","a"], ordered=True)
exp_categories = np.array(["c","b","a"])
exp_values = np.array(["a","b","c","a"])
res = cat.set_categories(["c","b","a"], inplace=True)
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
self.assertIsNone(res)
res = cat.set_categories(["a","b","c"])
# cat must be the same as before
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
# only res is changed
exp_categories_back = np.array(["a","b","c"])
self.assert_numpy_array_equal(res.categories, exp_categories_back)
self.assert_numpy_array_equal(res.__array__(), exp_values)
# not all "old" included in "new" -> all not included ones are now np.nan
cat = Categorical(["a","b","c","a"], ordered=True)
res = cat.set_categories(["a"])
self.assert_numpy_array_equal(res.codes, np.array([0,-1,-1,0]))
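# (a code of -1 marks a missing value: "b" and "c" are no longer valid categories,
# so those positions become np.nan)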
# still not all "old" in "new"
res = cat.set_categories(["a","b","d"])
self.assert_numpy_array_equal(res.codes, np.array([0,1,-1,0]))
self.assert_numpy_array_equal(res.categories, np.array(["a","b","d"]))
# all "old" included in "new"
cat = cat.set_categories(["a","b","c","d"])
exp_categories = np.array(["a","b","c","d"])
self.assert_numpy_array_equal(cat.categories, exp_categories)
# internals...
c = Categorical([1,2,3,4,1], categories=[1,2,3,4], ordered=True)
self.assert_numpy_array_equal(c._codes, np.array([0,1,2,3,0]))
self.assert_numpy_array_equal(c.categories , np.array([1,2,3,4] ))
self.assert_numpy_array_equal(c.get_values(), np.array([1,2,3,4,1] ))
c = c.set_categories([4,3,2,1]) # all "pointers" to '4' must be changed from 3 to 0,...
self.assert_numpy_array_equal(c._codes, np.array([3,2,1,0,3])) # positions are changed
self.assert_numpy_array_equal(c.categories, np.array([4,3,2,1])) # categories are now in new order
self.assert_numpy_array_equal(c.get_values(), np.array([1,2,3,4,1])) # output is the same
self.assertEqual(c.min(), 4)
self.assertEqual(c.max(), 1)
# set_categories should set the ordering if specified
c2 = c.set_categories([4,3,2,1],ordered=False)
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
# set_categories should pass thru the ordering
c2 = c.set_ordered(False).set_categories([4,3,2,1])
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
def test_rename_categories(self):
cat = pd.Categorical(["a","b","c","a"])
# inplace=False: the old one must not be changed
res = cat.rename_categories([1,2,3])
self.assert_numpy_array_equal(res.__array__(), np.array([1,2,3,1]))
self.assert_numpy_array_equal(res.categories, np.array([1,2,3]))
self.assert_numpy_array_equal(cat.__array__(), np.array(["a","b","c","a"]))
self.assert_numpy_array_equal(cat.categories, np.array(["a","b","c"]))
res = cat.rename_categories([1,2,3], inplace=True)
# and now inplace
self.assertIsNone(res)
self.assert_numpy_array_equal(cat.__array__(), np.array([1,2,3,1]))
self.assert_numpy_array_equal(cat.categories, np.array([1,2,3]))
# lengthen
def f():
cat.rename_categories([1,2,3,4])
self.assertRaises(ValueError, f)
# shorten
def f():
cat.rename_categories([1,2])
self.assertRaises(ValueError, f)
def test_reorder_categories(self):
cat = Categorical(["a","b","c","a"], ordered=True)
old = cat.copy()
new = Categorical(["a","b","c","a"], categories=["c","b","a"], ordered=True)
# first inplace == False
res = cat.reorder_categories(["c","b","a"])
# cat must be the same as before
self.assert_categorical_equal(cat, old)
# only res is changed
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.reorder_categories(["c","b","a"], inplace=True)
self.assertIsNone(res)
self.assert_categorical_equal(cat, new)
# not all "old" included in "new"
cat = Categorical(["a","b","c","a"], ordered=True)
def f():
cat.reorder_categories(["a"])
self.assertRaises(ValueError, f)
# still not all "old" in "new"
def f():
cat.reorder_categories(["a","b","d"])
self.assertRaises(ValueError, f)
# all "old" included in "new", but too long
def f():
cat.reorder_categories(["a","b","c","d"])
self.assertRaises(ValueError, f)
def test_add_categories(self):
cat = Categorical(["a","b","c","a"], ordered=True)
old = cat.copy()
new = Categorical(["a","b","c","a"], categories=["a","b","c","d"], ordered=True)
# first inplace == False
res = cat.add_categories("d")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.add_categories(["d"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.add_categories("d", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# new is in old categories
def f():
cat.add_categories(["d"])
self.assertRaises(ValueError, f)
# GH 9927
cat = Categorical(list("abc"), ordered=True)
expected = Categorical(list("abc"), categories=list("abcde"), ordered=True)
# test with Series, np.array, index, list
res = cat.add_categories(Series(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(np.array(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(Index(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(["d", "e"])
self.assert_categorical_equal(res, expected)
def test_remove_categories(self):
cat = Categorical(["a","b","c","a"], ordered=True)
old = cat.copy()
new = Categorical(["a","b",np.nan,"a"], categories=["a","b"], ordered=True)
# first inplace == False
res = cat.remove_categories("c")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.remove_categories(["c"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.remove_categories("c", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# removal is not in categories
def f():
cat.remove_categories(["c"])
self.assertRaises(ValueError, f)
def test_remove_unused_categories(self):
c = Categorical(["a","b","c","d","a"], categories=["a","b","c","d","e"])
exp_categories_all = np.array(["a","b","c","d","e"])
exp_categories_dropped = np.array(["a","b","c","d"])
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories, exp_categories_dropped)
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories(inplace=True)
self.assert_numpy_array_equal(c.categories, exp_categories_dropped)
self.assertIsNone(res)
def test_nan_handling(self):
# Nans are represented as -1 in codes
c = Categorical(["a","b",np.nan,"a"])
self.assert_numpy_array_equal(c.categories , np.array(["a","b"]))
self.assert_numpy_array_equal(c._codes , np.array([0,1,-1,0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories , np.array(["a","b"]))
self.assert_numpy_array_equal(c._codes , np.array([0,-1,-1,0]))
# If categories have nan included, the code should point to that instead
c = Categorical(["a","b",np.nan,"a"], categories=["a","b",np.nan])
self.assert_numpy_array_equal(c.categories , np.array(["a","b",np.nan],dtype=np.object_))
self.assert_numpy_array_equal(c._codes , np.array([0,1,2,0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories , np.array(["a","b",np.nan],dtype=np.object_))
self.assert_numpy_array_equal(c._codes , np.array([0,2,2,0]))
# Changing categories should also make the replaced category np.nan
c = Categorical(["a","b","c","a"])
c.categories = ["a","b",np.nan]
self.assert_numpy_array_equal(c.categories , np.array(["a","b",np.nan],dtype=np.object_))
self.assert_numpy_array_equal(c._codes , np.array([0,1,2,0]))
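# (the codes are untouched; renaming the third category to np.nan just makes those
# positions read as missing)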
# Adding nan to categories should make assigned nan point to the category!
c = Categorical(["a","b",np.nan,"a"])
self.assert_numpy_array_equal(c.categories , np.array(["a","b"]))
self.assert_numpy_array_equal(c._codes , np.array([0,1,-1,0]))
c.set_categories(["a","b",np.nan], rename=True, inplace=True)
self.assert_numpy_array_equal(c.categories , np.array(["a","b",np.nan],dtype=np.object_))
self.assert_numpy_array_equal(c._codes , np.array([0,1,-1,0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories , np.array(["a","b",np.nan],dtype=np.object_))
self.assert_numpy_array_equal(c._codes , np.array([0,2,-1,0]))
# Remove null categories (GH 10156)
cases = [
([1.0, 2.0, np.nan], [1.0, 2.0]),
(['a', 'b', None], ['a', 'b']),
([pd.Timestamp('2012-05-01'), pd.NaT], [pd.Timestamp('2012-05-01')])
]
null_values = [np.nan, None, pd.NaT]
for with_null, without in cases:
base = Categorical([], with_null)
expected = Categorical([], without)
for nullval in null_values:
result = base.remove_categories(nullval)
self.assert_categorical_equal(result, expected)
# Different null values are indistinguishable
for i, j in [(0, 1), (0, 2), (1, 2)]:
nulls = [null_values[i], null_values[j]]
self.assertRaises(ValueError, lambda: Categorical([], categories=nulls))
def test_isnull(self):
exp = np.array([False, False, True])
c = Categorical(["a","b",np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
c = Categorical(["a","b",np.nan], categories=["a","b",np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
# test both nan in categories and as -1
exp = np.array([True, False, True])
c = Categorical(["a","b",np.nan])
c.set_categories(["a","b",np.nan], rename=True, inplace=True)
c[0] = np.nan
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
def test_codes_immutable(self):
# Codes should be read only
c = Categorical(["a","b","c","a", np.nan])
exp = np.array([0,1,2,0,-1],dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
# Assignments to codes should raise
def f():
c.codes = np.array([0,1,2,0,1],dtype='int8')
self.assertRaises(ValueError, f)
# changes in the codes array should raise
# np 1.6.1 raises RuntimeError rather than ValueError
codes= c.codes
def f():
codes[4] = 1
self.assertRaises(ValueError, f)
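# (the array returned by .codes is read-only; mutating the data has to go through the
# Categorical itself, or the private _codes attribute, as the rest of this test shows)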
# But even after getting the codes, the original array should still be writeable!
c[4] = "a"
exp = np.array([0,1,2,0,0],dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
c._codes[4] = 2
exp = np.array([0,1,2,0, 2],dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Categorical(["a","b","c","d"], ordered=False)
self.assertRaises(TypeError, lambda : cat.min())
self.assertRaises(TypeError, lambda : cat.max())
cat = Categorical(["a","b","c","d"], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Categorical(["a","b","c","d"], categories=['d','c','b','a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Categorical([np.nan,"b","c",np.nan], categories=['d','c','b','a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
_min = cat.min(numeric_only=True)
self.assertEqual(_min, "c")
_max = cat.max(numeric_only=True)
self.assertEqual(_max, "b")
cat = Categorical([np.nan,1,2,np.nan], categories=[5,4,3,2,1], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
_min = cat.min(numeric_only=True)
self.assertEqual(_min, 2)
_max = cat.max(numeric_only=True)
self.assertEqual(_max, 1)
def test_unique(self):
cat = Categorical(["a","b"])
exp = np.asarray(["a","b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
cat = Categorical(["a","b","a","a"], categories=["a","b","c"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
# unique should not sort
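# (unique keeps the order of appearance rather than the category order)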
cat = Categorical(["b", "b", np.nan, "a"], categories=["a","b","c"])
res = cat.unique()
exp = np.asarray(["b", np.nan, "a"], dtype=object)
self.assert_numpy_array_equal(res, exp)
def test_mode(self):
s = Categorical([1,1,2,4,5,5,5], categories=[5,4,3,2,1], ordered=True)
res = s.mode()
exp = Categorical([5], categories=[5,4,3,2,1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1,1,1,4,5,5,5], categories=[5,4,3,2,1], ordered=True)
res = s.mode()
exp = Categorical([5,1], categories=[5,4,3,2,1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1,2,3,4,5], categories=[5,4,3,2,1], ordered=True)
res = s.mode()
exp = Categorical([], categories=[5,4,3,2,1], ordered=True)
self.assertTrue(res.equals(exp))
# NaN should not become the mode!
s = Categorical([np.nan,np.nan,np.nan,4,5], categories=[5,4,3,2,1], ordered=True)
res = s.mode()
exp = Categorical([], categories=[5,4,3,2,1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan,np.nan,np.nan,4,5,4], categories=[5,4,3,2,1], ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5,4,3,2,1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan,np.nan,4,5,4], categories=[5,4,3,2,1], ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5,4,3,2,1], ordered=True)
self.assertTrue(res.equals(exp))
def test_sort(self):
# unordered cats are sortable
cat = Categorical(["a","b","b","a"], ordered=False)
cat.order()
cat.sort()
cat = Categorical(["a","c","b","d"], ordered=True)
# order
res = cat.order()
exp = np.array(["a","b","c","d"],dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
cat = Categorical(["a","c","b","d"], categories=["a","b","c","d"], ordered=True)
res = cat.order()
exp = np.array(["a","b","c","d"],dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
res = cat.order(ascending=False)
exp = np.array(["d","c","b","a"],dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
# sort (inplace order)
cat1 = cat.copy()
cat1.sort()
exp = np.array(["a","b","c","d"],dtype=object)
self.assert_numpy_array_equal(cat1.__array__(), exp)
def test_slicing_directly(self):
cat = Categorical(["a","b","c","d","a","b","c"])
sliced = cat[3]
tm.assert_equal(sliced, "d")
sliced = cat[3:5]
expected = Categorical(["d","a"], categories=['a', 'b', 'c', 'd'])
self.assert_numpy_array_equal(sliced._codes, expected._codes)
tm.assert_index_equal(sliced.categories, expected.categories)
def test_set_item_nan(self):
cat = pd.Categorical([1,2,3])
exp = pd.Categorical([1,np.nan,3], categories=[1,2,3])
cat[1] = np.nan
self.assertTrue(cat.equals(exp))
# if nan in categories, the proper code should be set!
cat = pd.Categorical([1,2,3, np.nan], categories=[1,2,3])
cat.set_categories([1,2,3, np.nan], rename=True, inplace=True)
cat[1] = np.nan
exp = np.array([0,3,2,-1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1,2,3, np.nan], categories=[1,2,3])
cat.set_categories([1,2,3, np.nan], rename=True, inplace=True)
cat[1:3] = np.nan
exp = np.array([0,3,3,-1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1,2,3, np.nan], categories=[1,2,3])
cat.set_categories([1,2,3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, 1]
exp = np.array([0,3,0,-1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1,2,3, np.nan], categories=[1,2,3])
cat.set_categories([1,2,3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, np.nan]
exp = np.array([0,3,3,-1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1,2, np.nan, 3], categories=[1,2,3])
cat.set_categories([1,2,3, np.nan], rename=True, inplace=True)
cat[pd.isnull(cat)] = np.nan
exp = np.array([0,1,3,2])
self.assert_numpy_array_equal(cat.codes, exp)
def test_nbytes(self):
cat = pd.Categorical([1,2,3])
exp = cat._codes.nbytes + cat._categories.values.nbytes
self.assertEqual(cat.nbytes, exp)
def test_searchsorted(self):
# https://github.com/pydata/pandas/issues/8420
s1 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk' ])
s2 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk', 'donuts' ])
c1 = pd.Categorical(s1, ordered=True)
c2 = pd.Categorical(s2, ordered=True)
# Single item array
res = c1.searchsorted(['bread'])
chk = s1.searchsorted(['bread'])
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Scalar version of single item array
# Categorical returns np.array like pd.Series, but different from np.array.searchsorted()
res = c1.searchsorted('bread')
chk = s1.searchsorted('bread')
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present in the Categorical
res = c1.searchsorted(['bread', 'eggs'])
chk = s1.searchsorted(['bread', 'eggs'])
exp = np.array([1, 4])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present, to the right
res = c1.searchsorted(['bread', 'eggs'], side='right')
chk = s1.searchsorted(['bread', 'eggs'], side='right')
exp = np.array([3, 4]) # eggs before milk
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# As above, but with a sorter array to reorder an unsorted array
res = c2.searchsorted(['bread', 'eggs'], side='right', sorter=[0, 1, 2, 3, 5, 4])
chk = s2.searchsorted(['bread', 'eggs'], side='right', sorter=[0, 1, 2, 3, 5, 4])
exp = np.array([3, 5]) # eggs after donuts, after switching milk and donuts
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
def test_deprecated_labels(self):
# TODO: labels is deprecated and should be removed in 0.18 or 2017, whichever is earlier
cat = pd.Categorical([1,2,3, np.nan], categories=[1,2,3])
exp = cat.codes
with tm.assert_produces_warning(FutureWarning):
res = cat.labels
self.assert_numpy_array_equal(res, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_deprecated_levels(self):
# TODO: levels is deprecated and should be removed in 0.18 or 2017, whichever is earlier
cat = pd.Categorical([1,2,3, np.nan], categories=[1,2,3])
exp = cat.categories
with tm.assert_produces_warning(FutureWarning):
res = cat.levels
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
res = pd.Categorical([1,2,3, np.nan], levels=[1,2,3])
self.assert_numpy_array_equal(res.categories, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_datetime_categorical_comparison(self):
dt_cat = pd.Categorical(pd.date_range('2014-01-01', periods=3), ordered=True)
self.assert_numpy_array_equal(dt_cat > dt_cat[0], [False, True, True])
self.assert_numpy_array_equal(dt_cat[0] < dt_cat, [False, True, True])
def test_reflected_comparison_with_scalars(self):
# GH8658
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assert_numpy_array_equal(cat > cat[0], [False, True, True])
self.assert_numpy_array_equal(cat[0] < cat, [False, True, True])
def test_comparison_with_unknown_scalars(self):
# https://github.com/pydata/pandas/issues/9836#issuecomment-92123057 and following
# comparisons with scalars not in categories should raise for unequal comps, but not for
# equal/not equal
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assertRaises(TypeError, lambda: cat < 4)
self.assertRaises(TypeError, lambda: cat > 4)
self.assertRaises(TypeError, lambda: 4 < cat)
self.assertRaises(TypeError, lambda: 4 > cat)
self.assert_numpy_array_equal(cat == 4 , [False, False, False])
self.assert_numpy_array_equal(cat != 4 , [True, True, True])
class TestCategoricalAsBlock(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'])
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = [ "{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500) ]
df = df.sort(columns=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500), right=False, labels=labels)
self.cat = df
def test_dtypes(self):
dtype = com.CategoricalDtype()
hash(dtype)
self.assertTrue(com.is_categorical_dtype(dtype))
s = Series(self.factor,name='A')
# dtypes
self.assertTrue(com.is_categorical_dtype(s.dtype))
self.assertTrue(com.is_categorical_dtype(s))
self.assertFalse(com.is_categorical_dtype(np.dtype('float64')))
# np.dtype doesn't know about our new dtype
def f():
np.dtype(dtype)
self.assertRaises(TypeError, f)
self.assertFalse(dtype == np.str_)
self.assertFalse(np.str_ == dtype)
# GH8143
index = ['cat','obj','num']
cat = pd.Categorical(['a', 'b', 'c'])
obj = pd.Series(['a', 'b', 'c'])
num = pd.Series([1, 2, 3])
df = pd.concat([pd.Series(cat), obj, num], axis=1, keys=index)
result = df.dtypes == 'object'
expected = Series([False,True,False],index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'int64'
expected = Series([False,False,True],index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'category'
expected = Series([True,False,False],index=index)
tm.assert_series_equal(result, expected)
def test_codes_dtypes(self):
# GH 8453
result = Categorical(['foo','bar','baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = Categorical(['foo%05d' % i for i in range(400) ])
self.assertTrue(result.codes.dtype == 'int16')
result = Categorical(['foo%05d' % i for i in range(40000) ])
self.assertTrue(result.codes.dtype == 'int32')
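# (the codes dtype appears to be the smallest signed integer type that can hold the
# number of categories: int8 for 3, int16 for 400, int32 for 40000)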
# adding cats
result = Categorical(['foo','bar','baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = result.add_categories(['foo%05d' % i for i in range(400) ])
self.assertTrue(result.codes.dtype == 'int16')
# removing cats
result = result.remove_categories(['foo%05d' % i for i in range(300) ])
self.assertTrue(result.codes.dtype == 'int8')
def test_basic(self):
# test basic creation / coercion of categoricals
s = Series(self.factor, name='A')
self.assertEqual(s.dtype,'category')
self.assertEqual(len(s),len(self.factor))
str(s.values)
str(s)
# in a frame
df = DataFrame({'A' : self.factor })
result = df['A']
tm.assert_series_equal(result,s)
result = df.iloc[:,0]
tm.assert_series_equal(result,s)
self.assertEqual(len(df),len(self.factor))
str(df.values)
str(df)
df = DataFrame({'A' : s })
result = df['A']
tm.assert_series_equal(result,s)
self.assertEqual(len(df),len(self.factor))
str(df.values)
str(df)
# multiples
df = DataFrame({'A' : s, 'B' : s, 'C' : 1})
result1 = df['A']
result2 = df['B']
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
self.assertEqual(result2.name, 'B')
self.assertEqual(len(df),len(self.factor))
str(df.values)
str(df)
# GH8623
x = pd.DataFrame([[1,'John P. Doe'],[2,'Jane Dove'],[1,'John P. Doe']],
columns=['person_id','person_name'])
x['person_name'] = pd.Categorical(x.person_name) # doing this breaks transform
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
self.assertEqual(result,expected)
result = x.person_name[0]
self.assertEqual(result,expected)
result = x.person_name.loc[0]
self.assertEqual(result,expected)
def test_creation_astype(self):
l = ["a","b","c","a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
l = [1,2,3,1]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
df = pd.DataFrame({"cats":[1,2,3,4,5,6], "vals":[1,2,3,4,5,6]})
cats = Categorical([1,2,3,4,5,6])
exp_df = pd.DataFrame({"cats":cats, "vals":[1,2,3,4,5,6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
df = pd.DataFrame({"cats":['a', 'b', 'b', 'a', 'a', 'd'], "vals":[1,2,3,4,5,6]})
cats = Categorical(['a', 'b', 'b', 'a', 'a', 'd'])
exp_df = pd.DataFrame({"cats":cats, "vals":[1,2,3,4,5,6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
# with keywords
l = ["a","b","c","a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l, ordered=True))
res = s.astype('category', ordered=True)
tm.assert_series_equal(res, exp)
exp = pd.Series(Categorical(l, categories=list('abcdef'), ordered=True))
res = s.astype('category', categories=list('abcdef'), ordered=True)
tm.assert_series_equal(res, exp)
def test_construction_series(self):
l = [1,2,3,1]
exp = Series(l).astype('category')
res = Series(l,dtype='category')
tm.assert_series_equal(res, exp)
l = ["a","b","c","a"]
exp = Series(l).astype('category')
res = Series(l,dtype='category')
tm.assert_series_equal(res, exp)
# insert into frame with different index
# GH 8076
index = pd.date_range('20000101', periods=3)
expected = Series(Categorical(values=[np.nan,np.nan,np.nan],categories=['a', 'b', 'c']))
expected.index = index
expected = DataFrame({'x': expected})
df = DataFrame({'x': Series(['a', 'b', 'c'],dtype='category')}, index=index)
tm.assert_frame_equal(df, expected)
def test_construction_frame(self):
# GH8626
# dict creation
df = DataFrame({ 'A' : list('abc') }, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# to_frame
s = Series(list('abc'), dtype='category')
result = s.to_frame()
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(result[0], expected)
result = s.to_frame(name='foo')
expected = Series(list('abc'), dtype='category', name='foo')
tm.assert_series_equal(result['foo'], expected)
# list-like creation
df = DataFrame(list('abc'), dtype='category')
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(df[0], expected)
# ndim != 1
df = DataFrame([pd.Categorical(list('abc'))])
expected = DataFrame({ 0 : Series(list('abc'),dtype='category')})
tm.assert_frame_equal(df,expected)
df = DataFrame([pd.Categorical(list('abc')),pd.Categorical(list('abd'))])
expected = DataFrame({ 0 : Series(list('abc'),dtype='category'),
1 : Series(list('abd'),dtype='category')},columns=[0,1])
tm.assert_frame_equal(df,expected)
# mixed
df = DataFrame([pd.Categorical(list('abc')),list('def')])
expected = DataFrame({ 0 : Series(list('abc'),dtype='category'),
1 : list('def')},columns=[0,1])
tm.assert_frame_equal(df,expected)
# invalid (shape)
self.assertRaises(ValueError, lambda : DataFrame([pd.Categorical(list('abc')),pd.Categorical(list('abdefg'))]))
# ndim > 1
self.assertRaises(NotImplementedError, lambda : pd.Categorical(np.array([list('abcd')])))
def test_reshaping(self):
p = tm.makePanel()
p['str'] = 'foo'
df = p.to_frame()
df['category'] = df['str'].astype('category')
result = df['category'].unstack()
c = Categorical(['foo']*len(p.major_axis))
expected = DataFrame({'A' : c.copy(),
'B' : c.copy(),
'C' : c.copy(),
'D' : c.copy()},
columns=Index(list('ABCD'),name='minor'),
index=p.major_axis.set_names('major'))
tm.assert_frame_equal(result, expected)
def test_reindex(self):
index = pd.date_range('20000101', periods=3)
# reindexing to an invalid Categorical
s = Series(['a', 'b', 'c'],dtype='category')
result = s.reindex(index)
expected = Series(Categorical(values=[np.nan,np.nan,np.nan],categories=['a', 'b', 'c']))
expected.index = index
tm.assert_series_equal(result, expected)
# partial reindexing
expected = Series(Categorical(values=['b','c'],categories=['a', 'b', 'c']))
expected.index = [1,2]
result = s.reindex([1,2])
tm.assert_series_equal(result, expected)
expected = Series(Categorical(values=['c',np.nan],categories=['a', 'b', 'c']))
expected.index = [2,3]
result = s.reindex([2,3])
tm.assert_series_equal(result, expected)
def test_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either the series or the
# categorical should not change the values in the other one, IF you specify copy!
cat = Categorical(["a","b","c","a"])
s = pd.Series(cat, copy=True)
self.assertFalse(s.cat is cat)
s.cat.categories = [1,2,3]
exp_s = np.array([1,2,3,1])
exp_cat = np.array(["a","b","c","a"])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2,2,3,1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
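# (without copy=True the Series wraps the very same Categorical object, as the identity
# check below confirms)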
cat = Categorical(["a","b","c","a"])
s = pd.Series(cat)
self.assertTrue(s.values is cat)
s.cat.categories = [1,2,3]
exp_s = np.array([1,2,3,1])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2,2,3,1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_s2)
def test_nan_handling(self):
# Nans are represented as -1 in labels
s = Series(Categorical(["a","b",np.nan,"a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a","b"]))
self.assert_numpy_array_equal(s.values.codes, np.array([0,1,-1,0]))
# If categories have nan included, the label should point to that instead
s2 = Series(Categorical(["a","b",np.nan,"a"], categories=["a","b",np.nan]))
self.assert_numpy_array_equal(s2.cat.categories,
np.array(["a","b",np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s2.values.codes, np.array([0,1,2,0]))
# Changing categories should also make the replaced category np.nan
s3 = Series(Categorical(["a","b","c","a"]))
s3.cat.categories = ["a","b",np.nan]
self.assert_numpy_array_equal(s3.cat.categories,
np.array(["a","b",np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s3.values.codes, np.array([0,1,2,0]))
def test_cat_accessor(self):
s = Series(Categorical(["a","b",np.nan,"a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a","b"]))
self.assertEqual(s.cat.ordered, False)
exp = Categorical(["a","b",np.nan,"a"], categories=["b","a"])
s.cat.set_categories(["b", "a"], inplace=True)
self.assertTrue(s.values.equals(exp))
res = s.cat.set_categories(["b", "a"])
self.assertTrue(res.values.equals(exp))
exp = Categorical(["a","b",np.nan,"a"], categories=["b","a"])
s[:] = "a"
s = s.cat.remove_unused_categories()
self.assert_numpy_array_equal(s.cat.categories, np.array(["a"]))
def test_sequence_like(self):
# GH 7839
# make sure can iterate
df = DataFrame({"id":[1,2,3,4,5,6], "raw_grade":['a', 'b', 'b', 'a', 'a', 'e']})
df['grade'] = Categorical(df['raw_grade'])
# basic sequencing testing
result = list(df.grade.values)
expected = np.array(df.grade.values).tolist()
tm.assert_almost_equal(result,expected)
# iteration
for t in df.itertuples(index=False):
str(t)
for row, s in df.iterrows():
str(s)
for c, col in df.iteritems():
str(s)
def test_series_delegations(self):
# invalid accessor
self.assertRaises(AttributeError, lambda : Series([1,2,3]).cat)
tm.assertRaisesRegexp(AttributeError,
r"Can only use .cat accessor with a 'category' dtype",
lambda : Series([1,2,3]).cat)
self.assertRaises(AttributeError, lambda : Series(['a','b','c']).cat)
self.assertRaises(AttributeError, lambda : Series(np.arange(5.)).cat)
self.assertRaises(AttributeError, lambda : Series([Timestamp('20130101')]).cat)
# Series should delegate calls to '.categories', '.codes', '.ordered' and the
# methods '.set_categories()' and '.remove_unused_categories()' to the categorical
s = Series(Categorical(["a","b","c","a"], ordered=True))
exp_categories = np.array(["a","b","c"])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
s.cat.categories = [1,2,3]
exp_categories = np.array([1,2,3])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
exp_codes = Series([0,1,2,0],dtype='int8')
tm.assert_series_equal(s.cat.codes, exp_codes)
self.assertEqual(s.cat.ordered, True)
s = s.cat.as_unordered()
self.assertEqual(s.cat.ordered, False)
s.cat.as_ordered(inplace=True)
self.assertEqual(s.cat.ordered, True)
# reorder
s = Series(Categorical(["a","b","c","a"], ordered=True))
exp_categories = np.array(["c","b","a"])
exp_values = np.array(["a","b","c","a"])
s = s.cat.set_categories(["c","b","a"])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
# remove unused categories
s = Series(Categorical(["a","b","b","a"], categories=["a","b","c"]))
exp_categories = np.array(["a","b"])
exp_values = np.array(["a","b","b","a"])
s = s.cat.remove_unused_categories()
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
# Calling set_categories directly on the Series (instead of via the .cat accessor) is an easy mistake, so test that it raises an error:
def f():
s.set_categories([4,3,2,1])
self.assertRaises(Exception, f)
# right: s.cat.set_categories([4,3,2,1])
def test_series_functions_no_warnings(self):
df = pd.DataFrame({'value': np.random.randint(0, 100, 20)})
labels = [ "{0} - {1}".format(i, i + 9) for i in range(0, 100, 10)]
with tm.assert_produces_warning(False):
df['group'] = pd.cut(df.value, range(0, 105, 10), right=False, labels=labels)
def test_assignment_to_dataframe(self):
# assignment
df = DataFrame({'value': np.array(np.random.randint(0, 10000, 100),dtype='int32')})
labels = [ "{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500) ]
df = df.sort(columns=['value'], ascending=True)
s = pd.cut(df.value, range(0, 10500, 500), right=False, labels=labels)
d = s.values
df['D'] = d
str(df)
result = df.dtypes
expected = Series([np.dtype('int32'), com.CategoricalDtype()],index=['value','D'])
tm.assert_series_equal(result,expected)
df['E'] = s
str(df)
result = df.dtypes
expected = Series([np.dtype('int32'), com.CategoricalDtype(), com.CategoricalDtype()],
index=['value','D','E'])
tm.assert_series_equal(result,expected)
result1 = df['D']
result2 = df['E']
self.assertTrue(result1._data._block.values.equals(d))
# sorting
s.name = 'E'
self.assertTrue(result2.sort_index().equals(s.sort_index()))
cat = pd.Categorical([1,2,3,10], categories=[1,2,3,4,10])
df = pd.DataFrame(pd.Series(cat))
def test_describe(self):
# Categoricals should not show up together with numerical columns
result = self.cat.describe()
self.assertEqual(len(result.columns),1)
# In a frame, describe() for the cat should be the same as for string arrays (count, unique,
# top, freq)
cat = Categorical(["a","b","b","b"], categories=['a','b','c'], ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4,2,"b",3],index=['count','unique','top', 'freq'])
tm.assert_series_equal(result,expected)
cat = pd.Series(pd.Categorical(["a","b","c","c"]))
df3 = pd.DataFrame({"cat":cat, "s":["a","b","c","c"]})
res = df3.describe()
self.assert_numpy_array_equal(res["cat"].values, res["s"].values)
def test_repr(self):
a = pd.Series(pd.Categorical([1,2,3,4], name="a"))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"Name: a, dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
self.assertEqual(exp, a.__unicode__())
a = pd.Series(pd.Categorical(["a","b"] *25, name="a"))
exp = u("0 a\n1 b\n" + " ..\n" +
"48 a\n49 b\n" +
"Name: a, dtype: category\nCategories (2, object): [a, b]")
with option_context("display.max_rows", 5):
self.assertEqual(exp, repr(a))
levs = list("abcdefghijklmnopqrstuvwxyz")
a = pd.Series(pd.Categorical(["a","b"], name="a", categories=levs, ordered=True))
exp = u("0 a\n1 b\n" +
"Name: a, dtype: category\n"
"Categories (26, object): [a < b < c < d ... w < x < y < z]")
self.assertEqual(exp,a.__unicode__())
def test_info(self):
# make sure it works
n = 2500
df = DataFrame({ 'int64' : np.random.randint(100,size=n) })
df['category'] = Series(np.array(list('abcdefghij')).take(np.random.randint(0,10,size=n))).astype('category')
df.isnull()
df.info()
df2 = df[df['category']=='d']
df2.info()
def test_groupby_sort(self):
# http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby
# This should result in a properly sorted Series so that the plot
# has a sorted x axis
#self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')
res = self.cat.groupby(['value_group'])['value_group'].count()
exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))]
tm.assert_series_equal(res, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Series(Categorical(["a","b","c","d"], ordered=False))
self.assertRaises(TypeError, lambda : cat.min())
self.assertRaises(TypeError, lambda : cat.max())
cat = Series(Categorical(["a","b","c","d"], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Series(Categorical(["a","b","c","d"], categories=['d','c','b','a'], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Series(Categorical([np.nan,"b","c",np.nan], categories=['d','c','b','a'], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
cat = Series(Categorical([np.nan,1,2,np.nan], categories=[5,4,3,2,1], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
def test_mode(self):
s = Series(Categorical([1,1,2,4,5,5,5], categories=[5,4,3,2,1], ordered=True))
res = s.mode()
exp = Series(Categorical([5], categories=[5,4,3,2,1], ordered=True))
tm.assert_series_equal(res, exp)
s = Series(Categorical([1,1,1,4,5,5,5], categories=[5,4,3,2,1], ordered=True))
res = s.mode()
exp = Series(Categorical([5,1], categories=[5,4,3,2,1], ordered=True))
tm.assert_series_equal(res, exp)
s = Series(Categorical([1,2,3,4,5], categories=[5,4,3,2,1], ordered=True))
res = s.mode()
exp = Series(Categorical([], categories=[5,4,3,2,1], ordered=True))
tm.assert_series_equal(res, exp)
def test_value_counts(self):
s = pd.Series(pd.Categorical(["a","b","c","c","c","b"], categories=["c","a","b","d"]))
res = s.value_counts(sort=False)
exp = Series([3,1,2,0], index=["c","a","b","d"])
tm.assert_series_equal(res, exp)
res = s.value_counts(sort=True)
exp = Series([3,2,1,0], index=["c","b","a","d"])
tm.assert_series_equal(res, exp)
def test_value_counts_with_nan(self):
# https://github.com/pydata/pandas/issues/9443
s = pd.Series(["a", "b", "a"], dtype="category")
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=["a", "b"]))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([2, 1], index=["a", "b"]))
s = pd.Series(["a", "b", None, "a", None, None], dtype="category")
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=["a", "b"]))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([3, 2, 1], index=[np.nan, "a", "b"]))
# When we aren't sorting by counts, and np.nan isn't a
# category, it should be last.
tm.assert_series_equal(
s.value_counts(dropna=False, sort=False),
pd.Series([2, 1, 3], index=["a", "b", np.nan]))
s = pd.Series(pd.Categorical(["a", "b", "a"], categories=["a", "b", np.nan]))
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=["a", "b"]))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([2, 1, 0], index=["a", "b", np.nan]))
s = pd.Series(pd.Categorical(["a", "b", None, "a", None, None], categories=["a", "b", np.nan]))
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=["a", "b"]))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([3, 2, 1], index=[np.nan, "a", "b"]))
def test_groupby(self):
cats = Categorical(["a", "a", "a", "b", "b", "b", "c", "c", "c"], categories=["a","b","c","d"], ordered=True)
data = DataFrame({"a":[1,1,1,2,2,2,3,4,5], "b":cats})
expected = DataFrame({ 'a' : Series([1,2,4,np.nan],index=Index(['a','b','c','d'],name='b')) })
result = data.groupby("b").mean()
tm.assert_frame_equal(result, expected)
raw_cat1 = Categorical(["a","a","b","b"], categories=["a","b","z"], ordered=True)
raw_cat2 = Categorical(["c","d","c","d"], categories=["c","d","y"], ordered=True)
df = DataFrame({"A":raw_cat1,"B":raw_cat2, "values":[1,2,3,4]})
# single grouper
gb = df.groupby("A")
expected = DataFrame({ 'values' : Series([3,7,np.nan],index=Index(['a','b','z'],name='A')) })
result = gb.sum()
tm.assert_frame_equal(result, expected)
# multiple groupers
gb = df.groupby(['A','B'])
expected = DataFrame({ 'values' : Series([1,2,np.nan,3,4,np.nan,np.nan,np.nan,np.nan],
index=pd.MultiIndex.from_product([['a','b','z'],['c','d','y']],names=['A','B'])) })
result = gb.sum()
tm.assert_frame_equal(result, expected)
# multiple groupers with a non-cat
df = df.copy()
df['C'] = ['foo','bar']*2
gb = df.groupby(['A','B','C'])
expected = DataFrame({ 'values' :
Series(np.nan,index=pd.MultiIndex.from_product([['a','b','z'],
['c','d','y'],
['foo','bar']],
names=['A','B','C']))
}).sortlevel()
expected.iloc[[1,2,7,8],0] = [1,2,3,4]
result = gb.sum()
tm.assert_frame_equal(result, expected)
# GH 8623
x=pd.DataFrame([[1,'John P. Doe'],[2,'Jane Dove'],[1,'John P. Doe']],
columns=['person_id','person_name'])
x['person_name'] = pd.Categorical(x.person_name)
g = x.groupby(['person_id'])
result = g.transform(lambda x:x)
tm.assert_frame_equal(result, x[['person_name']])
result = x.drop_duplicates('person_name')
expected = x.iloc[[0,1]]
tm.assert_frame_equal(result, expected)
def f(x):
return x.drop_duplicates('person_name').iloc[0]
result = g.apply(f)
expected = x.iloc[[0,1]].copy()
expected.index = Index([1,2],name='person_id')
expected['person_name'] = expected['person_name'].astype('object')
tm.assert_frame_equal(result, expected)
# GH 9921
# Monotonic
df = DataFrame({"a": [5, 15, 25]})
c = pd.cut(df.a, bins=[0,10,20,30,40])
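        # (descriptive note, reflecting the pre-IntervalIndex behaviour assumed by this test)
        # pd.cut returns a categorical Series whose categories are the bin labels
        # '(0, 10]', '(10, 20]', ..., so grouping by c places each row in its bin and the
        # transform/filter calls below must broadcast back even though some bins are empty.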
result = df.a.groupby(c).transform(sum)
tm.assert_series_equal(result, df['a'], check_names=False)
self.assertTrue(result.name is None)
tm.assert_series_equal(df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a'])
tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']])
tm.assert_frame_equal(df.groupby(c).transform(lambda xs: np.max(xs)), df[['a']])
# Filter
tm.assert_series_equal(df.a.groupby(c).filter(np.all), df['a'])
tm.assert_frame_equal(df.groupby(c).filter(np.all), df)
# Non-monotonic
df = DataFrame({"a": [5, 15, 25, -5]})
c = pd.cut(df.a, bins=[-10, 0,10,20,30,40])
result = df.a.groupby(c).transform(sum)
tm.assert_series_equal(result, df['a'], check_names=False)
self.assertTrue(result.name is None)
tm.assert_series_equal(df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a'])
tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']])
tm.assert_frame_equal(df.groupby(c).transform(lambda xs: np.sum(xs)), df[['a']])
# GH 9603
df = pd.DataFrame({'a': [1, 0, 0, 0]})
c = pd.cut(df.a, [0, 1, 2, 3, 4])
result = df.groupby(c).apply(len)
expected = pd.Series([1, 0, 0, 0], index=c.values.categories)
expected.index.name = 'a'
tm.assert_series_equal(result, expected)
def test_pivot_table(self):
raw_cat1 = Categorical(["a","a","b","b"], categories=["a","b","z"], ordered=True)
raw_cat2 = Categorical(["c","d","c","d"], categories=["c","d","y"], ordered=True)
df = DataFrame({"A":raw_cat1,"B":raw_cat2, "values":[1,2,3,4]})
result = pd.pivot_table(df, values='values', index=['A', 'B'])
expected = Series([1,2,np.nan,3,4,np.nan,np.nan,np.nan,np.nan],
index=pd.MultiIndex.from_product([['a','b','z'],['c','d','y']],names=['A','B']),
name='values')
tm.assert_series_equal(result, expected)
def test_count(self):
s = Series(Categorical([np.nan,1,2,np.nan], categories=[5,4,3,2,1], ordered=True))
result = s.count()
self.assertEqual(result, 2)
def test_sort(self):
cat = Series(Categorical(["a","b","b","a"], ordered=False))
# sort in the categories order
expected = Series(Categorical(["a","a","b","b"], ordered=False),index=[0,3,1,2])
result = cat.order()
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a","c","b","d"], ordered=True))
res = cat.order()
exp = np.array(["a","b","c","d"])
self.assert_numpy_array_equal(res.__array__(), exp)
cat = Series(Categorical(["a","c","b","d"], categories=["a","b","c","d"], ordered=True))
res = cat.order()
exp = np.array(["a","b","c","d"])
self.assert_numpy_array_equal(res.__array__(), exp)
res = cat.order(ascending=False)
exp = np.array(["d","c","b","a"])
self.assert_numpy_array_equal(res.__array__(), exp)
raw_cat1 = Categorical(["a","b","c","d"], categories=["a","b","c","d"], ordered=False)
raw_cat2 = Categorical(["a","b","c","d"], categories=["d","c","b","a"], ordered=True)
s = ["a","b","c","d"]
df = DataFrame({"unsort":raw_cat1,"sort":raw_cat2, "string":s, "values":[1,2,3,4]})
        # Categorical columns must sort correctly within a DataFrame
res = df.sort(columns=["string"], ascending=False)
exp = np.array(["d", "c", "b", "a"])
self.assert_numpy_array_equal(res["sort"].values.__array__(), exp)
self.assertEqual(res["sort"].dtype, "category")
res = df.sort(columns=["sort"], ascending=False)
exp = df.sort(columns=["string"], ascending=True)
self.assert_numpy_array_equal(res["values"], exp["values"])
self.assertEqual(res["sort"].dtype, "category")
self.assertEqual(res["unsort"].dtype, "category")
# unordered cat, but we allow this
df.sort(columns=["unsort"], ascending=False)
# multi-columns sort
# GH 7848
df = DataFrame({"id":[6,5,4,3,2,1], "raw_grade":['a', 'b', 'b', 'a', 'a', 'e']})
df["grade"] = pd.Categorical(df["raw_grade"], ordered=True)
df['grade'] = df['grade'].cat.set_categories(['b', 'e', 'a'])
# sorts 'grade' according to the order of the categories
result = df.sort(columns=['grade'])
expected = df.iloc[[1,2,5,0,3,4]]
tm.assert_frame_equal(result,expected)
# multi
result = df.sort(columns=['grade', 'id'])
expected = df.iloc[[2,1,5,4,3,0]]
tm.assert_frame_equal(result,expected)
# reverse
cat = Categorical(["a","c","c","b","d"], ordered=True)
res = cat.order(ascending=False)
exp_val = np.array(["d","c", "c", "b","a"],dtype=object)
exp_categories = np.array(["a","b","c","d"],dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
# some NaN positions
cat = Categorical(["a","c","b","d", np.nan], ordered=True)
res = cat.order(ascending=False, na_position='last')
exp_val = np.array(["d","c","b","a", np.nan],dtype=object)
exp_categories = np.array(["a","b","c","d"],dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
cat = Categorical(["a","c","b","d", np.nan], ordered=True)
res = cat.order(ascending=False, na_position='first')
exp_val = np.array([np.nan, "d","c","b","a"],dtype=object)
exp_categories = np.array(["a","b","c","d"],dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
cat = Categorical(["a","c","b","d", np.nan], ordered=True)
res = cat.order(ascending=False, na_position='first')
exp_val = np.array([np.nan, "d","c","b","a"],dtype=object)
exp_categories = np.array(["a","b","c","d"],dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
cat = Categorical(["a","c","b","d", np.nan], ordered=True)
res = cat.order(ascending=False, na_position='last')
exp_val = np.array(["d","c","b","a",np.nan],dtype=object)
exp_categories = np.array(["a","b","c","d"],dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
def test_slicing(self):
cat = Series(Categorical([1,2,3,4]))
reversed = cat[::-1]
exp = np.array([4,3,2,1])
self.assert_numpy_array_equal(reversed.__array__(), exp)
df = DataFrame({'value': (np.arange(100)+1).astype('int64')})
df['D'] = pd.cut(df.value, bins=[0,25,50,75,100])
expected = Series([11,'(0, 25]'], index=['value','D'], name=10)
result = df.iloc[10]
tm.assert_series_equal(result, expected)
expected = DataFrame({'value': np.arange(11,21).astype('int64')},
index=np.arange(10,20).astype('int64'))
expected['D'] = pd.cut(expected.value, bins=[0,25,50,75,100])
result = df.iloc[10:20]
tm.assert_frame_equal(result, expected)
expected = Series([9,'(0, 25]'],index=['value', 'D'], name=8)
result = df.loc[8]
tm.assert_series_equal(result, expected)
def test_slicing_and_getting_ops(self):
# systematically test the slicing operations:
# for all slicing ops:
# - returning a dataframe
# - returning a column
# - returning a row
# - returning a single value
cats = pd.Categorical(["a","c","b","c","c","c","c"], categories=["a","b","c"])
idx = pd.Index(["h","i","j","k","l","m","n"])
values= [1,2,3,4,5,6,7]
df = pd.DataFrame({"cats":cats,"values":values}, index=idx)
# the expected values
cats2 = pd.Categorical(["b","c"], categories=["a","b","c"])
idx2 = pd.Index(["j","k"])
values2= [3,4]
# 2:4,: | "j":"k",:
exp_df = pd.DataFrame({"cats":cats2,"values":values2}, index=idx2)
# :,"cats" | :,0
exp_col = pd.Series(cats,index=idx,name='cats')
# "j",: | 2,:
exp_row = pd.Series(["b",3], index=["cats","values"], dtype="object", name="j")
# "j","cats | 2,0
exp_val = "b"
# iloc
# frame
res_df = df.iloc[2:4,:]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
# row
res_row = df.iloc[2,:]
tm.assert_series_equal(res_row, exp_row)
tm.assert_isinstance(res_row["cats"], compat.string_types)
# col
res_col = df.iloc[:,0]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
# single value
res_val = df.iloc[2,0]
self.assertEqual(res_val, exp_val)
# loc
# frame
res_df = df.loc["j":"k",:]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
# row
res_row = df.loc["j",:]
tm.assert_series_equal(res_row, exp_row)
tm.assert_isinstance(res_row["cats"], compat.string_types)
# col
res_col = df.loc[:,"cats"]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
# single value
res_val = df.loc["j","cats"]
self.assertEqual(res_val, exp_val)
# ix
# frame
#res_df = df.ix["j":"k",[0,1]] # doesn't work?
res_df = df.ix["j":"k",:]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
# row
res_row = df.ix["j",:]
tm.assert_series_equal(res_row, exp_row)
tm.assert_isinstance(res_row["cats"], compat.string_types)
# col
res_col = df.ix[:,"cats"]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
# single value
res_val = df.ix["j",0]
self.assertEqual(res_val, exp_val)
# iat
res_val = df.iat[2,0]
self.assertEqual(res_val, exp_val)
# at
res_val = df.at["j","cats"]
self.assertEqual(res_val, exp_val)
# fancy indexing
exp_fancy = df.iloc[[2]]
res_fancy = df[df["cats"] == "b"]
tm.assert_frame_equal(res_fancy,exp_fancy)
res_fancy = df[df["values"] == 3]
tm.assert_frame_equal(res_fancy,exp_fancy)
# get_value
res_val = df.get_value("j","cats")
self.assertEqual(res_val, exp_val)
# i : int, slice, or sequence of integers
res_row = df.irow(2)
tm.assert_series_equal(res_row, exp_row)
tm.assert_isinstance(res_row["cats"], compat.string_types)
res_df = df.irow(slice(2,4))
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
res_df = df.irow([2,3])
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
res_col = df.icol(0)
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
res_df = df.icol(slice(0,2))
tm.assert_frame_equal(res_df, df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
res_df = df.icol([0,1])
tm.assert_frame_equal(res_df, df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
def test_slicing_doc_examples(self):
#GH 7918
cats = Categorical(["a","b","b","b","c","c","c"], categories=["a","b","c"])
idx = Index(["h","i","j","k","l","m","n",])
values= [1,2,2,2,3,4,5]
df = DataFrame({"cats":cats,"values":values}, index=idx)
result = df.iloc[2:4,:]
expected = DataFrame({"cats":Categorical(['b','b'],categories=['a','b','c']),"values":[2,2]}, index=['j','k'])
tm.assert_frame_equal(result, expected)
result = df.iloc[2:4,:].dtypes
expected = Series(['category','int64'],['cats','values'])
tm.assert_series_equal(result, expected)
result = df.loc["h":"j","cats"]
expected = Series(Categorical(['a','b','b'], name='cats',
categories=['a','b','c']), index=['h','i','j'])
tm.assert_series_equal(result, expected)
result = df.ix["h":"j",0:1]
expected = DataFrame({'cats' : Series(Categorical(['a','b','b'],categories=['a','b','c']),index=['h','i','j']) })
tm.assert_frame_equal(result, expected)
def test_assigning_ops(self):
# systematically test the assigning operations:
# for all slicing ops:
# for value in categories and value not in categories:
# - assign a single value -> exp_single_cats_value
# - assign a complete row (mixed values) -> exp_single_row
# - assign multiple rows (mixed values) (-> array) -> exp_multi_row
# - assign a part of a column with dtype == categorical -> exp_parts_cats_col
# - assign a part of a column with dtype != categorical -> exp_parts_cats_col
cats = pd.Categorical(["a","a","a","a","a","a","a"], categories=["a","b"])
idx = pd.Index(["h","i","j","k","l","m","n"])
values = [1,1,1,1,1,1,1]
orig = pd.DataFrame({"cats":cats,"values":values}, index=idx)
### the expected values
# changed single row
cats1 = pd.Categorical(["a","a","b","a","a","a","a"], categories=["a","b"])
idx1 = pd.Index(["h","i","j","k","l","m","n"])
values1 = [1,1,2,1,1,1,1]
exp_single_row = pd.DataFrame({"cats":cats1,"values":values1}, index=idx1)
#changed multiple rows
cats2 = pd.Categorical(["a","a","b","b","a","a","a"], categories=["a","b"])
idx2 = pd.Index(["h","i","j","k","l","m","n"])
values2 = [1,1,2,2,1,1,1]
exp_multi_row = pd.DataFrame({"cats":cats2,"values":values2}, index=idx2)
# changed part of the cats column
cats3 = pd.Categorical(["a","a","b","b","a","a","a"], categories=["a","b"])
idx3 = pd.Index(["h","i","j","k","l","m","n"])
values3 = [1,1,1,1,1,1,1]
exp_parts_cats_col = pd.DataFrame({"cats":cats3,"values":values3}, index=idx3)
# changed single value in cats col
cats4 = pd.Categorical(["a","a","b","a","a","a","a"], categories=["a","b"])
idx4 = pd.Index(["h","i","j","k","l","m","n"])
values4 = [1,1,1,1,1,1,1]
exp_single_cats_value = pd.DataFrame({"cats":cats4,"values":values4}, index=idx4)
#### iloc #####
################
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.iloc[2,0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.iloc[df.index == "j",0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.iloc[2,0] = "c"
self.assertRaises(ValueError, f)
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.iloc[2,:] = ["b",2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
def f():
df = orig.copy()
df.iloc[2,:] = ["c",2]
self.assertRaises(ValueError, f)
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.iloc[2:4,:] = [["b",2],["b",2]]
tm.assert_frame_equal(df, exp_multi_row)
def f():
df = orig.copy()
df.iloc[2:4,:] = [["c",2],["c",2]]
self.assertRaises(ValueError, f)
# - assign a part of a column with dtype == categorical -> exp_parts_cats_col
df = orig.copy()
df.iloc[2:4,0] = pd.Categorical(["b","b"], categories=["a","b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.iloc[2:4,0] = pd.Categorical(["b","b"], categories=["a","b","c"])
with tm.assertRaises(ValueError):
# different values
df = orig.copy()
df.iloc[2:4,0] = pd.Categorical(["c","c"], categories=["a","b","c"])
# - assign a part of a column with dtype != categorical -> exp_parts_cats_col
df = orig.copy()
df.iloc[2:4,0] = ["b","b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
df.iloc[2:4,0] = ["c","c"]
#### loc #####
################
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.loc["j","cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.loc[df.index == "j","cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.loc["j","cats"] = "c"
self.assertRaises(ValueError, f)
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.loc["j",:] = ["b",2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
def f():
df = orig.copy()
df.loc["j",:] = ["c",2]
self.assertRaises(ValueError, f)
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.loc["j":"k",:] = [["b",2],["b",2]]
tm.assert_frame_equal(df, exp_multi_row)
def f():
df = orig.copy()
df.loc["j":"k",:] = [["c",2],["c",2]]
self.assertRaises(ValueError, f)
# - assign a part of a column with dtype == categorical -> exp_parts_cats_col
df = orig.copy()
df.loc["j":"k","cats"] = pd.Categorical(["b","b"], categories=["a","b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.loc["j":"k","cats"] = pd.Categorical(["b","b"], categories=["a","b","c"])
with tm.assertRaises(ValueError):
# different values
df = orig.copy()
df.loc["j":"k","cats"] = pd.Categorical(["c","c"], categories=["a","b","c"])
# - assign a part of a column with dtype != categorical -> exp_parts_cats_col
df = orig.copy()
df.loc["j":"k","cats"] = ["b","b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
df.loc["j":"k","cats"] = ["c","c"]
#### ix #####
################
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.ix["j",0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.ix[df.index == "j",0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.ix["j",0] = "c"
self.assertRaises(ValueError, f)
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.ix["j",:] = ["b",2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
def f():
df = orig.copy()
df.ix["j",:] = ["c",2]
self.assertRaises(ValueError, f)
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.ix["j":"k",:] = [["b",2],["b",2]]
tm.assert_frame_equal(df, exp_multi_row)
def f():
df = orig.copy()
df.ix["j":"k",:] = [["c",2],["c",2]]
self.assertRaises(ValueError, f)
# - assign a part of a column with dtype == categorical -> exp_parts_cats_col
df = orig.copy()
df.ix["j":"k",0] = pd.Categorical(["b","b"], categories=["a","b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.ix["j":"k",0] = pd.Categorical(["b","b"], categories=["a","b","c"])
with tm.assertRaises(ValueError):
# different values
df = orig.copy()
df.ix["j":"k",0] = pd.Categorical(["c","c"], categories=["a","b","c"])
# - assign a part of a column with dtype != categorical -> exp_parts_cats_col
df = orig.copy()
df.ix["j":"k",0] = ["b","b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
df.ix["j":"k",0] = ["c","c"]
# iat
df = orig.copy()
df.iat[2,0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.iat[2,0] = "c"
self.assertRaises(ValueError, f)
# at
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.at["j","cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.at["j","cats"] = "c"
self.assertRaises(ValueError, f)
# fancy indexing
catsf = pd.Categorical(["a","a","c","c","a","a","a"], categories=["a","b","c"])
idxf = pd.Index(["h","i","j","k","l","m","n"])
valuesf = [1,1,3,3,1,1,1]
df = pd.DataFrame({"cats":catsf,"values":valuesf}, index=idxf)
exp_fancy = exp_multi_row.copy()
exp_fancy["cats"].cat.set_categories(["a","b","c"], inplace=True)
df[df["cats"] == "c"] = ["b",2]
tm.assert_frame_equal(df, exp_multi_row)
# set_value
df = orig.copy()
df.set_value("j","cats", "b")
tm.assert_frame_equal(df, exp_single_cats_value)
def f():
df = orig.copy()
df.set_value("j","cats", "c")
self.assertRaises(ValueError, f)
        # Assigning a Categorical to part of an int/... column uses the values of the Categorical
df = pd.DataFrame({"a":[1,1,1,1,1], "b":["a","a","a","a","a"]})
exp = pd.DataFrame({"a":[1,"b","b",1,1], "b":["a","a","b","b","a"]})
df.loc[1:2,"a"] = pd.Categorical(["b","b"], categories=["a","b"])
df.loc[2:3,"b"] = pd.Categorical(["b","b"], categories=["a","b"])
tm.assert_frame_equal(df, exp)
######### Series ##########
orig = Series(pd.Categorical(["b","b"], categories=["a","b"]))
s = orig.copy()
s[:] = "a"
exp = Series(pd.Categorical(["a","a"], categories=["a","b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s[1] = "a"
exp = Series(pd.Categorical(["b","a"], categories=["a","b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s[s.index > 0] = "a"
exp = Series(pd.Categorical(["b","a"], categories=["a","b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s[[False, True]] = "a"
exp = Series(pd.Categorical(["b","a"], categories=["a","b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s.index = ["x", "y"]
s["y"] = "a"
exp = Series(pd.Categorical(["b","a"], categories=["a","b"]), index=["x", "y"])
tm.assert_series_equal(s, exp)
# ensure that one can set something to np.nan
s = Series(Categorical([1,2,3]))
exp = Series(Categorical([1,np.nan,3]))
s[1] = np.nan
tm.assert_series_equal(s, exp)
def test_comparisons(self):
tests_data = [(list("abc"), list("cba"), list("bbb")),
([1,2,3], [3,2,1], [2,2,2])]
for data , reverse, base in tests_data:
cat_rev = pd.Series(pd.Categorical(data, categories=reverse, ordered=True))
cat_rev_base = pd.Series(pd.Categorical(base, categories=reverse, ordered=True))
cat = pd.Series(pd.Categorical(data, ordered=True))
cat_base = pd.Series(pd.Categorical(base, categories=cat.cat.categories, ordered=True))
s = Series(base)
a = np.array(base)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = Series([True, False, False])
tm.assert_series_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = Series([False, False, True])
tm.assert_series_equal(res_rev, exp_rev)
res = cat > cat_base
exp = Series([False, False, True])
tm.assert_series_equal(res, exp)
scalar = base[1]
res = cat > scalar
exp = Series([False, False, True])
exp2 = cat.values > scalar
tm.assert_series_equal(res, exp)
tm.assert_numpy_array_equal(res.values, exp2)
res_rev = cat_rev > scalar
exp_rev = Series([True, False, False])
exp_rev2 = cat_rev.values > scalar
tm.assert_series_equal(res_rev, exp_rev)
tm.assert_numpy_array_equal(res_rev.values, exp_rev2)
            # Only categoricals with the same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
            # a categorical cannot be compared to a Series or numpy array, and vice versa
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
        # inequality comparisons should raise for unordered cats
cat = Series(Categorical(list("abc")))
def f():
cat > "b"
self.assertRaises(TypeError, f)
cat = Series(Categorical(list("abc"), ordered=False))
def f():
cat > "b"
self.assertRaises(TypeError, f)
# https://github.com/pydata/pandas/issues/9836#issuecomment-92123057 and following
        # comparisons with scalars that are not in the categories should raise for inequality
        # comparisons, but not for equality/not-equality
cat = Series(Categorical(list("abc"), ordered=True))
self.assertRaises(TypeError, lambda: cat < "d")
self.assertRaises(TypeError, lambda: cat > "d")
self.assertRaises(TypeError, lambda: "d" < cat)
self.assertRaises(TypeError, lambda: "d" > cat)
        self.assert_series_equal(cat == "d", Series([False, False, False]))
        self.assert_series_equal(cat != "d", Series([True, True, True]))
# And test NaN handling...
cat = Series(Categorical(["a","b","c", np.nan]))
exp = Series([True, True, True, False])
res = (cat == cat)
tm.assert_series_equal(res, exp)
def test_cat_equality(self):
# GH 8938
# allow equality comparisons
a = Series(list('abc'),dtype="category")
b = Series(list('abc'),dtype="object")
c = Series(['a','b','cc'],dtype="object")
d = Series(list('acb'),dtype="object")
e = Categorical(list('abc'))
f = Categorical(list('acb'))
# vs scalar
self.assertFalse((a=='a').all())
self.assertTrue(((a!='a') == ~(a=='a')).all())
self.assertFalse(('a'==a).all())
self.assertTrue((a=='a')[0])
self.assertTrue(('a'==a)[0])
self.assertFalse(('a'!=a)[0])
# vs list-like
self.assertTrue((a==a).all())
self.assertFalse((a!=a).all())
self.assertTrue((a==list(a)).all())
self.assertTrue((a==b).all())
self.assertTrue((b==a).all())
self.assertTrue(((~(a==b))==(a!=b)).all())
self.assertTrue(((~(b==a))==(b!=a)).all())
self.assertFalse((a==c).all())
self.assertFalse((c==a).all())
self.assertFalse((a==d).all())
self.assertFalse((d==a).all())
# vs a cat-like
self.assertTrue((a==e).all())
self.assertTrue((e==a).all())
self.assertFalse((a==f).all())
self.assertFalse((f==a).all())
self.assertTrue(((~(a==e)==(a!=e)).all()))
self.assertTrue(((~(e==a)==(e!=a)).all()))
self.assertTrue(((~(a==f)==(a!=f)).all()))
self.assertTrue(((~(f==a)==(f!=a)).all()))
        # ordering comparisons (<, >) are not supported
self.assertRaises(TypeError, lambda: a < b)
self.assertRaises(TypeError, lambda: b < a)
self.assertRaises(TypeError, lambda: a > b)
self.assertRaises(TypeError, lambda: b > a)
def test_concat(self):
cat = pd.Categorical(["a","b"], categories=["a","b"])
vals = [1,2]
df = pd.DataFrame({"cats":cat, "vals":vals})
cat2 = pd.Categorical(["a","b","a","b"], categories=["a","b"])
vals2 = [1,2,1,2]
exp = pd.DataFrame({"cats":cat2, "vals":vals2}, index=pd.Index([0, 1, 0, 1]))
res = pd.concat([df,df])
tm.assert_frame_equal(exp, res)
# Concat should raise if the two categoricals do not have the same categories
cat3 = pd.Categorical(["a","b"], categories=["a","b","c"])
vals3 = [1,2]
df_wrong_categories = pd.DataFrame({"cats":cat3, "vals":vals3})
def f():
pd.concat([df,df_wrong_categories])
self.assertRaises(ValueError, f)
# GH 7864
        # make sure ordering is preserved
df = pd.DataFrame({"id":[1,2,3,4,5,6], "raw_grade":['a', 'b', 'b', 'a', 'a', 'e']})
df["grade"] = pd.Categorical(df["raw_grade"])
df['grade'].cat.set_categories(['e', 'a', 'b'])
df1 = df[0:3]
df2 = df[3:]
self.assert_numpy_array_equal(df['grade'].cat.categories, df1['grade'].cat.categories)
self.assert_numpy_array_equal(df['grade'].cat.categories, df2['grade'].cat.categories)
dfx = pd.concat([df1, df2])
dfx['grade'].cat.categories
self.assert_numpy_array_equal(df['grade'].cat.categories, dfx['grade'].cat.categories)
def test_concat_preserve(self):
# GH 8641
# series concat not preserving category dtype
s = Series(list('abc'),dtype='category')
s2 = Series(list('abd'),dtype='category')
def f():
pd.concat([s,s2])
self.assertRaises(ValueError, f)
result = pd.concat([s,s],ignore_index=True)
expected = Series(list('abcabc')).astype('category')
tm.assert_series_equal(result, expected)
result = pd.concat([s,s])
expected = Series(list('abcabc'),index=[0,1,2,0,1,2]).astype('category')
tm.assert_series_equal(result, expected)
a = Series(np.arange(6,dtype='int64'))
b = Series(list('aabbca'))
df2 = DataFrame({'A' : a, 'B' : b.astype('category',categories=list('cab')) })
result = pd.concat([df2,df2])
expected = DataFrame({'A' : pd.concat([a,a]), 'B' : pd.concat([b,b]).astype('category',categories=list('cab')) })
tm.assert_frame_equal(result, expected)
def test_categorical_index_preserver(self):
a = Series(np.arange(6,dtype='int64'))
b = Series(list('aabbca'))
df2 = DataFrame({'A' : a, 'B' : b.astype('category',categories=list('cab')) }).set_index('B')
result = pd.concat([df2,df2])
expected = DataFrame({'A' : pd.concat([a,a]), 'B' : pd.concat([b,b]).astype('category',categories=list('cab')) }).set_index('B')
tm.assert_frame_equal(result, expected)
        # wrong categories
df3 = DataFrame({'A' : a, 'B' : b.astype('category',categories=list('abc')) }).set_index('B')
self.assertRaises(TypeError, lambda : pd.concat([df2,df3]))
def test_append(self):
cat = pd.Categorical(["a","b"], categories=["a","b"])
vals = [1,2]
df = pd.DataFrame({"cats":cat, "vals":vals})
cat2 = pd.Categorical(["a","b","a","b"], categories=["a","b"])
vals2 = [1,2,1,2]
exp = pd.DataFrame({"cats":cat2, "vals":vals2}, index=pd.Index([0, 1, 0, 1]))
res = df.append(df)
tm.assert_frame_equal(exp, res)
        # Append should raise if the two categoricals do not have the same categories
cat3 = pd.Categorical(["a","b"], categories=["a","b","c"])
vals3 = [1,2]
df_wrong_categories = pd.DataFrame({"cats":cat3, "vals":vals3})
def f():
df.append(df_wrong_categories)
self.assertRaises(ValueError, f)
def test_merge(self):
# GH 9426
right = DataFrame({'c': {0: 'a', 1: 'b', 2: 'c', 3: 'd', 4: 'e'},
'd': {0: 'null', 1: 'null', 2: 'null', 3: 'null', 4: 'null'}})
left = DataFrame({'a': {0: 'f', 1: 'f', 2: 'f', 3: 'f', 4: 'f'},
'b': {0: 'g', 1: 'g', 2: 'g', 3: 'g', 4: 'g'}})
df = pd.merge(left, right, how='left', left_on='b', right_on='c')
# object-object
expected = df.copy()
# object-cat
cright = right.copy()
cright['d'] = cright['d'].astype('category')
result = pd.merge(left, cright, how='left', left_on='b', right_on='c')
tm.assert_frame_equal(result, expected)
# cat-object
cleft = left.copy()
cleft['b'] = cleft['b'].astype('category')
result = pd.merge(cleft, cright, how='left', left_on='b', right_on='c')
tm.assert_frame_equal(result, expected)
# cat-cat
cright = right.copy()
cright['d'] = cright['d'].astype('category')
cleft = left.copy()
cleft['b'] = cleft['b'].astype('category')
result = pd.merge(cleft, cright, how='left', left_on='b', right_on='c')
tm.assert_frame_equal(result, expected)
def test_repeat(self):
#GH10183
cat = pd.Categorical(["a","b"], categories=["a","b"])
exp = pd.Categorical(["a", "a", "b", "b"], categories=["a","b"])
res = cat.repeat(2)
self.assert_categorical_equal(res, exp)
def test_na_actions(self):
cat = pd.Categorical([1,2,3,np.nan], categories=[1,2,3])
vals = ["a","b",np.nan,"d"]
df = pd.DataFrame({"cats":cat, "vals":vals})
cat2 = pd.Categorical([1,2,3,3], categories=[1,2,3])
vals2 = ["a","b","b","d"]
df_exp_fill = pd.DataFrame({"cats":cat2, "vals":vals2})
cat3 = pd.Categorical([1,2,3], categories=[1,2,3])
vals3 = ["a","b",np.nan]
df_exp_drop_cats = pd.DataFrame({"cats":cat3, "vals":vals3})
cat4 = pd.Categorical([1,2], categories=[1,2,3])
vals4 = ["a","b"]
df_exp_drop_all = pd.DataFrame({"cats":cat4, "vals":vals4})
# fillna
res = df.fillna(value={"cats":3, "vals":"b"})
tm.assert_frame_equal(res, df_exp_fill)
def f():
df.fillna(value={"cats":4, "vals":"c"})
self.assertRaises(ValueError, f)
res = df.fillna(method='pad')
tm.assert_frame_equal(res, df_exp_fill)
res = df.dropna(subset=["cats"])
tm.assert_frame_equal(res, df_exp_drop_cats)
res = df.dropna()
tm.assert_frame_equal(res, df_exp_drop_all)
# make sure that fillna takes both missing values and NA categories into account
c = Categorical(["a","b",np.nan])
c.set_categories(["a","b",np.nan], rename=True, inplace=True)
c[0] = np.nan
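        # (descriptive note, assuming the pre-0.16 behaviour where NaN may itself be a
        # category) the column now mixes two kinds of missing data -- one value coded as the
        # NaN *category* and one coded as a true missing value (-1) -- and fillna("a") below
        # is expected to replace both with "a".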
df = pd.DataFrame({"cats":c, "vals":[1,2,3]})
df_exp = pd.DataFrame({"cats": Categorical(["a","b","a"]), "vals": [1,2,3]})
res = df.fillna("a")
tm.assert_frame_equal(res, df_exp)
def test_astype_to_other(self):
s = self.cat['value_group']
expected = s
tm.assert_series_equal(s.astype('category'),expected)
tm.assert_series_equal(s.astype(com.CategoricalDtype()),expected)
self.assertRaises(ValueError, lambda : s.astype('float64'))
cat = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c']))
exp = Series(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])
tm.assert_series_equal(cat.astype('str'), exp)
s2 = Series(Categorical.from_array(['1', '2', '3', '4']))
exp2 = Series([1,2,3,4]).astype(int)
tm.assert_series_equal(s2.astype('int') , exp2)
        # objects don't sort correctly, so just compare that we have the same values
def cmp(a,b):
tm.assert_almost_equal(np.sort(np.unique(a)),np.sort(np.unique(b)))
expected = Series(np.array(s.values),name='value_group')
cmp(s.astype('object'),expected)
cmp(s.astype(np.object_),expected)
# array conversion
tm.assert_almost_equal(np.array(s),np.array(s.values))
# valid conversion
for valid in [lambda x: x.astype('category'),
lambda x: x.astype(com.CategoricalDtype()),
lambda x: x.astype('object').astype('category'),
lambda x: x.astype('object').astype(com.CategoricalDtype())]:
result = valid(s)
tm.assert_series_equal(result,s)
# invalid conversion (these are NOT a dtype)
for invalid in [lambda x: x.astype(pd.Categorical),
lambda x: x.astype('object').astype(pd.Categorical)]:
self.assertRaises(TypeError, lambda : invalid(s))
def test_astype_categorical(self):
cat = Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])
tm.assert_categorical_equal(cat,cat.astype('category'))
tm.assert_almost_equal(np.array(cat),cat.astype('object'))
self.assertRaises(ValueError, lambda : cat.astype(float))
def test_to_records(self):
# GH8626
# dict creation
df = DataFrame({ 'A' : list('abc') }, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# list-like creation
df = DataFrame(list('abc'), dtype='category')
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(df[0], expected)
# to record array
# this coerces
result = df.to_records()
expected = np.rec.array([(0, 'a'), (1, 'b'), (2, 'c')],
dtype=[('index', '<i8'), ('0', 'O')])
tm.assert_almost_equal(result, expected)
def test_numeric_like_ops(self):
# numeric ops should not succeed
for op in ['__add__','__sub__','__mul__','__truediv__']:
self.assertRaises(TypeError, lambda : getattr(self.cat,op)(self.cat))
# reduction ops should not succeed (unless specifically defined, e.g. min/max)
s = self.cat['value_group']
for op in ['kurt','skew','var','std','mean','sum','median']:
self.assertRaises(TypeError, lambda : getattr(s,op)(numeric_only=False))
        # mad technically works because it always takes the numeric data
# numpy ops
s = pd.Series(pd.Categorical([1,2,3,4]))
self.assertRaises(TypeError, lambda : np.sum(s))
# numeric ops on a Series
for op in ['__add__','__sub__','__mul__','__truediv__']:
self.assertRaises(TypeError, lambda : getattr(s,op)(2))
# invalid ufunc
self.assertRaises(TypeError, lambda : np.log(s))
    def test_cat_tab_completion(self):
# test the tab completion display
ok_for_cat = ['categories','codes','ordered','set_categories',
'add_categories', 'remove_categories', 'rename_categories',
'reorder_categories', 'remove_unused_categories',
'as_ordered', 'as_unordered']
def get_dir(s):
results = [ r for r in s.cat.__dir__() if not r.startswith('_') ]
return list(sorted(set(results)))
s = Series(list('aabbcde')).astype('category')
results = get_dir(s)
tm.assert_almost_equal(results,list(sorted(set(ok_for_cat))))
def test_cat_accessor_api(self):
# GH 9322
from pandas.core.categorical import CategoricalAccessor
self.assertIs(Series.cat, CategoricalAccessor)
s = Series(list('aabbcde')).astype('category')
self.assertIsInstance(s.cat, CategoricalAccessor)
invalid = Series([1])
with tm.assertRaisesRegexp(AttributeError, "only use .cat accessor"):
invalid.cat
self.assertFalse(hasattr(invalid, 'cat'))
def test_pickle_v0_14_1(self):
cat = pd.Categorical(values=['a', 'b', 'c'],
categories=['a', 'b', 'c', 'd'],
name='foobar', ordered=False)
pickle_path = os.path.join(tm.get_data_path(),
'categorical_0_14_1.pickle')
# This code was executed once on v0.14.1 to generate the pickle:
#
# cat = Categorical(labels=np.arange(3), levels=['a', 'b', 'c', 'd'],
# name='foobar')
# with open(pickle_path, 'wb') as f: pickle.dump(cat, f)
#
self.assert_categorical_equal(cat, pd.read_pickle(pickle_path))
def test_pickle_v0_15_2(self):
# ordered -> _ordered
# GH 9347
cat = pd.Categorical(values=['a', 'b', 'c'],
categories=['a', 'b', 'c', 'd'],
name='foobar', ordered=False)
pickle_path = os.path.join(tm.get_data_path(),
'categorical_0_15_2.pickle')
# This code was executed once on v0.15.2 to generate the pickle:
#
# cat = Categorical(labels=np.arange(3), levels=['a', 'b', 'c', 'd'],
# name='foobar')
# with open(pickle_path, 'wb') as f: pickle.dump(cat, f)
#
self.assert_categorical_equal(cat, pd.read_pickle(pickle_path))
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
# '--with-coverage', '--cover-package=pandas.core']
exit=False)
|
mit
|
jay-z007/BCI_motion_simulation
|
bci2.py
|
1
|
1029
|
import random
from numpy import *
from neural_network import *
from helper_functions import *
from init import *
import filter
X = filter.data
Y = filter.target
X_len = len(X)
print 'lenX',len(X),'lenY',len(Y)
from sklearn.preprocessing import normalize
X = normalize(X)
# print X[0]
from sklearn.cross_validation import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = .15)
len_Y_train = len(Y_train)
# print "\n\nX = ", X, "\n\nY = ", Y
from sklearn.neural_network import MLPClassifier
my_classifier = MLPClassifier(hidden_layer_sizes=(100), activation='logistic', alpha=0.0005, learning_rate='invscaling',
max_iter=10, tol=0.00000001, verbose=True, warm_start=True)
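# (descriptive sketch, not part of the original script) hidden_layer_sizes=(100) is
# a plain int, which scikit-learn appears to treat as a single hidden layer of 100
# units; writing (100,) would make the tuple intent explicit. Because warm_start=True,
# each my_classifier.fit() call in the loop below resumes from the weights of the
# previous call, so the loop amounts to 200 rounds of (at most) max_iter=10 updates
# rather than 200 independent trainings.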
for i in range(200):
print i
my_classifier.fit(X_train, Y_train)
predictions = my_classifier.predict(X_test)
for i in range(len(Y_test)):
print predictions[i], Y_test[i]
from sklearn.metrics import accuracy_score
accuracy = accuracy_score(Y_test, predictions)
print accuracy
##########################
|
mit
|
kjung/scikit-learn
|
sklearn/datasets/tests/test_rcv1.py
|
322
|
2414
|
"""Test the rcv1 loader.
Skipped if rcv1 is not already downloaded to data_home.
"""
import errno
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import fetch_rcv1
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
def test_fetch_rcv1():
try:
data1 = fetch_rcv1(shuffle=False, download_if_missing=False)
except IOError as e:
if e.errno == errno.ENOENT:
raise SkipTest("Download RCV1 dataset to run this test.")
X1, Y1 = data1.data, data1.target
cat_list, s1 = data1.target_names.tolist(), data1.sample_id
# test sparsity
assert_true(sp.issparse(X1))
assert_true(sp.issparse(Y1))
assert_equal(60915113, X1.data.size)
assert_equal(2606875, Y1.data.size)
# test shapes
assert_equal((804414, 47236), X1.shape)
assert_equal((804414, 103), Y1.shape)
assert_equal((804414,), s1.shape)
assert_equal(103, len(cat_list))
# test ordering of categories
first_categories = [u'C11', u'C12', u'C13', u'C14', u'C15', u'C151']
assert_array_equal(first_categories, cat_list[:6])
# test number of sample for some categories
some_categories = ('GMIL', 'E143', 'CCAT')
number_non_zero_in_cat = (5, 1206, 381327)
for num, cat in zip(number_non_zero_in_cat, some_categories):
j = cat_list.index(cat)
assert_equal(num, Y1[:, j].data.size)
# test shuffling and subset
data2 = fetch_rcv1(shuffle=True, subset='train', random_state=77,
download_if_missing=False)
X2, Y2 = data2.data, data2.target
s2 = data2.sample_id
# The first 23149 samples are the training samples
assert_array_equal(np.sort(s1[:23149]), np.sort(s2))
# test some precise values
some_sample_ids = (2286, 3274, 14042)
for sample_id in some_sample_ids:
idx1 = s1.tolist().index(sample_id)
idx2 = s2.tolist().index(sample_id)
feature_values_1 = X1[idx1, :].toarray()
feature_values_2 = X2[idx2, :].toarray()
assert_almost_equal(feature_values_1, feature_values_2)
target_values_1 = Y1[idx1, :].toarray()
target_values_2 = Y2[idx2, :].toarray()
assert_almost_equal(target_values_1, target_values_2)
|
bsd-3-clause
|
srmanikandasriram/discrete-optimization-coursera
|
coloring/test.py
|
2
|
2576
|
#!/usr/bin/env python
"""
Atlas of all graphs of 6 nodes or less.
"""
__author__ = """Aric Hagberg ([email protected])"""
# Copyright (C) 2004 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
#from networkx import *
#from networkx.generators.atlas import *
from networkx.algorithms.isomorphism.isomorph import graph_could_be_isomorphic as isomorphic
import random
def atlas6():
""" Return the atlas of all connected graphs of 6 nodes or less.
Attempt to check for isomorphisms and remove.
"""
Atlas=nx.graph_atlas_g()[0:208] # 208
# remove isolated nodes, only connected graphs are left
U=nx.Graph() # graph for union of all graphs in atlas
for G in Atlas:
zerodegree=[n for n in G if G.degree(n)==0]
for n in zerodegree:
G.remove_node(n)
U=nx.disjoint_union(U,G)
# list of graphs of all connected components
C=nx.connected_component_subgraphs(U)
UU=nx.Graph()
# do quick isomorphic-like check, not a true isomorphism checker
nlist=[] # list of nonisomorphic graphs
for G in C:
# check against all nonisomorphic graphs so far
if not iso(G,nlist):
nlist.append(G)
UU=nx.disjoint_union(UU,G) # union the nonisomorphic graphs
return UU
def iso(G1, glist):
"""Quick and dirty nonisomorphism checker used to check isomorphisms."""
for G2 in glist:
if isomorphic(G1,G2):
return True
return False
if __name__ == '__main__':
import networkx as nx
G=atlas6()
print("graph has %d nodes with %d edges"\
%(nx.number_of_nodes(G),nx.number_of_edges(G)))
print(nx.number_connected_components(G),"connected components")
try:
from networkx import graphviz_layout
except ImportError:
raise ImportError("This example needs Graphviz and either PyGraphviz or Pydot")
import matplotlib.pyplot as plt
plt.figure(1,figsize=(8,8))
# layout graphs with positions using graphviz neato
pos=nx.graphviz_layout(G,prog="neato")
# color nodes the same in each connected subgraph
C=nx.connected_component_subgraphs(G)
for g in C:
c=[random.random()]*nx.number_of_nodes(g) # random color...
nx.draw(g,
pos,
node_size=40,
node_color=c,
vmin=0.0,
vmax=1.0,
with_labels=False
)
plt.savefig("atlas.png",dpi=75)
|
mit
|
mojoboss/scikit-learn
|
examples/svm/plot_svm_margin.py
|
318
|
2328
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM Margins Example
=========================================================
The plots below illustrate the effect the parameter `C` has
on the separation line. A large value of `C` basically tells
our model that we do not have that much faith in our data's
distribution, and will only consider points close to the line
of separation.
A small value of `C` includes more/all the observations, allowing
the margins to be calculated using all the data in the area.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# figure number
fignum = 1
# fit the model
for name, penalty in (('unreg', 1), ('reg', 0.05)):
clf = svm.SVC(kernel='linear', C=penalty)
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
margin = 1 / np.sqrt(np.sum(clf.coef_ ** 2))
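    # (descriptive note, not in the original example) for a linear SVC the margin
    # hyperplanes satisfy w.x + b = +/-1, so their perpendicular distance from the
    # separating line is 1/||w||, which is exactly what `margin` computes from clf.coef_.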
yy_down = yy + a * margin
yy_up = yy - a * margin
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -4.8
x_max = 4.2
y_min = -6
y_max = 6
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.predict(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z, cmap=plt.cm.Paired)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
|
bsd-3-clause
|
sssundar/NetworkSimulator
|
Code/Python/visualize.py
|
1
|
25750
|
'''
This function is run from the command line as:
python visualize.py --testcase testcase.json --tcp [fast|vegas|reno]
Raw measurements are dumped to /results/all_measurements.txt
Parsed measurements and plots are stored in /Code/Python/results/[rawdata,plots]
These directories are cleared at the start of each run.
Currently, supported plots include:
- link rate (mpbs)
- buffer occupancy (%)
- packet loss (packets)
- flow rate (Mbps)
- flow window size (packets)
- packet round trip time (ms)
Plenty more measurements are made, so check out the actual data dumps.
Any plot for which data is reported is plotted.
Time/Bin Averages are used when they improve understanding, but not when
they hide the inner workings of the network. In many cases events are
plotted directly.
Last Revised by Sushant Sundaresh on 6 Dec 2015
References:
http://matplotlib.org/examples/pylab_examples/simple_plot.html
http://stackoverflow.com/questions/4675728/redirect-stdout-to-a-file-in-python
http://stackoverflow.com/questions/14245227/python-reset-stdout-to-normal-after-previously-redirecting-it-to-a-file
http://stackoverflow.com/questions/273192/in-python-check-if-a-directory-exists-and-create-it-if-necessary
'''
import constants
import sys, os
import json
import matplotlib.pyplot as plt
import numpy as np
from main import MainLoop
from link import Link
from flow import Flow, Data_Source
from tcp_reno_working import Working_Data_Source_TCP_RENO, Working_Data_Sink_TCP_RENO
from tcp_fast_working import Working_Data_Source_TCP_FAST
from tcp_vegas_working import Working_Data_Source_TCP_VEGAS
def handle_linkrate (datamap, datalog):
if datalog["measurement"] == "linkrate":
if not (datalog["linkid"] in datamap.keys()):
datamap[datalog["linkid"]] = {}
if not (datalog["measurement"] in datamap[datalog["linkid"]].keys()):
datamap[datalog["linkid"]][datalog["measurement"]] = []
datamap[datalog["linkid"]][datalog["measurement"]].append(\
[ float(datalog["ms_globaltime"]), \
float(datalog["mbits_propagated"])\
]\
)
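# (illustrative sketch, not from the original code) each datalog record is assumed to
# be a dict of strings; a linkrate sample might look like
# {"measurement": "linkrate", "linkid": "L1", "ms_globaltime": "12.5",
# "mbits_propagated": "0.85"}, and each handler below accumulates [time, value]
# pairs under datamap[element_id][measurement].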
def handle_flowrate (datamap, datalog):
if datalog["measurement"] == "flowrate":
if not (datalog["flowid"] in datamap.keys()):
datamap[datalog["flowid"]] = {}
if not (datalog["measurement"] in datamap[datalog["flowid"]].keys()):
datamap[datalog["flowid"]][datalog["measurement"]] = []
datamap[datalog["flowid"]][datalog["measurement"]].append(\
[ float(datalog["ms_globaltime"]), \
float(datalog["mbits_received_at_sink"])\
]\
)
def handle_packet_loss (datamap, datalog):
if datalog["measurement"] == "packetloss":
if not (datalog["flowid"] in datamap.keys()):
datamap[datalog["flowid"]] = {}
if not (datalog["measurement"] in datamap[datalog["flowid"]].keys()):
datamap[datalog["flowid"]][datalog["measurement"]] = []
# exactly one packet loss is reported each time
datamap[datalog["flowid"]][datalog["measurement"]].append(\
[ float(datalog["ms_globaltime"]), \
float(1.0)\
]\
)
# Data is parsed into triply nested dict with key-levels at link-id,
# measurement type, and link buffer direction. The final values
# are just [time (ms), buffer fractional occupancy (0-1)]
def handle_buffer_occupancy (datamap, datalog):
if datalog["measurement"] == "bufferoccupancy":
if not (datalog["linkid"] in datamap.keys()):
datamap[datalog["linkid"]] = {}
if not (datalog["measurement"] in datamap[datalog["linkid"]].keys()):
datamap[datalog["linkid"]][datalog["measurement"]] = {}
if not (datalog["direction"] in datamap[datalog["linkid"]][datalog["measurement"]].keys()):
datamap[datalog["linkid"]][datalog["measurement"]][datalog["direction"]] = []
datamap[datalog["linkid"]][datalog["measurement"]][datalog["direction"]].append(\
[ float(datalog["ms_globaltime"]), \
float(datalog["fractional_buffer_occupancy"])\
]\
)
def handle_flow_window (datamap, datalog):
if datalog["measurement"] == "windowsize":
if not (datalog["flowid"] in datamap.keys()):
datamap[datalog["flowid"]] = {}
if not (datalog["measurement"] in datamap[datalog["flowid"]].keys()):
datamap[datalog["flowid"]][datalog["measurement"]] = []
datamap[datalog["flowid"]][datalog["measurement"]].append(\
[ float(datalog["ms_globaltime"]), \
float(datalog["windowsize"])\
]\
)
def handle_flow_state (datamap, datalog):
if datalog["measurement"] == "flowstate":
if not (datalog["flowid"] in datamap.keys()):
datamap[datalog["flowid"]] = {}
if not (datalog["measurement"] in datamap[datalog["flowid"]].keys()):
datamap[datalog["flowid"]][datalog["measurement"]] = []
datamap[datalog["flowid"]][datalog["measurement"]].append(\
[ float(datalog["ms_globaltime"]), \
float(datalog["state"])\
]\
)
def handle_packets_outstanding (datamap, datalog):
if datalog["measurement"] == "outstandingpackets":
if not (datalog["flowid"] in datamap.keys()):
datamap[datalog["flowid"]] = {}
if not (datalog["measurement"] in datamap[datalog["flowid"]].keys()):
datamap[datalog["flowid"]][datalog["measurement"]] = []
datamap[datalog["flowid"]][datalog["measurement"]].append(\
[ float(datalog["ms_globaltime"]), \
float(datalog["packets_out"]), \
float(datalog["packets_left"]),\
float(datalog["packets_in_transit"]),\
float(datalog["packets_ackd"]),\
float(datalog["total_packets"]),\
]\
)
def handle_flow_reno_debug (datamap, datalog):
if datalog["measurement"] == "fullrenodebug":
if not (datalog["flowid"] in datamap.keys()):
datamap[datalog["flowid"]] = {}
if not (datalog["measurement"] in datamap[datalog["flowid"]].keys()):
datamap[datalog["flowid"]][datalog["measurement"]] = []
datamap[datalog["flowid"]][datalog["measurement"]].append(\
[ float(datalog["ms_globaltime"]), \
datalog["SendReceive"],\
int(datalog["whichPacket"]),\
int(datalog["EPIT"]),\
int(datalog["LPIA"]),\
float(datalog["WS"]),\
float(datalog["CAT"]),\
float(datalog["STT"]),\
int(datalog["L3P0"]),\
int(datalog["L3P1"]),\
int(datalog["L3P2"]),\
datalog["TAF"],\
datalog["DAF"],\
datalog["SAF"],\
int(datalog["State"]),\
datalog["isTimeoutOccurring"],\
float(datalog["RTTactEst"]) ])
def handle_flow_vegas_debug (datamap, datalog):
if datalog["measurement"] == "fullvegasdebug":
if not (datalog["flowid"] in datamap.keys()):
datamap[datalog["flowid"]] = {}
if not (datalog["measurement"] in datamap[datalog["flowid"]].keys()):
datamap[datalog["flowid"]][datalog["measurement"]] = []
datamap[datalog["flowid"]][datalog["measurement"]].append(\
[ float(datalog["ms_globaltime"]), \
datalog["SendReceive"],\
int(datalog["whichPacket"]),\
int(datalog["EPIT"]),\
int(datalog["LPIA"]),\
float(datalog["WS"]),\
float(datalog["STT"]),\
int(datalog["L3P0"]),\
int(datalog["L3P1"]),\
int(datalog["L3P2"]),\
datalog["TAF"],\
datalog["DAF"],\
datalog["SAF"],\
int(datalog["State"]),\
datalog["FlagObserveRTT"],\
datalog["FlagRampWS"],\
datalog["isTimeoutOccurring"],\
float(datalog["RTTmin"]),\
float(datalog["RTTactEst"]),\
int(datalog["ICAPTUW"]) ])
def handle_flow_true_fast_debug (datamap, datalog):
if datalog["measurement"] == "fulltruefastdebug":
if not (datalog["flowid"] in datamap.keys()):
datamap[datalog["flowid"]] = {}
if not (datalog["measurement"] in datamap[datalog["flowid"]].keys()):
datamap[datalog["flowid"]][datalog["measurement"]] = []
datamap[datalog["flowid"]][datalog["measurement"]].append(\
[ float(datalog["ms_globaltime"]), \
datalog["SendReceive"],\
int(datalog["whichPacket"]),\
int(datalog["EPIT"]),\
int(datalog["LPIA"]),\
float(datalog["WS"]),\
float(datalog["STT"]),\
int(datalog["L3P0"]),\
int(datalog["L3P1"]),\
int(datalog["L3P2"]),\
datalog["TAF"],\
datalog["DAF"],\
datalog["SAF"],\
datalog["isTimeoutOccurring"],\
float(datalog["RTTmin"]),\
float(datalog["RTTmax"]),\
float(datalog["RTTactEst"]) ])
# Breaks time into ms_window chunks and sums values within bins
def windowed_sum(times, values, ms_window):
windowed_time = []
windowed_values = []
final_base_time = 0.0
update_bin_flag = True
k = 0
while k < len(times):
if update_bin_flag is True:
current_base_time = final_base_time
final_base_time = current_base_time + ms_window
current_bin_time = final_base_time - float(ms_window)/2
current_value_sum = 0.0
update_bin_flag = False
if times[k] <= final_base_time:
current_value_sum += values[k]
k += 1
else:
windowed_time.append(current_bin_time)
windowed_values.append(current_value_sum)
update_bin_flag = True
return (windowed_time, windowed_values)
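# (worked example, not part of the original module) with times=[1,2,12,21],
# values=[1,1,1,5] and ms_window=10, the loop emits bins centered at 5 and 15 with
# sums 2 and 1; the trailing, still-open bin (centered at 25, sum 5) is only appended
# once a later event falls past its right edge, so the final partial window is dropped.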
# Takes a time array (ms) and a values array measured at those times
# The values must be levels, not additive quantities. E.g. buffer occupancy.
# Returns the windowed time-average of the values array binned into
# blocks of ms_window.
# Start at time 0, and for every event, keep track of the forward inter-arrival time.
# Weight values by their time-to-next sample (level is constant till then)
# Divide by total ms_window to get value for that bin.
# Going forward, to the next window, remember the old value as the
# "starting state" (do NOT reset to 0)
def windowed_time_average(times, values, ms_window, initial_value):
windowed_time = []
windowed_values = []
final_base_time = 0.0
update_bin_flag = True
k = 0
while k < len(times):
if update_bin_flag is True:
update_bin_flag = False
current_base_time = final_base_time
final_base_time = current_base_time + ms_window
current_bin_time = final_base_time - float(ms_window)/2
if k == 0:
current_value = initial_value
if times[k] <= final_base_time:
current_value_time = times[k] - current_base_time
current_sum = current_value * current_value_time
else:
current_value_time = ms_window
current_sum = current_value * current_value_time
windowed_time.append(current_bin_time)
windowed_values.append(current_sum/ms_window)
update_bin_flag = True
continue
current_value = values[k]
if (k+1) < len(times):
nexteventtime = times[k+1]
else:
nexteventtime = final_base_time + 1
if nexteventtime <= final_base_time:
current_value_time = times[k+1] - times[k]
current_sum += current_value * current_value_time
else:
current_value_time = ms_window - (times[k] - current_base_time)
current_sum += current_value * current_value_time
windowed_time.append(current_bin_time)
windowed_values.append(current_sum/ms_window)
update_bin_flag = True
k += 1
return (windowed_time, windowed_values)
'''
Confirm windowed time average function returns proper results
for simple test cases:
Test1: First window is empty
ms_window = 10 ms
data = [[11ms, 1],[12ms,2],[13ms,1], [22ms,2]]
initial_value = 0
Result Expected:
t v
5 0
15 0*0.1 + 1*0.1 + 2*0.1 + 1*0.7 = 1.0
25 1*0.2 + 2 * 0.8 = 1.8
Test2: Second window is empty, non-zero initial value
ms_window = 8 ms
data = [[6ms, 2],[17ms,5],[23ms,1]]
initial_value = 1
Result Expected:
t v
4 0.75*1 + 0.25*2 = 1.25
12 2
20 0.125*2 + 0.75*5 + 0.125*1 = 0.25 + 3.75 + 0.125 = 4.125
Last Verified on 14 Nov 2015, 11 PM, by Sushant Sundaresh
Added to unit tests
'''
def test_windowed_time_average ():
names = ["Test1", "Test2"]
args = [([11.,12.,13.,22.], [1.,2.,1.,2.], 10., 0.),\
([6.,17.,23.],[2.,5.,1.],8.,1.)]
exps = [([5.,15.,25.], [0., 1.0, 1.8]),\
([4.,12.,20.],[1.25,2.,4.125])]
passFlag = True
for j in xrange(len(names)):
# print names[j]
t, v, w, i = args[j]
te, ve = exps[j]
ta, va = windowed_time_average(t,v,w,i)
for k in xrange(len(te)):
passFlag = passFlag and (ta[k] == te[k]) and (va[k] == ve[k])
return passFlag
# The element ID must be a link's string ID.
# This will break if no data matches the specified element in your simulation logs.
def plot_bufferoccupancy(datamap, linkID, ms_window, axes):
if linkID in datamap.keys():
epsilon = 10**-7
rtl_ms_times = [val[0] for val in datamap[linkID]["bufferoccupancy"][constants.RTL]]
ltr_ms_times = [val[0] for val in datamap[linkID]["bufferoccupancy"][constants.LTR]]
rtl_frac_occupancy = [val[1] for val in datamap[linkID]["bufferoccupancy"][constants.RTL]]
ltr_frac_occupancy = [val[1] for val in datamap[linkID]["bufferoccupancy"][constants.LTR]]
rtl_t, rtl_fo = windowed_time_average(rtl_ms_times, rtl_frac_occupancy, ms_window, 0.0) # buffers start empty
ltr_t, ltr_fo = windowed_time_average(ltr_ms_times, ltr_frac_occupancy, ms_window, 0.0) # buffers start empty
rtl_t = np.array([val/1000 for val in rtl_t]) # s
ltr_t = np.array([val/1000 for val in ltr_t]) # s
rtl_fo = np.array([100*val+epsilon for val in rtl_fo]) # %
ltr_fo = np.array([100*val+epsilon for val in ltr_fo]) # %
l1, l2 = axes.semilogy(rtl_t, rtl_fo,'kx-',ltr_t,ltr_fo,'r.-')
axes.set_ylabel("Left|Right Buffer [%s Full]" % '%')
axes.legend((l1,l2), ('Right-to-Left','Left-to-Right'), 'upper right')
axes.grid(True)
'''For ms_window time-windowing, need window >> timescale of events (10x PROPDELAY for links...)'''
def plot_linkrate (datamap, linkID, ms_window, axes):
if linkID in datamap.keys():
ms_times = [val[0] for val in datamap[linkID]["linkrate"]]
mbit_transfers = [val[1] for val in datamap[linkID]["linkrate"]]
t, mb = windowed_sum(ms_times, mbit_transfers, ms_window)
t = np.array([val/1000 for val in t]) # s
mbps = np.array([1000*val / ms_window for val in mb]) # Mbps
axes.plot(t, mbps,'k.-')
axes.set_ylabel("Mbps")
axes.grid(True)
'''For ms_window time-windowing, need window >> timescale of events (10x PROPDELAY for links...)'''
def plot_flow_rate (ms,mbits,label,ms_window,axes):
t, mb = windowed_sum(ms, mbits,ms_window)
t = np.array([val/1000 for val in t]) # s
mbps = np.array([1000.0*val / ms_window for val in mb]) # Mbps
axes.plot(t, mbps,'k.-')
axes.set_ylabel(label)
axes.grid(True)
# Usually there are too many of these points to integrate quickly
def plot_flow_window(ms,pkts,label,ms_window,axes):
t, w = windowed_time_average(ms, pkts, ms_window, 1.0) # W0=1 for all dynamic TCPs
t = np.array([val/1000 for val in t]) # s
w = np.array(w) # packets
axes.plot(t, w,'k.-')
axes.set_ylabel(label)
axes.grid(True)
def plot_flow_loss (ms,pkts,label,ms_window,axes):
t, plost = windowed_sum(ms, pkts, ms_window)
t = np.array([val/1000 for val in t]) # s
plost = np.array(plost) # packets
axes.plot(t, plost,'k.-')
axes.set_ylabel(label)
plt.grid(True)
# Usually there are too many of these points to integrate quickly
def plot_flow_delay (ms,ms_delay,label,ms_window,axes):
t, d = windowed_time_average(ms, ms_delay, ms_window, 0) # delay0=0 for our simulations
t = np.array([val/1000 for val in t]) # s
d = np.array(d) # ms
axes.plot(t, d,'k.-')
axes.set_ylabel(label)
plt.grid(True)
# Reference: http://stackoverflow.com/questions/273192/in-python-check-if-a-directory-exists-and-create-it-if-necessary
def ensure_dir(f):
d = os.path.dirname(f)
if not os.path.exists(d):
os.makedirs(d)
if __name__ == "__main__":
if (len(sys.argv) == 2 and sys.argv[1] == "--help") or (len(sys.argv) != 5 or sys.argv[1] != "--testcase" or sys.argv[3] != "--tcp" or sys.argv[4] not in ["fast","vegas","reno"]):
print "Usage: python visualize.py --testcase testcase.json --tcp [fast|vegas|reno]\n"
sys.exit(1)
measurementFilename = os.path.join('results','all_measurements.txt')
testImageFilename = os.path.join(os.path.join('results','plots'), "test.jpeg")
testRawDataFilename = os.path.join(os.path.join('results','rawdata'), "test.jpeg")
ensure_dir(measurementFilename)
ensure_dir(testImageFilename)
ensure_dir(testRawDataFilename)
testCase = sys.argv[2]
tcp = sys.argv[4]
for f in os.listdir("results"):
if not os.path.isdir(os.path.join('results',f)):
print "Cleaning up... removing %s" % os.path.join('results', f)
os.remove(os.path.join('results', f))
for f in os.listdir(os.path.join('results','plots')):
print "Cleaning up... removing %s" % os.path.join(os.path.join('results','plots'), f)
os.remove(os.path.join(os.path.join('results','plots'), f))
for f in os.listdir(os.path.join('results','rawdata')):
print "Cleaning up... removing %s" % os.path.join(os.path.join('results','rawdata'), f)
os.remove(os.path.join(os.path.join('results','rawdata'), f))
print "Simulating network..."
# Run the main loop on the requested test case, temporarily redirecting STDOUT.
# STDERR will report progress.
sys.stdout = open(measurementFilename, 'w')
element_map = MainLoop().simulate(testCase,tcp)
sys.stdout = sys.__stdout__
print "Done simulating..."
print "Parsing results..."
# element id and measurement type to data map
# keyed as ['l1']['linkrate']
eimtod = {}
# Parse out measurements from measurements file
with open(measurementFilename) as m:
for line in m:
try:
log = json.loads(line)
if log["logtype"] == "measurement":
handle_linkrate(eimtod, log)
handle_buffer_occupancy(eimtod, log)
handle_packet_loss(eimtod, log)
handle_flowrate(eimtod, log)
handle_flow_window(eimtod, log)
handle_flow_state(eimtod, log)
handle_packets_outstanding(eimtod, log)
handle_flow_reno_debug(eimtod, log)
handle_flow_true_fast_debug(eimtod, log)
handle_flow_vegas_debug(eimtod, log)
# others
except ValueError:
pass
except KeyError:
raise
# Dump parsed measurements for visual debugging
for element in eimtod.keys():
for measurement in eimtod[element].keys():
if isinstance(eimtod[element][measurement],dict):
# more layers
for dataclass in eimtod[element][measurement].keys():
# actual data
with open(os.path.join(os.path.join('results','rawdata'),\
"%s_%s_%s.txt"%(element,measurement,dataclass)),'w') as f:
f.write("time\t\tvalue\n")
for t,v in eimtod[element][measurement][dataclass]:
f.write("%0.6e\t\t%0.6e\n"%(t,v))
else:
# actual data; handle the debug dumps separately.
# These aren't just for debugging; they carry genuinely useful
# data. We simply aren't doing anything with most of it yet.
with open(os.path.join(os.path.join('results','rawdata'),\
"%s_%s.txt"%(element,measurement)),'w') as f:
if measurement == "outstandingpackets":
f.write("time\t\tout\t\tleft\t\tintransit\t\tackd\t\ttotal\n")
for t,v1,v2,v3,v4,v5 in eimtod[element][measurement]:
f.write("%0.6e\t\t%d\t\t%d\t\t%d\t\t%d\t\t%d\n"%(t,v1,v2,v3,v4,v5))
elif measurement == "fullrenodebug":
f.write("time\t\tReason\t\tPacketID\t\tEPIT\t\tLPIA\t\tWS\t\tCAT\t\tSTT\t\t[L3P0\t\tL3P1\t\tL3P2]\t\tTAF\t\tDAF\t\tSAF\t\tState\t\tTimeoutOccurred\t\tRTTEst\n")
for t,SendReceive,whichPacket,EPIT,LPIA,WS,CAT,STT,L3P0,L3P1,L3P2,TAF,DAF,SAF,State,TO,RTTEst in eimtod[element][measurement]:
f.write("%0.6e\t\t%s\t\t%d\t\t%d\t\t%d\t\t%0.3e\t\t%0.3e\t\t%0.6e\t\t[%d\t\t%d\t\t%d]\t\t%s\t\t%s\t\t%s\t\t%d\t\t%s\t\t%0.6e\n"%(t,SendReceive,whichPacket,EPIT,LPIA,WS,CAT,STT,L3P0,L3P1,L3P2,TAF,DAF,SAF,State,TO,RTTEst))
elif measurement == "fullvegasdebug":
f.write("time\t\tReason\t\tPacketID\t\tEPIT\t\tLPIA\t\tWS\t\tSTT\t\t[L3P0\t\tL3P1\t\tL3P2]\t\tTAF\t\tDAF\t\tSAF\t\tState\t\tObserve\t\tRamp\t\tTimeoutOccurred\t\tRTTmin\t\tRTTAct\t\tPacketsTillCanChangeWS\n")
for t,SendReceive,whichPacket,EPIT,LPIA,WS,STT,L3P0,L3P1,L3P2,TAF,DAF,SAF,State,FlagO,FlagR,TO,RTTm,RTTa,ICAPTUW in eimtod[element][measurement]:
f.write("%0.6e\t\t%s\t\t%d\t\t%d\t\t%d\t\t%0.3e\t\t%0.6e\t\t[%d\t\t%d\t\t%d]\t\t%s\t\t%s\t\t%s\t\t%d\t\t%s\t\t%s\t\t%s\t\t%0.6e\t\t%0.6e\t\t%d\n"%(t,SendReceive,whichPacket,EPIT,LPIA,WS,STT,L3P0,L3P1,L3P2,TAF,DAF,SAF,State,FlagO,FlagR,TO,RTTm,RTTa,ICAPTUW))
elif measurement == "fulltruefastdebug":
f.write("time\t\tReason\t\tPacketID\t\tEPIT\t\tLPIA\t\tWS\t\tSTT\t\t[L3P0\t\tL3P1\t\tL3P2]\t\tTAF\t\tDAF\t\tSAF\t\tTimeoutOccurred\t\tRTTmin\t\tRTTmax\t\tRTTAct\n")
for t,SendReceive,whichPacket,EPIT,LPIA,WS,STT,L3P0,L3P1,L3P2,TAF,DAF,SAF,TO,RTTmi,RTTma,RTTac in eimtod[element][measurement]:
f.write("%0.6e\t\t%s\t\t%d\t\t%d\t\t%d\t\t%0.3e\t\t%0.6e\t\t%d\t\t%d\t\t%d\t\t%s\t\t%s\t\t%s\t\t%s\t\t%0.6e\t\t%0.6e\t\t%0.6e\n"%(t,SendReceive,whichPacket,EPIT,LPIA,WS,STT,L3P0,L3P1,L3P2,TAF,DAF,SAF,TO,RTTmi,RTTma,RTTac))
else:
f.write("time\t\tvalue\n")
for t,v in eimtod[element][measurement]:
f.write("%0.6e\t\t%0.6e\n"%(t,v))
print "Done parsing results..."
print "Plotting results..."
'''
Want to plot, for each network element for which these data are available:
1. link rate (Mbps), bin-averaged
2. buffer occupancy (%), time-averaged
3. packet loss (packets), bin-summed
4. flow rate (Mbps), bin-averaged
5. flow window size (packets), time-averaged
6. packet delay (ms), event trace (solid line)
All will be black lines (solid) or single points (dotted).
Plots will be kept completely separate.
The code below is sensitive to a LACK of data. It will likely break
if any of the data expected for the standard plots is not found
in your simulation for some reason (weird locked routing, etc.).
'''
ms_window = constants.MS_WINDOW
for (d,v) in element_map.items():
if isinstance(v, Link):
myname = "Link %s"%v.get_id()
print "for %s..."%myname
myid = v.get_id()
all_plots = plt.figure()
linkrate_ax = all_plots.add_subplot(211)
buffocc_ax = all_plots.add_subplot(212)
plot_linkrate(eimtod, myid, ms_window, linkrate_ax)
plot_bufferoccupancy(eimtod, myid, ms_window, buffocc_ax)
linkrate_ax.set_title("%s Trace"%myname)
buffocc_ax.set_xlabel('Seconds')
all_plots.savefig(os.path.join(os.path.join('results','plots'),"%s.jpeg"%myid))
plt.close()
elif isinstance(v,Data_Source):
myid = v.get_id()
myname = myid.split('_')[0]
print "for Flow %s..."%myname
mysink = "%s_%s"%(myname,"dest") # see jsonparser.py
all_data = []
pltCount = 0
plot_functions = []
if isinstance(v, Working_Data_Source_TCP_RENO):
# guaranteed to have this data
mydata = eimtod[myid]["fullrenodebug"]
mytimes = [val[0] for val in mydata] # ms
myWS = [val[5] for val in mydata] # packets
myDelay = [val[16] for val in mydata] # ms
elif isinstance(v, Working_Data_Source_TCP_VEGAS):
# guaranteed to have this data
mydata = eimtod[myid]["fullvegasdebug"]
mytimes = [val[0] for val in mydata] # ms
myWS = [val[5] for val in mydata] # packets
myDelay = [val[18] for val in mydata] # ms
elif isinstance(v, Working_Data_Source_TCP_FAST):
# guaranteed to have this data
mydata = eimtod[myid]["fulltruefastdebug"]
mytimes = [val[0] for val in mydata] # ms
myWS = [val[5] for val in mydata] # packets
myDelay = [val[16] for val in mydata] # ms
plot_functions.append(lambda ((ms,dat,label,ms_window,axes)): plot_flow_window(ms,dat,label,ms_window,axes))
plot_functions.append(lambda ((ms,dat,label,ms_window,axes)): plot_flow_delay(ms,dat,label,ms_window,axes))
all_data.append([mytimes,myWS,'Window (pkts)'])
all_data.append([mytimes,myDelay,'RTT (ms)'])
pltCount += 2
pkLossFlag = False
if "packetloss" in eimtod[myid].keys():
mydata = eimtod[myid]["packetloss"]
myLossTime = [val[0] for val in mydata] # ms
myLoss = [val[1] for val in mydata] # 0, 1
all_data.append([myLossTime,myLoss,"Loss (pkts)"])
pltCount += 1
pkLossFlag = True
plot_functions.append(lambda ((ms,dat,label,ms_window,axes)): plot_flow_loss(ms,dat,label,ms_window,axes))
if "flowrate" in eimtod[mysink].keys():
mydata = eimtod[mysink]["flowrate"]
myRateTime = [val[0] for val in mydata] # ms
myRate = [val[1] for val in mydata] # mbits
all_data.append([myRateTime,myRate,"Mbps"])
pltCount += 1
plot_functions.append(lambda ((ms,dat,label,ms_window,axes)): plot_flow_rate(ms,dat,label,ms_window,axes))
all_plots = plt.figure()
myaxes = []
flow_ws_ax = all_plots.add_subplot(pltCount,1,1)
myaxes.append(flow_ws_ax)
flow_delay_ax = all_plots.add_subplot(pltCount,1,2)
myaxes.append(flow_delay_ax)
if pltCount == 3 and pkLossFlag:
flow_loss_ax = all_plots.add_subplot(pltCount,1,3)
myaxes.append(flow_loss_ax)
elif pltCount == 3:
flow_rate_ax = all_plots.add_subplot(pltCount,1,3)
myaxes.append(flow_rate_ax)
elif pltCount > 3:
flow_loss_ax = all_plots.add_subplot(pltCount,1,3)
myaxes.append(flow_loss_ax)
flow_rate_ax = all_plots.add_subplot(pltCount,1,4)
myaxes.append(flow_rate_ax)
for m in xrange(pltCount):
plot_functions[m]((all_data[m][0],all_data[m][1],all_data[m][2],ms_window,myaxes[m]))
myaxes[0].set_title("%s Trace"%myname)
myaxes[len(myaxes)-1].set_xlabel('Seconds')
all_plots.savefig(os.path.join(os.path.join('results','plots'),"%s.jpeg"%myname))
plt.close()
else:
continue
print "Done plotting results..."
print "Goodbye!"
sys.exit(0)
|
gpl-2.0
|
pprett/scikit-learn
|
examples/model_selection/grid_search_text_feature_extraction.py
|
99
|
4163
|
"""
==========================================================
Sample pipeline for text feature extraction and evaluation
==========================================================
The dataset used in this example is the 20 newsgroups dataset which will be
automatically downloaded and then cached and reused for the document
classification example.
You can adjust the number of categories by giving their names to the dataset
loader, or set them to None to get all 20 of them.
Here is a sample output of a run on a quad-core machine::
Loading 20 newsgroups dataset for categories:
['alt.atheism', 'talk.religion.misc']
1427 documents
2 categories
Performing grid search...
pipeline: ['vect', 'tfidf', 'clf']
parameters:
{'clf__alpha': (1.0000000000000001e-05, 9.9999999999999995e-07),
'clf__n_iter': (10, 50, 80),
'clf__penalty': ('l2', 'elasticnet'),
'tfidf__use_idf': (True, False),
'vect__max_n': (1, 2),
'vect__max_df': (0.5, 0.75, 1.0),
'vect__max_features': (None, 5000, 10000, 50000)}
done in 1737.030s
Best score: 0.940
Best parameters set:
clf__alpha: 9.9999999999999995e-07
clf__n_iter: 50
clf__penalty: 'elasticnet'
tfidf__use_idf: True
vect__max_n: 2
vect__max_df: 0.75
vect__max_features: 50000
"""
# Author: Olivier Grisel <[email protected]>
# Peter Prettenhofer <[email protected]>
# Mathieu Blondel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from pprint import pprint
from time import time
import logging
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
data = fetch_20newsgroups(subset='train', categories=categories)
print("%d documents" % len(data.filenames))
print("%d categories" % len(data.target_names))
print()
###############################################################################
# define a pipeline combining a text feature extractor with a simple
# classifier
pipeline = Pipeline([
('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', SGDClassifier()),
])
# uncommenting more parameters will give better exploring power but will
# increase processing time in a combinatorial way
parameters = {
'vect__max_df': (0.5, 0.75, 1.0),
#'vect__max_features': (None, 5000, 10000, 50000),
'vect__ngram_range': ((1, 1), (1, 2)), # unigrams or bigrams
#'tfidf__use_idf': (True, False),
#'tfidf__norm': ('l1', 'l2'),
'clf__alpha': (0.00001, 0.000001),
'clf__penalty': ('l2', 'elasticnet'),
#'clf__n_iter': (10, 50, 80),
}
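# Added note (not in the original example): parameter keys follow scikit-learn's
# "<step>__<parameter>" convention, so 'clf__alpha' targets the alpha of the
# 'clf' step of the pipeline above. As written, the grid has
# 3 * 2 * 2 * 2 = 24 candidate settings; uncommenting every line above grows it
# to 3 * 4 * 2 * 2 * 2 * 2 * 2 * 3 = 1152, each refit once per CV fold.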
if __name__ == "__main__":
# multiprocessing requires the fork to happen in a __main__ protected
# block
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1)
print("Performing grid search...")
print("pipeline:", [name for name, _ in pipeline.steps])
print("parameters:")
pprint(parameters)
t0 = time()
grid_search.fit(data.data, data.target)
print("done in %0.3fs" % (time() - t0))
print()
print("Best score: %0.3f" % grid_search.best_score_)
print("Best parameters set:")
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print("\t%s: %r" % (param_name, best_parameters[param_name]))
|
bsd-3-clause
|
sameera2004/chxtools
|
chxtools/pv_explorer/model.py
|
5
|
3202
|
from atom.api import *
from matplotlib.figure import Figure
from matplotlib.axes import Axes
import numpy as np
import datetime
import os
def _get_from_channel_archiver(pv_name, t0=0, t1=10):
x = np.linspace(t0, t1, 1000)
y = np.sin(x) * 10
y += np.random.randn(len(x))
return x, y
class Model(Atom):
pv1 = Str()
pv2 = Str()
pv3 = Str()
pv4 = Str()
_fig = Typed(Figure)
_data = Dict()
autolim_axes = Bool(True)
_axes = Dict()
t0 = Float()
t1 = Float()
dt0 = Typed(datetime.datetime)
data_file = Str()
def __init__(self):
with self.suppress_notifications():
# plotting initialization
self.dt0 = datetime.datetime.utcnow()
self._fig = Figure(figsize=(1, 1))
self._fig.set_tight_layout(True)
pvs = ['pv1', 'pv2', 'pv3', 'pv4']
for idx, (name, position) in enumerate(zip(pvs,
range(1, len(pvs)+1))):
if idx == 0:
sharex = None
else:
sharex = self._axes[pvs[0]]
self._axes[name] = self._fig.add_subplot(
len(pvs)+1, 1, position, sharex=sharex)
self._axes['data'] = self._fig.add_subplot(
len(pvs)+1, 1, len(pvs)+1, sharex=self._axes[pvs[0]])
@observe('data_file')
def datafiles_changed(self, changed):
# load your data
# for now, let's fake it
x = np.linspace(self.t0, self.t1, 1000)
y = np.cos(x) * 10
y += np.random.randn(len(x))
self._axes['data'].cla()
self._axes['data'].plot(x, y, label=self.data_file.split(os.sep)[-1])
self._axes['data'].legend(loc=0)
self.reformat_view()
@observe('dt0')
def dt0_changed(self, changed):
print(changed)
@observe('pv1', 'pv2', 'pv3', 'pv4')
def get_pv1(self, changed):
print(changed)
# get the data from the channel archiver
pv_name = changed['value']
axes = self._axes[changed['name']]
axes.set_ylabel(pv_name)
self._update_data(pv_name, axes)
def _update_data(self, pv_name, axes):
x, y = _get_from_channel_archiver(pv_name, self.t0, self.t1)
# self._data[pv_name] = (x, y)
axes.cla()
axes.plot(x, y, label=pv_name)
axes.legend(loc=0)
self.reformat_view()
@observe('t0', 't1')
def change_time(self, changed):
for k, axes in self._axes.items():
if k == 'data':
continue
pv_name = getattr(self, k)
self._update_data(pv_name, axes)
def reformat_view(self, *args, **kwargs):
"""
Recompute the limits, rescale the view, reformat the legend and redraw
the canvas
"""
# ignore the args and kwargs. They are here so that any function can be
# connected to this one
try:
for k, ax in self._axes.items():
ax.relim(visible_only=True)
ax.autoscale_view(tight=True)
self._fig.canvas.draw()
except AttributeError:
pass
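# Minimal usage sketch (hypothetical; the PV name and file name are made up).
# Setting any of the Atom members below fires the corresponding @observe handler:
#   model = Model()
#   model.pv1 = 'XF:EXAMPLE{Det:1}Cnt-I'   # get_pv1 -> _update_data -> redraw
#   model.data_file = 'scan_0042.dat'      # datafiles_changed replots the data row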
|
bsd-3-clause
|
HyperloopTeam/FullOpenMDAO
|
lib/python2.7/site-packages/matplotlib/bezier.py
|
10
|
15695
|
"""
A module providing some utility functions regarding bezier path manipulation.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
from matplotlib.path import Path
from operator import xor
import warnings
class NonIntersectingPathException(ValueError):
pass
# some functions
def get_intersection(cx1, cy1, cos_t1, sin_t1,
cx2, cy2, cos_t2, sin_t2):
""" return a intersecting point between a line through (cx1, cy1)
and having angle t1 and a line through (cx2, cy2) and angle t2.
"""
# line1 => sin_t1 * (x - cx1) - cos_t1 * (y - cy1) = 0.
# line1 => sin_t1 * x + cos_t1 * y = sin_t1*cx1 - cos_t1*cy1
line1_rhs = sin_t1 * cx1 - cos_t1 * cy1
line2_rhs = sin_t2 * cx2 - cos_t2 * cy2
# rhs matrix
a, b = sin_t1, -cos_t1
c, d = sin_t2, -cos_t2
ad_bc = a * d - b * c
if ad_bc == 0.:
raise ValueError("Given lines do not intersect")
#rhs_inverse
a_, b_ = d, -b
c_, d_ = -c, a
a_, b_, c_, d_ = [k / ad_bc for k in [a_, b_, c_, d_]]
x = a_ * line1_rhs + b_ * line2_rhs
y = c_ * line1_rhs + d_ * line2_rhs
return x, y
def get_normal_points(cx, cy, cos_t, sin_t, length):
"""
For a line passing through (*cx*, *cy*) and having an angle *t*, return
the locations of the two points located along its perpendicular line at a
distance of *length*.
"""
if length == 0.:
return cx, cy, cx, cy
cos_t1, sin_t1 = sin_t, -cos_t
cos_t2, sin_t2 = -sin_t, cos_t
x1, y1 = length * cos_t1 + cx, length * sin_t1 + cy
x2, y2 = length * cos_t2 + cx, length * sin_t2 + cy
return x1, y1, x2, y2
## BEZIER routines
# subdividing bezier curve
# http://www.cs.mtu.edu/~shene/COURSES/cs3621/NOTES/spline/Bezier/bezier-sub.html
def _de_casteljau1(beta, t):
next_beta = beta[:-1] * (1 - t) + beta[1:] * t
return next_beta
def split_de_casteljau(beta, t):
"""split a bezier segment defined by its controlpoints *beta*
into two separate segment divided at *t* and return their control points.
"""
beta = np.asarray(beta)
beta_list = [beta]
while True:
beta = _de_casteljau1(beta, t)
beta_list.append(beta)
if len(beta) == 1:
break
left_beta = [beta[0] for beta in beta_list]
right_beta = [beta[-1] for beta in reversed(beta_list)]
return left_beta, right_beta
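# Worked example (annotation added for clarity): splitting the quadratic
# segment with control points (0, 0), (1, 1), (2, 0) at t = 0.5 gives
#   left  = [[0, 0], [0.5, 0.5], [1.0, 0.5]]
#   right = [[1.0, 0.5], [1.5, 0.5], [2, 0]]
# i.e. both halves share the on-curve point (1.0, 0.5).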
# FIXME spelling mistake in the name of the parameter ``tolerence``
def find_bezier_t_intersecting_with_closedpath(bezier_point_at_t,
inside_closedpath,
t0=0., t1=1., tolerence=0.01):
""" Find a parameter t0 and t1 of the given bezier path which
bounds the intersecting points with a provided closed
path(*inside_closedpath*). Search starts from *t0* and *t1* and it
uses a simple bisecting algorithm therefore one of the end point
must be inside the path while the orther doesn't. The search stop
when |t0-t1| gets smaller than the given tolerence.
value for
- bezier_point_at_t : a function which returns x, y coordinates at *t*
- inside_closedpath : return True if the point is insed the path
"""
# inside_closedpath : function
start = bezier_point_at_t(t0)
end = bezier_point_at_t(t1)
start_inside = inside_closedpath(start)
end_inside = inside_closedpath(end)
if not xor(start_inside, end_inside):
raise NonIntersectingPathException(
"the segment does not seem to intersect with the path")
while 1:
# return if the distance is smaller than the tolerence
if (start[0] - end[0]) ** 2 + \
(start[1] - end[1]) ** 2 < tolerence ** 2:
return t0, t1
# calculate the middle point
middle_t = 0.5 * (t0 + t1)
middle = bezier_point_at_t(middle_t)
middle_inside = inside_closedpath(middle)
if xor(start_inside, middle_inside):
t1 = middle_t
end = middle
end_inside = middle_inside
else:
t0 = middle_t
start = middle
start_inside = middle_inside
class BezierSegment(object):
"""
A simple class representing a 2-dimensional bezier segment.
"""
# Higher order bezier lines can be supported by simply adding the
# corresponding values.
_binom_coeff = {1: np.array([1., 1.]),
2: np.array([1., 2., 1.]),
3: np.array([1., 3., 3., 1.])}
def __init__(self, control_points):
"""
*control_points* : locations of the control points. It needs to have a
shape of (order + 1) * 2, where the order of the bezier line
satisfies 1 <= order <= 3.
"""
_o = len(control_points)
self._orders = np.arange(_o)
_coeff = BezierSegment._binom_coeff[_o - 1]
_control_points = np.asarray(control_points)
xx = _control_points[:, 0]
yy = _control_points[:, 1]
self._px = xx * _coeff
self._py = yy * _coeff
def point_at_t(self, t):
"evaluate a point at t"
one_minus_t_powers = np.power(1. - t, self._orders)[::-1]
t_powers = np.power(t, self._orders)
tt = one_minus_t_powers * t_powers
_x = sum(tt * self._px)
_y = sum(tt * self._py)
return _x, _y
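# Example (annotation, not in the original module): the quadratic segment with
# control points (0, 0), (1, 1), (2, 0) evaluates to the curve midpoint at t = 0.5:
#   >>> BezierSegment([(0., 0.), (1., 1.), (2., 0.)]).point_at_t(0.5)
#   (1.0, 0.5)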
def split_bezier_intersecting_with_closedpath(bezier,
inside_closedpath,
tolerence=0.01):
"""
bezier : control points of the bezier segment
inside_closedpath : a function which returns true if the point is inside
the path
"""
bz = BezierSegment(bezier)
bezier_point_at_t = bz.point_at_t
t0, t1 = find_bezier_t_intersecting_with_closedpath(bezier_point_at_t,
inside_closedpath,
tolerence=tolerence)
_left, _right = split_de_casteljau(bezier, (t0 + t1) / 2.)
return _left, _right
def find_r_to_boundary_of_closedpath(inside_closedpath, xy,
cos_t, sin_t,
rmin=0., rmax=1., tolerence=0.01):
"""
Find a radius r (centered at *xy*) between *rmin* and *rmax* at
which it intersects with the path.
inside_closedpath : function
cx, cy : center
cos_t, sin_t : cosine and sine for the angle
rmin, rmax :
"""
cx, cy = xy
def _f(r):
return cos_t * r + cx, sin_t * r + cy
find_bezier_t_intersecting_with_closedpath(_f, inside_closedpath,
t0=rmin, t1=rmax,
tolerence=tolerence)
## matplotlib specific
def split_path_inout(path, inside, tolerence=0.01, reorder_inout=False):
""" divide a path into two segment at the point where inside(x, y)
becomes False.
"""
path_iter = path.iter_segments()
ctl_points, command = next(path_iter)
begin_inside = inside(ctl_points[-2:]) # true if begin point is inside
bezier_path = None
ctl_points_old = ctl_points
concat = np.concatenate
iold = 0
i = 1
for ctl_points, command in path_iter:
iold = i
i += len(ctl_points) // 2
if inside(ctl_points[-2:]) != begin_inside:
bezier_path = concat([ctl_points_old[-2:], ctl_points])
break
ctl_points_old = ctl_points
if bezier_path is None:
raise ValueError("The path does not seem to intersect with the patch")
bp = list(zip(bezier_path[::2], bezier_path[1::2]))
left, right = split_bezier_intersecting_with_closedpath(bp,
inside,
tolerence)
if len(left) == 2:
codes_left = [Path.LINETO]
codes_right = [Path.MOVETO, Path.LINETO]
elif len(left) == 3:
codes_left = [Path.CURVE3, Path.CURVE3]
codes_right = [Path.MOVETO, Path.CURVE3, Path.CURVE3]
elif len(left) == 4:
codes_left = [Path.CURVE4, Path.CURVE4, Path.CURVE4]
codes_right = [Path.MOVETO, Path.CURVE4, Path.CURVE4, Path.CURVE4]
else:
raise ValueError()
verts_left = left[1:]
verts_right = right[:]
#i += 1
if path.codes is None:
path_in = Path(concat([path.vertices[:i], verts_left]))
path_out = Path(concat([verts_right, path.vertices[i:]]))
else:
path_in = Path(concat([path.vertices[:iold], verts_left]),
concat([path.codes[:iold], codes_left]))
path_out = Path(concat([verts_right, path.vertices[i:]]),
concat([codes_right, path.codes[i:]]))
if reorder_inout and begin_inside == False:
path_in, path_out = path_out, path_in
return path_in, path_out
def inside_circle(cx, cy, r):
r2 = r ** 2
def _f(xy):
x, y = xy
return (x - cx) ** 2 + (y - cy) ** 2 < r2
return _f
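# Example (added note): inside_circle returns a closure usable as the
# *inside_closedpath* predicate above, e.g.
#   >>> inside_circle(0., 0., 1.)((0.5, 0.5))
#   True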
# quadratic bezier lines
def get_cos_sin(x0, y0, x1, y1):
dx, dy = x1 - x0, y1 - y0
d = (dx * dx + dy * dy) ** .5
return dx / d, dy / d
def check_if_parallel(dx1, dy1, dx2, dy2, tolerence=1.e-5):
""" returns
* 1 if the two lines are parallel and point in the same direction
* -1 if the two lines are parallel and point in opposite directions
* 0 (returned as False) otherwise
"""
theta1 = np.arctan2(dx1, dy1)
theta2 = np.arctan2(dx2, dy2)
dtheta = np.abs(theta1 - theta2)
if dtheta < tolerence:
return 1
elif np.abs(dtheta - np.pi) < tolerence:
return -1
else:
return False
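# Quick check (added note), with (dx, dy) direction pairs:
#   check_if_parallel(1., 0., 2., 0.)  -> 1   (same direction)
#   check_if_parallel(1., 0., -1., 0.) -> -1  (opposite direction)
#   check_if_parallel(1., 0., 0., 1.)  -> False  (the docstring's "0"; False == 0)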
def get_parallels(bezier2, width):
"""
Given the quadratic bezier control points *bezier2*, returns the
control points of quadratic bezier lines roughly parallel to the given
one, separated by *width*.
"""
# The parallel bezier lines are constructed in the following way.
# c1 and c2 are the control points representing the beginning and end of
# the bezier line.
# cm is the middle point.
c1x, c1y = bezier2[0]
cmx, cmy = bezier2[1]
c2x, c2y = bezier2[2]
parallel_test = check_if_parallel(c1x - cmx, c1y - cmy,
cmx - c2x, cmy - c2y)
if parallel_test == -1:
warnings.warn(
"Lines do not intersect. A straight line is used instead.")
#cmx, cmy = 0.5*(c1x+c2x), 0.5*(c1y+c2y)
cos_t1, sin_t1 = get_cos_sin(c1x, c1y, c2x, c2y)
cos_t2, sin_t2 = cos_t1, sin_t1
else:
# t1 and t2 are the angles between c1 and cm, and between cm and c2.
# They are also the angles of the tangent lines of the path at c1 and c2.
cos_t1, sin_t1 = get_cos_sin(c1x, c1y, cmx, cmy)
cos_t2, sin_t2 = get_cos_sin(cmx, cmy, c2x, c2y)
# find c1_left, c1_right, which are located along the lines
# through c1 and perpendicular to the tangent lines of the
# bezier path, at a distance of width. Same thing for c2_left and
# c2_right with respect to c2.
c1x_left, c1y_left, c1x_right, c1y_right = \
get_normal_points(c1x, c1y, cos_t1, sin_t1, width)
c2x_left, c2y_left, c2x_right, c2y_right = \
get_normal_points(c2x, c2y, cos_t2, sin_t2, width)
# find cm_left, which is the intersection point of a line through
# c1_left with angle t1 and a line through c2_left with angle
# t2. Same for cm_right.
if parallel_test != 0:
# a special case for a straight line, i.e., the angle between the two
# lines is smaller than some (arbitrary) value.
cmx_left, cmy_left = \
0.5 * (c1x_left + c2x_left), 0.5 * (c1y_left + c2y_left)
cmx_right, cmy_right = \
0.5 * (c1x_right + c2x_right), 0.5 * (c1y_right + c2y_right)
else:
cmx_left, cmy_left = \
get_intersection(c1x_left, c1y_left, cos_t1, sin_t1,
c2x_left, c2y_left, cos_t2, sin_t2)
cmx_right, cmy_right = \
get_intersection(c1x_right, c1y_right, cos_t1, sin_t1,
c2x_right, c2y_right, cos_t2, sin_t2)
# the parallel bezier lines are created with control points
# [c1_left, cm_left, c2_left] and [c1_right, cm_right, c2_right]
path_left = [(c1x_left, c1y_left),
(cmx_left, cmy_left),
(c2x_left, c2y_left)]
path_right = [(c1x_right, c1y_right),
(cmx_right, cmy_right),
(c2x_right, c2y_right)]
return path_left, path_right
def find_control_points(c1x, c1y, mmx, mmy, c2x, c2y):
""" Find control points of the bezier line throught c1, mm, c2. We
simply assume that c1, mm, c2 which have parametric value 0, 0.5, and 1.
"""
cmx = .5 * (4 * mmx - (c1x + c2x))
cmy = .5 * (4 * mmy - (c1y + c2y))
return [(c1x, c1y), (cmx, cmy), (c2x, c2y)]
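# Worked check (annotation added): with c1 = (0, 0), mm = (1, 0.5), c2 = (2, 0)
# this gives cm = (0.5*(4*1 - 2), 0.5*(4*0.5 - 0)) = (1, 1), and indeed
# BezierSegment([(0, 0), (1, 1), (2, 0)]).point_at_t(0.5) == (1.0, 0.5) == mm.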
def make_wedged_bezier2(bezier2, width, w1=1., wm=0.5, w2=0.):
"""
Similar to get_parallels, returns the control points of two quadratic
bezier lines roughly parallel to the given one, whose separation is
*width* scaled by w1, wm and w2 along the curve.
"""
# c1, cm, c2
c1x, c1y = bezier2[0]
cmx, cmy = bezier2[1]
c3x, c3y = bezier2[2]
# t1 and t2 are the angles between c1 and cm, and between cm and c3.
# They are also the angles of the tangent lines of the path at c1 and c3.
cos_t1, sin_t1 = get_cos_sin(c1x, c1y, cmx, cmy)
cos_t2, sin_t2 = get_cos_sin(cmx, cmy, c3x, c3y)
# find c1_left, c1_right, which are located along the lines
# through c1 and perpendicular to the tangent lines of the
# bezier path, at a distance of width. Same thing for c3_left and
# c3_right with respect to c3.
c1x_left, c1y_left, c1x_right, c1y_right = \
get_normal_points(c1x, c1y, cos_t1, sin_t1, width * w1)
c3x_left, c3y_left, c3x_right, c3y_right = \
get_normal_points(c3x, c3y, cos_t2, sin_t2, width * w2)
# find c12, c23 and c123 which are middle points of c1-cm, cm-c3 and
# c12-c23
c12x, c12y = (c1x + cmx) * .5, (c1y + cmy) * .5
c23x, c23y = (cmx + c3x) * .5, (cmy + c3y) * .5
c123x, c123y = (c12x + c23x) * .5, (c12y + c23y) * .5
# tangential angle of c123 (angle between c12 and c23)
cos_t123, sin_t123 = get_cos_sin(c12x, c12y, c23x, c23y)
c123x_left, c123y_left, c123x_right, c123y_right = \
get_normal_points(c123x, c123y, cos_t123, sin_t123, width * wm)
path_left = find_control_points(c1x_left, c1y_left,
c123x_left, c123y_left,
c3x_left, c3y_left)
path_right = find_control_points(c1x_right, c1y_right,
c123x_right, c123y_right,
c3x_right, c3y_right)
return path_left, path_right
def make_path_regular(p):
"""
fill in the codes if None.
"""
c = p.codes
if c is None:
c = np.empty(p.vertices.shape[:1], "i")
c.fill(Path.LINETO)
c[0] = Path.MOVETO
return Path(p.vertices, c)
else:
return p
def concatenate_paths(paths):
"""
concatenate list of paths into a single path.
"""
vertices = []
codes = []
for p in paths:
p = make_path_regular(p)
vertices.append(p.vertices)
codes.append(p.codes)
_path = Path(np.concatenate(vertices),
np.concatenate(codes))
return _path
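# Usage sketch (added note; assumes the simple two-segment paths shown):
#   p1 = Path(np.array([[0., 0.], [1., 0.]]))
#   p2 = Path(np.array([[1., 0.], [1., 1.]]))
#   concatenate_paths([p1, p2])   # 4 vertices, codes MOVETO/LINETO per sub-path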
|
gpl-2.0
|
jjx02230808/project0223
|
examples/plot_isotonic_regression.py
|
303
|
1767
|
"""
===================
Isotonic Regression
===================
An illustration of the isotonic regression on generated data. The
isotonic regression finds a non-decreasing approximation of a function
while minimizing the mean squared error on the training data. The benefit
of such a model is that it does not assume any form for the target
function such as linearity. For comparison a linear regression is also
presented.
"""
print(__doc__)
# Author: Nelle Varoquaux <[email protected]>
# Alexandre Gramfort <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from sklearn.linear_model import LinearRegression
from sklearn.isotonic import IsotonicRegression
from sklearn.utils import check_random_state
n = 100
x = np.arange(n)
rs = check_random_state(0)
y = rs.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
###############################################################################
# Fit IsotonicRegression and LinearRegression models
ir = IsotonicRegression()
y_ = ir.fit_transform(x, y)
lr = LinearRegression()
lr.fit(x[:, np.newaxis], y) # x needs to be 2d for LinearRegression
###############################################################################
# plot result
segments = [[[i, y[i]], [i, y_[i]]] for i in range(n)]
lc = LineCollection(segments, zorder=0)
lc.set_array(np.ones(len(y)))
lc.set_linewidths(0.5 * np.ones(n))
fig = plt.figure()
plt.plot(x, y, 'r.', markersize=12)
plt.plot(x, y_, 'g.-', markersize=12)
plt.plot(x, lr.predict(x[:, np.newaxis]), 'b-')
plt.gca().add_collection(lc)
plt.legend(('Data', 'Isotonic Fit', 'Linear Fit'), loc='lower right')
plt.title('Isotonic regression')
plt.show()
|
bsd-3-clause
|
YinongLong/scikit-learn
|
sklearn/feature_extraction/tests/test_feature_hasher.py
|
28
|
3652
|
from __future__ import unicode_literals
import numpy as np
from sklearn.feature_extraction import FeatureHasher
from nose.tools import assert_raises, assert_true
from numpy.testing import assert_array_equal, assert_equal
def test_feature_hasher_dicts():
h = FeatureHasher(n_features=16)
assert_equal("dict", h.input_type)
raw_X = [{"foo": "bar", "dada": 42, "tzara": 37},
{"foo": "baz", "gaga": u"string1"}]
X1 = FeatureHasher(n_features=16).transform(raw_X)
gen = (iter(d.items()) for d in raw_X)
X2 = FeatureHasher(n_features=16, input_type="pair").transform(gen)
assert_array_equal(X1.toarray(), X2.toarray())
def test_feature_hasher_strings():
# mix byte and Unicode strings; note that "foo" is a duplicate in row 0
raw_X = [["foo", "bar", "baz", "foo".encode("ascii")],
["bar".encode("ascii"), "baz", "quux"]]
for lg_n_features in (7, 9, 11, 16, 22):
n_features = 2 ** lg_n_features
it = (x for x in raw_X) # iterable
h = FeatureHasher(n_features, non_negative=True, input_type="string")
X = h.transform(it)
assert_equal(X.shape[0], len(raw_X))
assert_equal(X.shape[1], n_features)
assert_true(np.all(X.data > 0))
assert_equal(X[0].sum(), 4)
assert_equal(X[1].sum(), 3)
assert_equal(X.nnz, 6)
def test_feature_hasher_pairs():
raw_X = (iter(d.items()) for d in [{"foo": 1, "bar": 2},
{"baz": 3, "quux": 4, "foo": -1}])
h = FeatureHasher(n_features=16, input_type="pair")
x1, x2 = h.transform(raw_X).toarray()
x1_nz = sorted(np.abs(x1[x1 != 0]))
x2_nz = sorted(np.abs(x2[x2 != 0]))
assert_equal([1, 2], x1_nz)
assert_equal([1, 3, 4], x2_nz)
def test_feature_hasher_pairs_with_string_values():
raw_X = (iter(d.items()) for d in [{"foo": 1, "bar": "a"},
{"baz": u"abc", "quux": 4, "foo": -1}])
h = FeatureHasher(n_features=16, input_type="pair")
x1, x2 = h.transform(raw_X).toarray()
x1_nz = sorted(np.abs(x1[x1 != 0]))
x2_nz = sorted(np.abs(x2[x2 != 0]))
assert_equal([1, 1], x1_nz)
assert_equal([1, 1, 4], x2_nz)
raw_X = (iter(d.items()) for d in [{"bax": "abc"},
{"bax": "abc"}])
x1, x2 = h.transform(raw_X).toarray()
x1_nz = np.abs(x1[x1 != 0])
x2_nz = np.abs(x2[x2 != 0])
assert_equal([1], x1_nz)
assert_equal([1], x2_nz)
assert_equal(x1, x2)
def test_hash_empty_input():
n_features = 16
raw_X = [[], (), iter(range(0))]
h = FeatureHasher(n_features=n_features, input_type="string")
X = h.transform(raw_X)
assert_array_equal(X.A, np.zeros((len(raw_X), n_features)))
def test_hasher_invalid_input():
assert_raises(ValueError, FeatureHasher, input_type="gobbledygook")
assert_raises(ValueError, FeatureHasher, n_features=-1)
assert_raises(ValueError, FeatureHasher, n_features=0)
assert_raises(TypeError, FeatureHasher, n_features='ham')
h = FeatureHasher(n_features=np.uint16(2 ** 6))
assert_raises(ValueError, h.transform, [])
assert_raises(Exception, h.transform, [[5.5]])
assert_raises(Exception, h.transform, [[None]])
def test_hasher_set_params():
# Test delayed input validation in fit (useful for grid search).
hasher = FeatureHasher()
hasher.set_params(n_features=np.inf)
assert_raises(TypeError, hasher.fit)
def test_hasher_zeros():
# Assert that no zeros are materialized in the output.
X = FeatureHasher().transform([{'foo': 0}])
assert_equal(X.data.shape, (0,))
|
bsd-3-clause
|
JosmanPS/scikit-learn
|
examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py
|
218
|
3893
|
"""
==============================================
Feature agglomeration vs. univariate selection
==============================================
This example compares 2 dimensionality reduction strategies:
- univariate feature selection with Anova
- feature agglomeration with Ward hierarchical clustering
Both methods are compared in a regression problem using
a BayesianRidge as supervised estimator.
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
import shutil
import tempfile
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg, ndimage
from sklearn.feature_extraction.image import grid_to_graph
from sklearn import feature_selection
from sklearn.cluster import FeatureAgglomeration
from sklearn.linear_model import BayesianRidge
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.externals.joblib import Memory
from sklearn.cross_validation import KFold
###############################################################################
# Generate data
n_samples = 200
size = 40 # image size
roi_size = 15
snr = 5.
np.random.seed(0)
mask = np.ones([size, size], dtype=np.bool)
coef = np.zeros((size, size))
coef[0:roi_size, 0:roi_size] = -1.
coef[-roi_size:, -roi_size:] = 1.
X = np.random.randn(n_samples, size ** 2)
for x in X: # smooth data
x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel()
X -= X.mean(axis=0)
X /= X.std(axis=0)
y = np.dot(X, coef.ravel())
noise = np.random.randn(y.shape[0])
noise_coef = (linalg.norm(y, 2) / np.exp(snr / 20.)) / linalg.norm(noise, 2)
y += noise_coef * noise # add noise
###############################################################################
# Compute the coefs of a Bayesian Ridge with GridSearch
cv = KFold(len(y), 2) # cross-validation generator for model selection
ridge = BayesianRidge()
cachedir = tempfile.mkdtemp()
mem = Memory(cachedir=cachedir, verbose=1)
# Ward agglomeration followed by BayesianRidge
connectivity = grid_to_graph(n_x=size, n_y=size)
ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity,
memory=mem)
clf = Pipeline([('ward', ward), ('ridge', ridge)])
# Select the optimal number of parcels with grid search
clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_agglomeration_ = coef_.reshape(size, size)
# Anova univariate feature selection followed by BayesianRidge
f_regression = mem.cache(feature_selection.f_regression) # caching function
anova = feature_selection.SelectPercentile(f_regression)
clf = Pipeline([('anova', anova), ('ridge', ridge)])
# Select the optimal percentage of features with grid search
clf = GridSearchCV(clf, {'anova__percentile': [5, 10, 20]}, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_selection_ = coef_.reshape(size, size)
###############################################################################
# Inverse the transformation to plot the results on an image
plt.close('all')
plt.figure(figsize=(7.3, 2.7))
plt.subplot(1, 3, 1)
plt.imshow(coef, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("True weights")
plt.subplot(1, 3, 2)
plt.imshow(coef_selection_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Selection")
plt.subplot(1, 3, 3)
plt.imshow(coef_agglomeration_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Agglomeration")
plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.16, 0.26)
plt.show()
# Attempt to remove the temporary cachedir, but don't worry if it fails
shutil.rmtree(cachedir, ignore_errors=True)
|
bsd-3-clause
|
SnakeJenny/TensorFlow
|
tensorflow/contrib/learn/python/learn/estimators/estimator_input_test.py
|
72
|
12865
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator input."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tempfile
import numpy as np
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner_impl
_BOSTON_INPUT_DIM = 13
_IRIS_INPUT_DIM = 4
def boston_input_fn(num_epochs=None):
boston = base.load_boston()
features = input_lib.limit_epochs(
array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM]),
num_epochs=num_epochs)
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
def boston_input_fn_with_queue(num_epochs=None):
features, labels = boston_input_fn(num_epochs=num_epochs)
# Create a minimal queue runner.
fake_queue = data_flow_ops.FIFOQueue(30, dtypes.int32)
queue_runner = queue_runner_impl.QueueRunner(fake_queue,
[constant_op.constant(0)])
queue_runner_impl.add_queue_runner(queue_runner)
return features, labels
def iris_input_fn():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(iris.target), [-1])
return features, labels
def iris_input_fn_labels_dict():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = {
'labels': array_ops.reshape(constant_op.constant(iris.target), [-1])
}
return features, labels
def boston_eval_fn():
boston = base.load_boston()
n_examples = len(boston.target)
features = array_ops.reshape(
constant_op.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(
constant_op.constant(boston.target), [n_examples, 1])
return array_ops.concat([features, features], 0), array_ops.concat(
[labels, labels], 0)
def extract(data, key):
if isinstance(data, dict):
assert key in data
return data[key]
else:
return data
def linear_model_params_fn(features, labels, mode, params):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
variables.get_global_step(),
optimizer='Adagrad',
learning_rate=params['learning_rate'])
return prediction, loss, train_op
def linear_model_fn(features, labels, mode):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
if isinstance(features, dict):
(_, features), = features.items()
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return prediction, loss, train_op
def linear_model_fn_with_model_fn_ops(features, labels, mode):
"""Same as linear_model_fn, but returns `ModelFnOps`."""
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return model_fn.ModelFnOps(
mode=mode, predictions=prediction, loss=loss, train_op=train_op)
def logistic_model_no_mode_fn(features, labels):
features = extract(features, 'input')
labels = extract(labels, 'labels')
labels = array_ops.one_hot(labels, 3, 1, 0)
prediction, loss = (models.logistic_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return {
'class': math_ops.argmax(prediction, 1),
'prob': prediction
}, loss, train_op
VOCAB_FILE_CONTENT = 'emerson\nlake\npalmer\n'
EXTRA_FILE_CONTENT = 'kermit\npiggy\nralph\n'
class EstimatorInputTest(test.TestCase):
def testContinueTrainingDictionaryInput(self):
boston = base.load_boston()
output_dir = tempfile.mkdtemp()
est = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir)
boston_input = {'input': boston.data}
float64_target = {'labels': boston.target.astype(np.float64)}
est.fit(x=boston_input, y=float64_target, steps=50)
scores = est.evaluate(
x=boston_input,
y=float64_target,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
del est
# Create another estimator object with the same output dir.
est2 = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir)
# Check we can evaluate and predict.
scores2 = est2.evaluate(
x=boston_input,
y=float64_target,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
self.assertAllClose(scores2['MSE'], scores['MSE'])
predictions = np.array(list(est2.predict(x=boston_input)))
other_score = _sklearn.mean_squared_error(predictions,
float64_target['labels'])
self.assertAllClose(other_score, scores['MSE'])
def testBostonAll(self):
boston = base.load_boston()
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
float64_labels = boston.target.astype(np.float64)
est.fit(x=boston.data, y=float64_labels, steps=100)
scores = est.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
predictions = np.array(list(est.predict(x=boston.data)))
other_score = _sklearn.mean_squared_error(predictions, boston.target)
self.assertAllClose(scores['MSE'], other_score)
self.assertTrue('global_step' in scores)
self.assertEqual(100, scores['global_step'])
def testBostonAllDictionaryInput(self):
boston = base.load_boston()
est = estimator.Estimator(model_fn=linear_model_fn)
boston_input = {'input': boston.data}
float64_target = {'labels': boston.target.astype(np.float64)}
est.fit(x=boston_input, y=float64_target, steps=100)
scores = est.evaluate(
x=boston_input,
y=float64_target,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
predictions = np.array(list(est.predict(x=boston_input)))
other_score = _sklearn.mean_squared_error(predictions, boston.target)
self.assertAllClose(other_score, scores['MSE'])
self.assertTrue('global_step' in scores)
self.assertEqual(scores['global_step'], 100)
def testIrisAll(self):
iris = base.load_iris()
est = estimator.SKCompat(
estimator.Estimator(model_fn=logistic_model_no_mode_fn))
est.fit(iris.data, iris.target, steps=100)
scores = est.score(
x=iris.data,
y=iris.target,
metrics={('accuracy', 'class'): metric_ops.streaming_accuracy})
predictions = est.predict(x=iris.data)
predictions_class = est.predict(x=iris.data, outputs=['class'])['class']
self.assertEqual(predictions['prob'].shape[0], iris.target.shape[0])
self.assertAllClose(predictions['class'], predictions_class)
self.assertAllClose(
predictions['class'], np.argmax(
predictions['prob'], axis=1))
other_score = _sklearn.accuracy_score(iris.target, predictions['class'])
self.assertAllClose(scores['accuracy'], other_score)
self.assertTrue('global_step' in scores)
self.assertEqual(100, scores['global_step'])
def testIrisAllDictionaryInput(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
iris_data = {'input': iris.data}
iris_target = {'labels': iris.target}
est.fit(iris_data, iris_target, steps=100)
scores = est.evaluate(
x=iris_data,
y=iris_target,
metrics={('accuracy', 'class'): metric_ops.streaming_accuracy})
predictions = list(est.predict(x=iris_data))
predictions_class = list(est.predict(x=iris_data, outputs=['class']))
self.assertEqual(len(predictions), iris.target.shape[0])
classes_batch = np.array([p['class'] for p in predictions])
self.assertAllClose(classes_batch,
np.array([p['class'] for p in predictions_class]))
self.assertAllClose(
classes_batch,
np.argmax(
np.array([p['prob'] for p in predictions]), axis=1))
other_score = _sklearn.accuracy_score(iris.target, classes_batch)
self.assertAllClose(other_score, scores['accuracy'])
self.assertTrue('global_step' in scores)
self.assertEqual(scores['global_step'], 100)
def testIrisInputFn(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
est.fit(input_fn=iris_input_fn, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
predictions = list(est.predict(x=iris.data))
self.assertEqual(len(predictions), iris.target.shape[0])
def testIrisInputFnLabelsDict(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
est.fit(input_fn=iris_input_fn_labels_dict, steps=100)
_ = est.evaluate(
input_fn=iris_input_fn_labels_dict,
steps=1,
metrics={
'accuracy':
metric_spec.MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='class',
label_key='labels')
})
predictions = list(est.predict(x=iris.data))
self.assertEqual(len(predictions), iris.target.shape[0])
def testTrainInputFn(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_eval_fn, steps=1)
def testPredictInputFn(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn, num_epochs=1)
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
def testPredictInputFnWithQueue(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn_with_queue, num_epochs=2)
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0] * 2)
def testPredictConstInputFn(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
def input_fn():
features = array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
if __name__ == '__main__':
test.main()
|
apache-2.0
|
quantopian/odo
|
odo/backends/tests/test_hdfstore.py
|
5
|
4628
|
from __future__ import absolute_import, division, print_function
import pytest
from contextlib import contextmanager
from odo.utils import tmpfile
from odo.chunks import chunks
from odo import into, append, convert, resource, discover, odo
import datashape
import pandas as pd
from datetime import datetime
import numpy as np
pytest.importorskip('tables')
df = pd.DataFrame([['a', 1, 10., datetime(2000, 1, 1)],
['ab', 2, 20., datetime(2000, 2, 2)],
['abc', 3, 30., datetime(2000, 3, 3)],
['abcd', 4, 40., datetime(2000, 4, 4)]],
columns=['name', 'a', 'b', 'time'])
@contextmanager
def file(df):
with tmpfile('.hdf5') as fn:
f = pd.HDFStore(fn)
f.put('/data', df, format='table', append=True)
try:
yield fn, f, f.get_storer('/data')
finally:
f.close()
def test_discover():
with file(df) as (fn, f, dset):
assert str(discover(dset)) == str(discover(df))
assert str(discover(f)) == str(discover({'data': df}))
def test_discover_nested():
with tmpfile('hdf5') as fn:
df.to_hdf(fn, '/a/b/data')
df.to_hdf(fn, '/a/b/data2')
df.to_hdf(fn, '/a/data')
hdf = pd.HDFStore(fn)
try:
assert discover(hdf) == discover(
{'a': {'b': {'data': df, 'data2': df}, 'data': df}}
)
finally:
hdf.close()
def eq(a, b):
if isinstance(a, pd.DataFrame):
a = into(np.ndarray, a)
if isinstance(b, pd.DataFrame):
b = into(np.ndarray, b)
c = a == b
if isinstance(c, np.ndarray):
c = c.all()
return c
def test_chunks():
with file(df) as (fn, f, dset):
c = convert(chunks(pd.DataFrame), dset)
assert eq(convert(np.ndarray, c), df)
def test_resource_no_info():
with tmpfile('.hdf5') as fn:
r = resource('hdfstore://' + fn)
try:
assert isinstance(r, pd.HDFStore)
finally:
r.close()
def test_resource_of_dataset():
with tmpfile('.hdf5') as fn:
ds = datashape.dshape('{x: int32, y: 3 * int32}')
r = resource('hdfstore://'+fn+'::/x', dshape=ds)
try:
assert r
finally:
r.parent.close()
def test_append():
with file(df) as (fn, f, dset):
append(dset, df)
append(dset, df)
assert discover(dset).shape == (len(df) * 3,)
def test_into_resource():
with tmpfile('.hdf5') as fn:
d = into('hdfstore://' + fn + '::/x', df)
try:
assert discover(d) == discover(df)
assert eq(into(pd.DataFrame, d), df)
finally:
d.parent.close()
def test_convert_pandas():
with file(df) as (fn, f, dset):
assert eq(convert(pd.DataFrame, dset), df)
def test_convert_chunks():
with file(df) as (fn, f, dset):
c = convert(chunks(pd.DataFrame), dset, chunksize=len(df) / 2)
assert len(list(c)) == 2
assert eq(convert(pd.DataFrame, c), df)
def test_append_chunks():
with file(df) as (fn, f, dset):
append(dset, chunks(pd.DataFrame)([df, df]))
assert discover(dset).shape[0] == len(df) * 3
def test_append_other():
with tmpfile('.hdf5') as fn:
x = into(np.ndarray, df)
dset = into('hdfstore://'+fn+'::/data', x)
try:
assert discover(dset) == discover(df)
finally:
dset.parent.close()
def test_fixed_shape():
with tmpfile('.hdf5') as fn:
df.to_hdf(fn, 'foo')
r = resource('hdfstore://'+fn+'::/foo')
try:
assert isinstance(r.shape, list)
assert discover(r).shape == (len(df),)
finally:
r.parent.close()
def test_fixed_convert():
with tmpfile('.hdf5') as fn:
df.to_hdf(fn, 'foo')
r = resource('hdfstore://'+fn+'::/foo')
try:
assert eq(convert(pd.DataFrame, r), df)
finally:
r.parent.close()
def test_append_vs_write():
import pandas.util.testing as tm
with tmpfile('.hdf5') as fn:
df.to_hdf(fn, 'foo', append=True)
store = odo(df, 'hdfstore://%s::foo' % fn)
try:
newdf = odo(store, pd.DataFrame)
finally:
store.parent.close()
tm.assert_frame_equal(newdf, pd.concat([df, df]))
with tmpfile('.hdf5') as fn:
store = odo(df, 'hdfstore://%s::foo' % fn, mode='w')
try:
newdf = odo(store, pd.DataFrame)
finally:
store.parent.close()
tm.assert_frame_equal(newdf, df)
|
bsd-3-clause
|
einarhuseby/arctic
|
tests/integration/store/test_version_store_audit.py
|
4
|
8283
|
from bson import ObjectId
from datetime import datetime as dt
from mock import patch
from pandas.util.testing import assert_frame_equal
from pymongo.errors import OperationFailure
import pytest
from arctic.store.audit import ArcticTransaction
from arctic.exceptions import ConcurrentModificationException, NoDataFoundException
from ...util import read_str_as_pandas
ts1 = read_str_as_pandas(""" times | near
2012-09-08 17:06:11.040 | 1.0
2012-10-08 17:06:11.040 | 2.0
2012-10-09 17:06:11.040 | 2.5
2012-11-08 17:06:11.040 | 3.0""")
ts2 = read_str_as_pandas(""" times | near
2012-09-08 17:06:11.040 | 1.0
2012-10-08 17:06:11.040 | 4.0
2012-10-09 17:06:11.040 | 4.5
2012-10-10 17:06:11.040 | 5.0
2012-11-08 17:06:11.040 | 3.0""")
ts3 = read_str_as_pandas(""" times | near
2012-09-08 17:06:11.040 | 1.0
2012-10-08 17:06:11.040 | 4.0
2012-10-09 17:06:11.040 | 4.5
2012-10-10 17:06:11.040 | 5.0
2012-11-08 17:06:11.040 | 3.0
2012-11-09 17:06:11.040 | 44.0""")
ts1_append = read_str_as_pandas(""" times | near
2012-09-08 17:06:11.040 | 1.0
2012-10-08 17:06:11.040 | 2.0
2012-10-09 17:06:11.040 | 2.5
2012-11-08 17:06:11.040 | 3.0
2012-11-09 17:06:11.040 | 3.0""")
symbol = 'TS1'
def test_ArcticTransaction_can_do_first_writes(library):
with ArcticTransaction(library, 'SYMBOL_NOT_HERE', 'user', 'log') as cwb:
cwb.write('SYMBOL_NOT_HERE', ts1)
wrote_vi = library.read('SYMBOL_NOT_HERE')
assert_frame_equal(wrote_vi.data, ts1)
def test_ArcticTransaction_detects_concurrent_writes(library):
library.write('FOO', ts1)
from threading import Event, Thread
e1 = Event()
e2 = Event()
def losing_writer():
        # will attempt to write version 2; it should find that version 2 is already there and end up writing version 3
with pytest.raises(ConcurrentModificationException):
with ArcticTransaction(library, 'FOO', 'user', 'log') as cwb:
cwb.write('FOO', ts1_append, metadata={'foo': 'bar'})
e1.wait()
def winning_writer():
        # will attempt to write version 2 as well
with ArcticTransaction(library, 'FOO', 'user', 'log') as cwb:
cwb.write('FOO', ts2, metadata={'foo': 'bar'})
e2.wait()
t1 = Thread(target=losing_writer)
t2 = Thread(target=winning_writer)
t1.start()
t2.start()
# both read the same timeseries and are locked doing some 'work'
e2.set()
# t2 should now be able to finish
t2.join()
e1.set()
t1.join()
# we're expecting the losing_writer to undo its write once it realises that it wrote v3 instead of v2
wrote_vi = library.read('FOO')
assert_frame_equal(wrote_vi.data, ts2)
assert {'foo': 'bar'} == wrote_vi.metadata
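# Illustrative sketch (not part of the original test suite): one way a caller
# might react to ConcurrentModificationException -- re-read the symbol to see
# the version that won the race before deciding whether to retry or merge.
# Only calls already exercised above (ArcticTransaction, cwb.write,
# library.read) are used; the helper name and the "return the latest version"
# policy are assumptions added for exposition.
def _write_or_report_conflict(library, sym, data, user='user', log='log'):
    try:
        with ArcticTransaction(library, sym, user, log) as txn:
            txn.write(sym, data)
    except ConcurrentModificationException:
        # somebody else committed first; hand back what is now current
        return library.read(sym)
    return library.read(sym)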
def test_audit_writes(library):
with ArcticTransaction(library, symbol, 'u1', 'l1') as mt:
mt.write(symbol, ts1)
with ArcticTransaction(library, symbol, 'u2', 'l2') as mt:
mt.write(symbol, ts2)
audit_log = library.read_audit_log(symbol)
assert audit_log == [{u'new_v': 2, u'symbol': u'TS1', u'message': u'l2', u'user': u'u2', u'orig_v': 1},
{u'new_v': 1, u'symbol': u'TS1', u'message': u'l1', u'user': u'u1', u'orig_v': 0}]
assert_frame_equal(ts1, library.read(symbol, audit_log[0]['orig_v']).data)
assert_frame_equal(ts2, library.read(symbol, audit_log[0]['new_v']).data)
def test_metadata_changes_writes(library):
with ArcticTransaction(library, symbol, 'u1', 'l1') as mt:
mt.write(symbol, ts1, metadata={'original': 'data'})
with ArcticTransaction(library, symbol, 'u2', 'l2') as mt:
mt.write(symbol, ts1, metadata={'some': 'data', 'original': 'data'})
audit_log = library.read_audit_log(symbol)
assert audit_log == [{u'new_v': 2, u'symbol': u'TS1', u'message': u'l2', u'user': u'u2', u'orig_v': 1},
{u'new_v': 1, u'symbol': u'TS1', u'message': u'l1', u'user': u'u1', u'orig_v': 0}]
assert_frame_equal(ts1, library.read(symbol, audit_log[0]['orig_v']).data)
assert_frame_equal(ts1, library.read(symbol, audit_log[0]['new_v']).data)
assert library.read(symbol, audit_log[0]['orig_v']).metadata == {'original': 'data'}
assert library.read(symbol, audit_log[0]['new_v']).metadata == {'some': 'data', 'original': 'data'}
def test_cleanup_orphaned_versions_integration(library):
_id = ObjectId.from_datetime(dt(2013, 1, 1))
with patch('bson.ObjectId', return_value=_id):
with ArcticTransaction(library, symbol, 'u1', 'l1') as mt:
mt.write(symbol, ts1)
assert library._versions.find({'parent': {'$size': 1}}).count() == 1
library._cleanup_orphaned_versions(False)
assert library._versions.find({'parent': {'$size': 1}}).count() == 1
def test_corrupted_read_writes_new(library):
with ArcticTransaction(library, symbol, 'u1', 'l1') as mt:
mt.write(symbol, ts1)
res = library.read(symbol)
assert res.version == 1
with ArcticTransaction(library, symbol, 'u1', 'l2') as mt:
mt.write(symbol, ts2)
res = library.read(symbol)
assert res.version == 2
with patch.object(library, 'read') as l:
l.side_effect = OperationFailure('some failure')
with ArcticTransaction(library, symbol, 'u1', 'l2') as mt:
mt.write(symbol, ts3, metadata={'a': 1, 'b': 2})
res = library.read(symbol)
# Corrupted data still increments on write to next version correctly with new data
assert res.version == 3
assert_frame_equal(ts3, library.read(symbol, 3).data)
assert res.metadata == {'a': 1, 'b': 2}
with patch.object(library, 'read') as l:
l.side_effect = OperationFailure('some failure')
with ArcticTransaction(library, symbol, 'u1', 'l2') as mt:
mt.write(symbol, ts3, metadata={'a': 1, 'b': 2})
res = library.read(symbol)
# Corrupted data still increments to next version correctly with ts & metadata unchanged
assert res.version == 4
assert_frame_equal(ts3, library.read(symbol, 4).data)
assert res.metadata == {'a': 1, 'b': 2}
def test_write_after_delete(library):
with ArcticTransaction(library, symbol, 'u1', 'l') as mt:
mt.write(symbol, ts1)
library.delete(symbol)
with ArcticTransaction(library, symbol, 'u1', 'l') as mt:
mt.write(symbol, ts1_append)
assert_frame_equal(library.read(symbol).data, ts1_append)
def test_ArcticTransaction_write_skips_for_exact_match(library):
ts = read_str_as_pandas("""times | PX_LAST
2014-10-31 21:30:00.000 | 204324.674
2014-11-13 21:30:00.000 | 193964.45
2014-11-14 21:30:00.000 | 193650.403""")
with ArcticTransaction(library, symbol, 'u1', 'l1') as mt:
mt.write(symbol, ts)
version = library.read(symbol).version
# try and store same TimeSeries again
with ArcticTransaction(library, symbol, 'u1', 'l2') as mt:
mt.write(symbol, ts)
assert library.read(symbol).version == version
def test_ArcticTransaction_write_doesnt_skip_for_close_ts(library):
orig_ts = read_str_as_pandas("""times | PX_LAST
2014-10-31 21:30:00.000 | 204324.674
2014-11-13 21:30:00.000 | 193964.45
2014-11-14 21:30:00.000 | 193650.403""")
with ArcticTransaction(library, symbol, 'u1', 'l1') as mt:
mt.write(symbol, orig_ts)
assert_frame_equal(library.read(symbol).data, orig_ts)
    # try and store a slightly different TimeSeries
new_ts = read_str_as_pandas("""times | PX_LAST
2014-10-31 21:30:00.000 | 204324.672
2014-11-13 21:30:00.000 | 193964.453
2014-11-14 21:30:00.000 | 193650.406""")
with ArcticTransaction(library, symbol, 'u1', 'l2') as mt:
mt.write(symbol, new_ts)
assert_frame_equal(library.read(symbol).data, new_ts)
|
lgpl-2.1
|
sunqm/pyscf
|
examples/pbc/09-band_ase.py
|
1
|
1824
|
import pyscf.pbc.tools.pyscf_ase as pyscf_ase
import pyscf.pbc.gto as pbcgto
import pyscf.pbc.dft as pbcdft
import matplotlib.pyplot as plt
from ase.lattice import bulk
from ase.dft.kpoints import sc_special_points as special_points, get_bandpath
c = bulk('C', 'diamond', a=3.5668)
print(c.get_volume())
cell = pbcgto.Cell()
cell.atom = pyscf_ase.ase_atoms_to_pyscf(c)
cell.a = c.cell
cell.basis = 'gth-szv'
cell.pseudo = 'gth-pade'
cell.verbose = 5
cell.build(None,None)
points = special_points['fcc']
G = points['G']
X = points['X']
W = points['W']
K = points['K']
L = points['L']
band_kpts, kpath, sp_points = get_bandpath([L, G, X, W, K, G], c.cell, npoints=50)
band_kpts = cell.get_abs_kpts(band_kpts)
#
# band structure from Gamma point sampling
#
mf = pbcdft.RKS(cell)
print(mf.kernel())
e_kn = mf.get_bands(band_kpts)[0]
vbmax = -99
for en in e_kn:
vb_k = en[cell.nelectron//2-1]
if vb_k > vbmax:
vbmax = vb_k
e_kn = [en - vbmax for en in e_kn]
#
# band structure from 222 k-point sampling
#
kmf = pbcdft.KRKS(cell, cell.make_kpts([2,2,2]))
print(kmf.kernel())
e_kn_2 = kmf.get_bands(band_kpts)[0]
vbmax = -99
for en in e_kn_2:
vb_k = en[cell.nelectron//2-1]
if vb_k > vbmax:
vbmax = vb_k
e_kn_2 = [en - vbmax for en in e_kn_2]
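#
# Illustrative sketch (not part of the original example): the VBM alignment
# performed by the two loops above, factored into a small helper.  The
# function name is an assumption added for exposition and is not called here.
#
def _align_to_vbm(bands, nelectron):
    # index of the highest occupied (valence) band for a closed-shell system
    vb_index = nelectron // 2 - 1
    vbmax = max(en[vb_index] for en in bands)
    return [en - vbmax for en in bands]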
au2ev = 27.21139
emin = -1*au2ev
emax = 1*au2ev
plt.figure(figsize=(5, 6))
nbands = cell.nao_nr()
for n in range(nbands):
plt.plot(kpath, [e[n]*au2ev for e in e_kn], color='#87CEEB')
plt.plot(kpath, [e[n]*au2ev for e in e_kn_2], color='#4169E1')
for p in sp_points:
plt.plot([p, p], [emin, emax], 'k-')
plt.plot([0, sp_points[-1]], [0, 0], 'k-')
plt.xticks(sp_points, ['$%s$' % n for n in ['L', r'\Gamma', 'X', 'W', 'K', r'\Gamma']])
plt.axis(xmin=0, xmax=sp_points[-1], ymin=emin, ymax=emax)
plt.xlabel('k-vector')
plt.show()
|
apache-2.0
|
scipy/scipy
|
scipy/stats/_entropy.py
|
12
|
11491
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 2 09:06:05 2021
@author: matth
"""
from __future__ import annotations
import math
import numpy as np
from scipy import special
from typing import Optional, Union
__all__ = ['entropy', 'differential_entropy']
def entropy(pk, qk=None, base=None, axis=0):
"""Calculate the entropy of a distribution for given probability values.
If only probabilities `pk` are given, the entropy is calculated as
``S = -sum(pk * log(pk), axis=axis)``.
If `qk` is not None, then compute the Kullback-Leibler divergence
``S = sum(pk * log(pk / qk), axis=axis)``.
This routine will normalize `pk` and `qk` if they don't sum to 1.
Parameters
----------
pk : sequence
Defines the (discrete) distribution. ``pk[i]`` is the (possibly
unnormalized) probability of event ``i``.
qk : sequence, optional
Sequence against which the relative entropy is computed. Should be in
the same format as `pk`.
base : float, optional
The logarithmic base to use, defaults to ``e`` (natural logarithm).
    axis : int, optional
The axis along which the entropy is calculated. Default is 0.
Returns
-------
S : float
The calculated entropy.
Examples
--------
>>> from scipy.stats import entropy
Bernoulli trial with different p.
The outcome of a fair coin is the most uncertain:
>>> entropy([1/2, 1/2], base=2)
1.0
The outcome of a biased coin is less uncertain:
>>> entropy([9/10, 1/10], base=2)
0.46899559358928117
Relative entropy:
>>> entropy([1/2, 1/2], qk=[9/10, 1/10])
0.5108256237659907
"""
if base is not None and base <= 0:
raise ValueError("`base` must be a positive number or `None`.")
pk = np.asarray(pk)
pk = 1.0*pk / np.sum(pk, axis=axis, keepdims=True)
if qk is None:
vec = special.entr(pk)
else:
qk = np.asarray(qk)
pk, qk = np.broadcast_arrays(pk, qk)
qk = 1.0*qk / np.sum(qk, axis=axis, keepdims=True)
vec = special.rel_entr(pk, qk)
S = np.sum(vec, axis=axis)
if base is not None:
S /= np.log(base)
return S
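# Illustrative sketch (not part of the scipy API): check that ``entropy``
# matches a direct NumPy evaluation of S = -sum(pk * log(pk)) and of the
# KL divergence sum(pk * log(pk / qk)) once the inputs are normalized.  The
# helper name is arbitrary and exists only for exposition.
def _entropy_formula_sketch():
    pk = np.array([2., 3., 5.])
    qk = np.array([1., 1., 2.])
    p = pk / pk.sum()
    q = qk / qk.sum()
    # entropy() normalizes pk (and qk) internally, so these should agree
    assert np.isclose(entropy(pk), -np.sum(p * np.log(p)))
    assert np.isclose(entropy(pk, qk), np.sum(p * np.log(p / q)))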
def differential_entropy(
values: np.typing.ArrayLike,
*,
window_length: Optional[int] = None,
base: Optional[float] = None,
axis: int = 0,
method: str = "auto",
) -> Union[np.number, np.ndarray]:
r"""Given a sample of a distribution, estimate the differential entropy.
Several estimation methods are available using the `method` parameter. By
    default, a method is selected based on the size of the sample.
Parameters
----------
values : sequence
Sample from a continuous distribution.
window_length : int, optional
Window length for computing Vasicek estimate. Must be an integer
between 1 and half of the sample size. If ``None`` (the default), it
uses the heuristic value
.. math::
\left \lfloor \sqrt{n} + 0.5 \right \rfloor
where :math:`n` is the sample size. This heuristic was originally
proposed in [2]_ and has become common in the literature.
base : float, optional
The logarithmic base to use, defaults to ``e`` (natural logarithm).
axis : int, optional
The axis along which the differential entropy is calculated.
Default is 0.
method : {'vasicek', 'van es', 'ebrahimi', 'correa', 'auto'}, optional
The method used to estimate the differential entropy from the sample.
Default is ``'auto'``. See Notes for more information.
Returns
-------
entropy : float
The calculated differential entropy.
Notes
-----
This function will converge to the true differential entropy in the limit
.. math::
n \to \infty, \quad m \to \infty, \quad \frac{m}{n} \to 0
The optimal choice of ``window_length`` for a given sample size depends on
the (unknown) distribution. Typically, the smoother the density of the
distribution, the larger the optimal value of ``window_length`` [1]_.
The following options are available for the `method` parameter.
* ``'vasicek'`` uses the estimator presented in [1]_. This is
one of the first and most influential estimators of differential entropy.
* ``'van es'`` uses the bias-corrected estimator presented in [3]_, which
is not only consistent but, under some conditions, asymptotically normal.
* ``'ebrahimi'`` uses an estimator presented in [4]_, which was shown
in simulation to have smaller bias and mean squared error than
the Vasicek estimator.
* ``'correa'`` uses the estimator presented in [5]_ based on local linear
regression. In a simulation study, it had consistently smaller mean
      square error than the Vasicek estimator, but it is more expensive to
compute.
* ``'auto'`` selects the method automatically (default). Currently,
this selects ``'van es'`` for very small samples (<10), ``'ebrahimi'``
for moderate sample sizes (11-1000), and ``'vasicek'`` for larger
samples, but this behavior is subject to change in future versions.
All estimators are implemented as described in [6]_.
References
----------
.. [1] Vasicek, O. (1976). A test for normality based on sample entropy.
Journal of the Royal Statistical Society:
Series B (Methodological), 38(1), 54-59.
.. [2] Crzcgorzewski, P., & Wirczorkowski, R. (1999). Entropy-based
goodness-of-fit test for exponentiality. Communications in
Statistics-Theory and Methods, 28(5), 1183-1202.
.. [3] Van Es, B. (1992). Estimating functionals related to a density by a
class of statistics based on spacings. Scandinavian Journal of
Statistics, 61-72.
.. [4] Ebrahimi, N., Pflughoeft, K., & Soofi, E. S. (1994). Two measures
of sample entropy. Statistics & Probability Letters, 20(3), 225-234.
.. [5] Correa, J. C. (1995). A new estimator of entropy. Communications
in Statistics-Theory and Methods, 24(10), 2439-2449.
.. [6] Noughabi, H. A. (2015). Entropy Estimation Using Numerical Methods.
Annals of Data Science, 2(2), 231-241.
https://link.springer.com/article/10.1007/s40745-015-0045-9
Examples
--------
>>> from scipy.stats import differential_entropy, norm
Entropy of a standard normal distribution:
>>> rng = np.random.default_rng()
>>> values = rng.standard_normal(100)
>>> differential_entropy(values)
1.3407817436640392
Compare with the true entropy:
>>> float(norm.entropy())
1.4189385332046727
For several sample sizes between 5 and 1000, compare the accuracy of
the ``'vasicek'``, ``'van es'``, and ``'ebrahimi'`` methods. Specifically,
compare the root mean squared error (over 1000 trials) between the estimate
and the true differential entropy of the distribution.
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>>
>>>
>>> def rmse(res, expected):
... '''Root mean squared error'''
... return np.sqrt(np.mean((res - expected)**2))
>>>
>>>
>>> a, b = np.log10(5), np.log10(1000)
>>> ns = np.round(np.logspace(a, b, 10)).astype(int)
>>> reps = 1000 # number of repetitions for each sample size
>>> expected = stats.expon.entropy()
>>>
>>> method_errors = {'vasicek': [], 'van es': [], 'ebrahimi': []}
>>> for method in method_errors:
... for n in ns:
... rvs = stats.expon.rvs(size=(reps, n), random_state=rng)
... res = stats.differential_entropy(rvs, method=method, axis=-1)
... error = rmse(res, expected)
... method_errors[method].append(error)
>>>
>>> for method, errors in method_errors.items():
... plt.loglog(ns, errors, label=method)
>>>
>>> plt.legend()
>>> plt.xlabel('sample size')
>>> plt.ylabel('RMSE (1000 trials)')
>>> plt.title('Entropy Estimator Error (Exponential Distribution)')
"""
values = np.asarray(values)
values = np.moveaxis(values, axis, -1)
n = values.shape[-1] # number of observations
if window_length is None:
window_length = math.floor(math.sqrt(n) + 0.5)
if not 2 <= 2 * window_length < n:
raise ValueError(
f"Window length ({window_length}) must be positive and less "
f"than half the sample size ({n}).",
)
if base is not None and base <= 0:
raise ValueError("`base` must be a positive number or `None`.")
sorted_data = np.sort(values, axis=-1)
methods = {"vasicek": _vasicek_entropy,
"van es": _van_es_entropy,
"correa": _correa_entropy,
"ebrahimi": _ebrahimi_entropy,
"auto": _vasicek_entropy}
method = method.lower()
if method not in methods:
message = f"`method` must be one of {set(methods)}"
raise ValueError(message)
if method == "auto":
if n <= 10:
method = 'van es'
elif n <= 1000:
method = 'ebrahimi'
else:
method = 'vasicek'
res = methods[method](sorted_data, window_length)
if base is not None:
res /= np.log(base)
return res
def _pad_along_last_axis(X, m):
"""Pad the data for computing the rolling window difference."""
# scales a bit better than method in _vasicek_like_entropy
shape = np.array(X.shape)
shape[-1] = m
Xl = np.broadcast_to(X[..., [0]], shape) # [0] vs 0 to maintain shape
Xr = np.broadcast_to(X[..., [-1]], shape)
return np.concatenate((Xl, X, Xr), axis=-1)
def _vasicek_entropy(X, m):
"""Compute the Vasicek estimator as described in [6] Eq. 1.3."""
n = X.shape[-1]
X = _pad_along_last_axis(X, m)
differences = X[..., 2 * m:] - X[..., : -2 * m:]
logs = np.log(n/(2*m) * differences)
return np.mean(logs, axis=-1)
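# Illustrative sketch (not part of the scipy API): the Vasicek estimate of
# [6] Eq. 1.3 written as an explicit loop over order statistics, to make the
# m-spacing and the edge clamping done by _pad_along_last_axis visible.  The
# helper name is arbitrary and exists only for exposition; for a 1-D sample
# it should agree with _vasicek_entropy(np.sort(x), m).
def _vasicek_entropy_loop_sketch(x, m):
    x = np.sort(np.asarray(x, dtype=float))
    n = x.size
    total = 0.0
    for i in range(n):
        upper = x[min(i + m, n - 1)]   # clamp to the largest order statistic
        lower = x[max(i - m, 0)]       # clamp to the smallest order statistic
        total += np.log(n / (2 * m) * (upper - lower))
    return total / n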
def _van_es_entropy(X, m):
"""Compute the van Es estimator as described in [6]."""
# No equation number, but referred to as HVE_mn.
# Typo: there should be a log within the summation.
n = X.shape[-1]
difference = X[..., m:] - X[..., :-m]
term1 = 1/(n-m) * np.sum(np.log((n+1)/m * difference), axis=-1)
k = np.arange(m, n+1)
return term1 + np.sum(1/k) + np.log(m) - np.log(n+1)
def _ebrahimi_entropy(X, m):
"""Compute the Ebrahimi estimator as described in [6]."""
# No equation number, but referred to as HE_mn
n = X.shape[-1]
X = _pad_along_last_axis(X, m)
differences = X[..., 2 * m:] - X[..., : -2 * m:]
i = np.arange(1, n+1).astype(float)
ci = np.ones_like(i)*2
ci[i <= m] = 1 + (i[i <= m] - 1)/m
ci[i >= n - m + 1] = 1 + (n - i[i >= n-m+1])/m
logs = np.log(n * differences / (ci * m))
return np.mean(logs, axis=-1)
def _correa_entropy(X, m):
"""Compute the Correa estimator as described in [6]."""
# No equation number, but referred to as HC_mn
n = X.shape[-1]
X = _pad_along_last_axis(X, m)
i = np.arange(1, n+1)
dj = np.arange(-m, m+1)[:, None]
j = i + dj
j0 = j + m - 1 # 0-indexed version of j
Xibar = np.mean(X[..., j0], axis=-2, keepdims=True)
difference = X[..., j0] - Xibar
num = np.sum(difference*dj, axis=-2) # dj is d-i
den = n*np.sum(difference**2, axis=-2)
return -np.mean(np.log(num/den), axis=-1)
|
bsd-3-clause
|
iskandr/fancyimpute
|
experiments/complete_faces.py
|
2
|
10454
|
from os import mkdir
from os.path import exists, join
from collections import defaultdict
import pylab
from sklearn.datasets import fetch_lfw_people
from sklearn.impute import IterativeImputer
import numpy as np
from fancyimpute import (
SimpleFill,
IterativeSVD,
SoftImpute,
BiScaler,
KNN
)
from fancyimpute.common import masked_mae, masked_mse
def remove_pixels(
full_images,
missing_square_size=32,
random_seed=0):
np.random.seed(random_seed)
incomplete_faces = []
n_faces = len(full_images)
height, width = full_images[0].shape[:2]
for i in range(n_faces):
image = full_images[i].copy()
start_x = np.random.randint(
low=0,
high=height - missing_square_size + 1)
start_y = np.random.randint(
low=0,
high=width - missing_square_size + 1)
image[
start_x: start_x + missing_square_size,
start_y: start_y + missing_square_size] = np.nan
incomplete_faces.append(image)
return np.array(incomplete_faces, dtype=np.float32)
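# Illustrative sketch (not part of the original experiment): what
# remove_pixels does in isolation -- every image comes back with exactly one
# missing_square_size x missing_square_size block of NaNs at a random
# location.  The dummy data and helper name are assumptions for exposition.
def _remove_pixels_sketch():
    fake_faces = np.random.rand(3, 64, 64).astype(np.float32)
    damaged = remove_pixels(fake_faces, missing_square_size=8)
    # each image should be missing exactly 8 * 8 pixels
    assert all(np.isnan(img).sum() == 8 * 8 for img in damaged)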
def rescale_pixel_values(images, order="C"):
"""
Rescale the range of values in images to be between [0, 1]
"""
images = np.asarray(images, order=order).astype("float32")
images -= images.min()
images /= images.max()
return images
def color_balance(images):
images = images.astype("float32")
red = images[:, :, :, 0]
green = images[:, :, :, 1]
blue = images[:, :, :, 2]
combined = (red + green + blue)
total_color = combined.sum()
overall_fraction_red = red.sum() / total_color
overall_fraction_green = green.sum() / total_color
overall_fraction_blue = blue.sum() / total_color
for i in range(images.shape[0]):
image = images[i]
image_total = combined[i].sum()
red_scale = overall_fraction_red / (red[i].sum() / image_total)
green_scale = overall_fraction_green / (green[i].sum() / image_total)
blue_scale = overall_fraction_blue / (blue[i].sum() / image_total)
image[:, :, 0] *= red_scale
image[:, :, 1] *= green_scale
image[:, :, 2] *= blue_scale
image[image < 0] = 0
image[image > 255] = 255
return images
class ResultsTable(object):
def __init__(
self,
images_dict,
percent_missing=0.25,
saved_image_stride=25,
dirname="face_images",
scale_rows=False,
center_rows=False):
self.images_dict = images_dict
self.labels = list(sorted(images_dict.keys()))
self.images_array = np.array(
[images_dict[k] for k in self.labels]).astype("float32")
self.image_shape = self.images_array[0].shape
self.width, self.height = self.image_shape[:2]
self.color = (len(self.image_shape) == 3) and (self.image_shape[2] == 3)
if self.color:
self.images_array = color_balance(self.images_array)
self.n_pixels = self.width * self.height
self.n_features = self.n_pixels * (3 if self.color else 1)
self.n_images = len(self.images_array)
        print("[ResultsTable] # images = %d, color=%s, # features = %d, shape = %s" % (
self.n_images, self.color, self.n_features, self.image_shape))
self.flattened_array_shape = (self.n_images, self.n_features)
self.flattened_images = self.images_array.reshape(self.flattened_array_shape)
n_missing_pixels = int(self.n_pixels * percent_missing)
missing_square_size = int(np.sqrt(n_missing_pixels))
print("[ResultsTable] n_missing_pixels = %d, missing_square_size = %d" % (
n_missing_pixels, missing_square_size))
self.incomplete_images = remove_pixels(
self.images_array,
missing_square_size=missing_square_size)
print("[ResultsTable] Incomplete images shape = %s" % (
self.incomplete_images.shape,))
self.flattened_incomplete_images = self.incomplete_images.reshape(
self.flattened_array_shape)
self.missing_mask = np.isnan(self.flattened_incomplete_images)
self.normalizer = BiScaler(
scale_rows=scale_rows,
center_rows=center_rows,
min_value=self.images_array.min(),
max_value=self.images_array.max())
self.incomplete_normalized = self.normalizer.fit_transform(
self.flattened_incomplete_images)
self.saved_image_indices = list(
range(0, self.n_images, saved_image_stride))
self.saved_images = defaultdict(dict)
self.dirname = dirname
self.mse_dict = {}
self.mae_dict = {}
self.save_images(self.images_array, "original", flattened=False)
self.save_images(self.incomplete_images, "incomplete", flattened=False)
def ensure_dir(self, dirname):
if not exists(dirname):
print("Creating directory: %s" % dirname)
mkdir(dirname)
def save_images(self, images, base_filename, flattened=True):
self.ensure_dir(self.dirname)
for i in self.saved_image_indices:
label = self.labels[i].lower().replace(" ", "_")
image = images[i, :].copy()
if flattened:
image = image.reshape(self.image_shape)
image[np.isnan(image)] = 0
figure = pylab.gcf()
axes = pylab.gca()
extra_kwargs = {}
            if not self.color:
                # a gray colormap only applies to single-channel images;
                # imshow ignores cmap for RGB arrays
                extra_kwargs["cmap"] = "gray"
assert image.min() >= 0, "Image can't contain negative numbers"
if image.max() <= 1:
image *= 256
image[image > 255] = 255
axes.imshow(image.astype("uint8"), **extra_kwargs)
axes.get_xaxis().set_visible(False)
axes.get_yaxis().set_visible(False)
filename = base_filename + ".png"
subdir = join(self.dirname, label)
self.ensure_dir(subdir)
path = join(subdir, filename)
figure.savefig(
path,
bbox_inches='tight')
self.saved_images[i][base_filename] = path
def add_entry(self, solver, name):
print("Running %s" % name)
completed_normalized = solver.fit_transform(self.incomplete_normalized)
completed = self.normalizer.inverse_transform(completed_normalized)
mae = masked_mae(
X_true=self.flattened_images,
X_pred=completed,
mask=self.missing_mask)
mse = masked_mse(
X_true=self.flattened_images,
X_pred=completed,
mask=self.missing_mask)
print("==> %s: MSE=%0.4f MAE=%0.4f" % (name, mse, mae))
self.mse_dict[name] = mse
self.mae_dict[name] = mae
self.save_images(completed, base_filename=name)
def sorted_errors(self):
"""
Generator for (rank, name, MSE, MAE) sorted by increasing MAE
"""
for i, (name, mae) in enumerate(
sorted(self.mae_dict.items(), key=lambda x: x[1])):
yield(i + 1, name, self.mse_dict[name], self.mae_dict[name],)
def print_sorted_errors(self):
for (rank, name, mse, mae) in self.sorted_errors():
print("%d) %s: MSE=%0.4f MAE=%0.4f" % (
rank,
name,
mse,
mae))
def save_html_table(self, filename="results_table.html"):
        html = """
            <table>
            <tr>
                <th>Rank</th>
                <th>Name</th>
                <th>Mean Squared Error</th>
                <th>Mean Absolute Error</th>
            </tr>
"""
for (rank, name, mse, mae) in self.sorted_errors():
html += """
<tr>
<td>%d</td>
<td>%s</td>
<td>%0.4f</td>
<td>%0.4f</td>
</tr>
""" % (rank, name, mse, mae)
html += "</table>"
self.ensure_dir(self.dirname)
path = join(self.dirname, filename)
with open(path, "w") as f:
f.write(html)
return html
def image_per_label(images, label_indices, label_names, max_size=2000):
groups = defaultdict(list)
for i, label_idx in enumerate(label_indices):
label = label_names[label_idx].lower().strip().replace(" ", "_")
groups[label].append(images[i])
# as a pretty arbitrary heuristic, let's try taking the min variance
# image for each person
    single_images = {}
    for label, group_images in sorted(groups.items()):
        single_images[label] = min(group_images, key=lambda image: image.std())
        if max_size and len(single_images) >= max_size:
            break
    return single_images
def get_lfw(max_size=None):
dataset = fetch_lfw_people(color=True)
# keep only one image per person
return image_per_label(
dataset.images,
dataset.target,
dataset.target_names,
max_size=max_size)
if __name__ == "__main__":
images_dict = get_lfw(max_size=2000)
table = ResultsTable(
images_dict=images_dict,
scale_rows=False,
center_rows=False)
for negative_log_regularization_weight in [2, 3, 4]:
regularization_weight = 10.0 ** -negative_log_regularization_weight
table.add_entry(
solver=IterativeImputer(
n_nearest_features=80,
max_iter=50
),
name="IterativeImputer_%d" % negative_log_regularization_weight)
for fill_method in ["mean", "median"]:
table.add_entry(
solver=SimpleFill(fill_method=fill_method),
name="SimpleFill_%s" % fill_method)
for k in [1, 3, 7]:
table.add_entry(
solver=KNN(
k=k,
orientation="rows"),
name="KNN_k%d" % (k,))
for shrinkage_value in [25, 50, 100]:
# SoftImpute without rank constraints
table.add_entry(
solver=SoftImpute(
shrinkage_value=shrinkage_value),
name="SoftImpute_lambda%d" % (shrinkage_value,))
for rank in [10, 20, 40]:
table.add_entry(
solver=IterativeSVD(
rank=rank,
init_fill_method="zero"),
name="IterativeSVD_rank%d" % (rank,))
table.save_html_table()
table.print_sorted_errors()
|
apache-2.0
|
UK992/servo
|
tests/wpt/web-platform-tests/tools/third_party/more-itertools/more_itertools/more.py
|
39
|
64928
|
from __future__ import print_function
from collections import Counter, defaultdict, deque
from functools import partial, wraps
from heapq import merge
from itertools import (
chain,
compress,
count,
cycle,
dropwhile,
groupby,
islice,
repeat,
takewhile,
tee
)
from operator import itemgetter, lt, gt, sub
from sys import maxsize, version_info
try:
from collections.abc import Sequence
except ImportError:
from collections import Sequence
from six import binary_type, string_types, text_type
from six.moves import filter, map, range, zip, zip_longest
from .recipes import consume, flatten, take
__all__ = [
'adjacent',
'always_iterable',
'always_reversible',
'bucket',
'chunked',
'circular_shifts',
'collapse',
'collate',
'consecutive_groups',
'consumer',
'count_cycle',
'difference',
'distinct_permutations',
'distribute',
'divide',
'exactly_n',
'first',
'groupby_transform',
'ilen',
'interleave_longest',
'interleave',
'intersperse',
'islice_extended',
'iterate',
'locate',
'lstrip',
'make_decorator',
'map_reduce',
'numeric_range',
'one',
'padded',
'peekable',
'rstrip',
'run_length',
'seekable',
'SequenceView',
'side_effect',
'sliced',
'sort_together',
'split_at',
'split_after',
'split_before',
'spy',
'stagger',
'strip',
'unique_to_each',
'windowed',
'with_iter',
'zip_offset',
]
_marker = object()
def chunked(iterable, n):
"""Break *iterable* into lists of length *n*:
>>> list(chunked([1, 2, 3, 4, 5, 6], 3))
[[1, 2, 3], [4, 5, 6]]
If the length of *iterable* is not evenly divisible by *n*, the last
returned list will be shorter:
>>> list(chunked([1, 2, 3, 4, 5, 6, 7, 8], 3))
[[1, 2, 3], [4, 5, 6], [7, 8]]
To use a fill-in value instead, see the :func:`grouper` recipe.
:func:`chunked` is useful for splitting up a computation on a large number
of keys into batches, to be pickled and sent off to worker processes. One
example is operations on rows in MySQL, which does not implement
server-side cursors properly and would otherwise load the entire dataset
into RAM on the client.
"""
return iter(partial(take, n, iter(iterable)), [])
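# Illustrative sketch (not part of more-itertools): the batching pattern the
# docstring above alludes to -- work through a large key set in fixed-size
# chunks rather than all at once.  ``process_batch`` is a placeholder for the
# per-batch work (e.g. a bulk database query); both names are assumptions
# added for exposition.
def _chunked_batching_sketch(keys, process_batch, batch_size=1000):
    results = []
    for batch in chunked(keys, batch_size):
        # each ``batch`` is a list of at most ``batch_size`` keys
        results.extend(process_batch(batch))
    return results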
def first(iterable, default=_marker):
"""Return the first item of *iterable*, or *default* if *iterable* is
empty.
>>> first([0, 1, 2, 3])
0
>>> first([], 'some default')
'some default'
If *default* is not provided and there are no items in the iterable,
raise ``ValueError``.
:func:`first` is useful when you have a generator of expensive-to-retrieve
values and want any arbitrary one. It is marginally shorter than
``next(iter(iterable), default)``.
"""
try:
return next(iter(iterable))
except StopIteration:
# I'm on the edge about raising ValueError instead of StopIteration. At
# the moment, ValueError wins, because the caller could conceivably
# want to do something different with flow control when I raise the
# exception, and it's weird to explicitly catch StopIteration.
if default is _marker:
raise ValueError('first() was called on an empty iterable, and no '
'default value was provided.')
return default
class peekable(object):
"""Wrap an iterator to allow lookahead and prepending elements.
Call :meth:`peek` on the result to get the value that will be returned
by :func:`next`. This won't advance the iterator:
>>> p = peekable(['a', 'b'])
>>> p.peek()
'a'
>>> next(p)
'a'
Pass :meth:`peek` a default value to return that instead of raising
``StopIteration`` when the iterator is exhausted.
>>> p = peekable([])
>>> p.peek('hi')
'hi'
peekables also offer a :meth:`prepend` method, which "inserts" items
at the head of the iterable:
>>> p = peekable([1, 2, 3])
>>> p.prepend(10, 11, 12)
>>> next(p)
10
>>> p.peek()
11
>>> list(p)
[11, 12, 1, 2, 3]
peekables can be indexed. Index 0 is the item that will be returned by
:func:`next`, index 1 is the item after that, and so on:
The values up to the given index will be cached.
>>> p = peekable(['a', 'b', 'c', 'd'])
>>> p[0]
'a'
>>> p[1]
'b'
>>> next(p)
'a'
Negative indexes are supported, but be aware that they will cache the
remaining items in the source iterator, which may require significant
storage.
To check whether a peekable is exhausted, check its truth value:
>>> p = peekable(['a', 'b'])
>>> if p: # peekable has items
... list(p)
['a', 'b']
    >>> if not p: # peekable is exhausted
... list(p)
[]
"""
def __init__(self, iterable):
self._it = iter(iterable)
self._cache = deque()
def __iter__(self):
return self
def __bool__(self):
try:
self.peek()
except StopIteration:
return False
return True
def __nonzero__(self):
# For Python 2 compatibility
return self.__bool__()
def peek(self, default=_marker):
"""Return the item that will be next returned from ``next()``.
Return ``default`` if there are no items left. If ``default`` is not
provided, raise ``StopIteration``.
"""
if not self._cache:
try:
self._cache.append(next(self._it))
except StopIteration:
if default is _marker:
raise
return default
return self._cache[0]
def prepend(self, *items):
"""Stack up items to be the next ones returned from ``next()`` or
``self.peek()``. The items will be returned in
first in, first out order::
>>> p = peekable([1, 2, 3])
>>> p.prepend(10, 11, 12)
>>> next(p)
10
>>> list(p)
[11, 12, 1, 2, 3]
It is possible, by prepending items, to "resurrect" a peekable that
previously raised ``StopIteration``.
>>> p = peekable([])
>>> next(p)
Traceback (most recent call last):
...
StopIteration
>>> p.prepend(1)
>>> next(p)
1
>>> next(p)
Traceback (most recent call last):
...
StopIteration
"""
self._cache.extendleft(reversed(items))
def __next__(self):
if self._cache:
return self._cache.popleft()
return next(self._it)
next = __next__ # For Python 2 compatibility
def _get_slice(self, index):
# Normalize the slice's arguments
step = 1 if (index.step is None) else index.step
if step > 0:
start = 0 if (index.start is None) else index.start
stop = maxsize if (index.stop is None) else index.stop
elif step < 0:
start = -1 if (index.start is None) else index.start
stop = (-maxsize - 1) if (index.stop is None) else index.stop
else:
raise ValueError('slice step cannot be zero')
# If either the start or stop index is negative, we'll need to cache
# the rest of the iterable in order to slice from the right side.
if (start < 0) or (stop < 0):
self._cache.extend(self._it)
# Otherwise we'll need to find the rightmost index and cache to that
# point.
else:
n = min(max(start, stop) + 1, maxsize)
cache_len = len(self._cache)
if n >= cache_len:
self._cache.extend(islice(self._it, n - cache_len))
return list(self._cache)[index]
def __getitem__(self, index):
if isinstance(index, slice):
return self._get_slice(index)
cache_len = len(self._cache)
if index < 0:
self._cache.extend(self._it)
elif index >= cache_len:
self._cache.extend(islice(self._it, index + 1 - cache_len))
return self._cache[index]
def _collate(*iterables, **kwargs):
"""Helper for ``collate()``, called when the user is using the ``reverse``
or ``key`` keyword arguments on Python versions below 3.5.
"""
key = kwargs.pop('key', lambda a: a)
reverse = kwargs.pop('reverse', False)
min_or_max = partial(max if reverse else min, key=itemgetter(0))
peekables = [peekable(it) for it in iterables]
peekables = [p for p in peekables if p] # Kill empties.
while peekables:
_, p = min_or_max((key(p.peek()), p) for p in peekables)
yield next(p)
peekables = [x for x in peekables if x]
def collate(*iterables, **kwargs):
"""Return a sorted merge of the items from each of several already-sorted
*iterables*.
>>> list(collate('ACDZ', 'AZ', 'JKL'))
['A', 'A', 'C', 'D', 'J', 'K', 'L', 'Z', 'Z']
Works lazily, keeping only the next value from each iterable in memory. Use
:func:`collate` to, for example, perform a n-way mergesort of items that
don't fit in memory.
If a *key* function is specified, the iterables will be sorted according
to its result:
>>> key = lambda s: int(s) # Sort by numeric value, not by string
>>> list(collate(['1', '10'], ['2', '11'], key=key))
['1', '2', '10', '11']
If the *iterables* are sorted in descending order, set *reverse* to
``True``:
>>> list(collate([5, 3, 1], [4, 2, 0], reverse=True))
[5, 4, 3, 2, 1, 0]
If the elements of the passed-in iterables are out of order, you might get
unexpected results.
On Python 2.7, this function delegates to :func:`heapq.merge` if neither
    of the keyword arguments is specified. On Python 3.5+, this function
is an alias for :func:`heapq.merge`.
"""
if not kwargs:
return merge(*iterables)
return _collate(*iterables, **kwargs)
# If using Python version 3.5 or greater, heapq.merge() will be faster than
# collate - use that instead.
if version_info >= (3, 5, 0):
_collate_docstring = collate.__doc__
collate = partial(merge)
collate.__doc__ = _collate_docstring
def consumer(func):
"""Decorator that automatically advances a PEP-342-style "reverse iterator"
to its first yield point so you don't have to call ``next()`` on it
manually.
>>> @consumer
... def tally():
... i = 0
... while True:
... print('Thing number %s is %s.' % (i, (yield)))
... i += 1
...
>>> t = tally()
>>> t.send('red')
Thing number 0 is red.
>>> t.send('fish')
Thing number 1 is fish.
Without the decorator, you would have to call ``next(t)`` before
``t.send()`` could be used.
"""
@wraps(func)
def wrapper(*args, **kwargs):
gen = func(*args, **kwargs)
next(gen)
return gen
return wrapper
def ilen(iterable):
"""Return the number of items in *iterable*.
>>> ilen(x for x in range(1000000) if x % 3 == 0)
333334
This consumes the iterable, so handle with care.
"""
# maxlen=1 only stores the last item in the deque
d = deque(enumerate(iterable, 1), maxlen=1)
# since we started enumerate at 1,
# the first item of the last pair will be the length of the iterable
# (assuming there were items)
return d[0][0] if d else 0
def iterate(func, start):
"""Return ``start``, ``func(start)``, ``func(func(start))``, ...
>>> from itertools import islice
>>> list(islice(iterate(lambda x: 2*x, 1), 10))
[1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
"""
while True:
yield start
start = func(start)
def with_iter(context_manager):
"""Wrap an iterable in a ``with`` statement, so it closes once exhausted.
For example, this will close the file when the iterator is exhausted::
upper_lines = (line.upper() for line in with_iter(open('foo')))
Any context manager which returns an iterable is a candidate for
``with_iter``.
"""
with context_manager as iterable:
for item in iterable:
yield item
def one(iterable, too_short=None, too_long=None):
"""Return the first item from *iterable*, which is expected to contain only
that item. Raise an exception if *iterable* is empty or has more than one
item.
:func:`one` is useful for ensuring that an iterable contains only one item.
For example, it can be used to retrieve the result of a database query
that is expected to return a single row.
If *iterable* is empty, ``ValueError`` will be raised. You may specify a
different exception with the *too_short* keyword:
>>> it = []
>>> one(it) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
    ValueError: too few items in iterable (expected 1)
>>> too_short = IndexError('too few items')
>>> one(it, too_short=too_short) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
IndexError: too few items
Similarly, if *iterable* contains more than one item, ``ValueError`` will
be raised. You may specify a different exception with the *too_long*
keyword:
>>> it = ['too', 'many']
>>> one(it) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
    ValueError: too many items in iterable (expected 1)
>>> too_long = RuntimeError
>>> one(it, too_long=too_long) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
RuntimeError
Note that :func:`one` attempts to advance *iterable* twice to ensure there
is only one item. If there is more than one, both items will be discarded.
See :func:`spy` or :func:`peekable` to check iterable contents less
destructively.
"""
it = iter(iterable)
try:
value = next(it)
except StopIteration:
raise too_short or ValueError('too few items in iterable (expected 1)')
try:
next(it)
except StopIteration:
pass
else:
raise too_long or ValueError('too many items in iterable (expected 1)')
return value
def distinct_permutations(iterable):
"""Yield successive distinct permutations of the elements in *iterable*.
>>> sorted(distinct_permutations([1, 0, 1]))
[(0, 1, 1), (1, 0, 1), (1, 1, 0)]
Equivalent to ``set(permutations(iterable))``, except duplicates are not
generated and thrown away. For larger input sequences this is much more
efficient.
Duplicate permutations arise when there are duplicated elements in the
input iterable. The number of items returned is
`n! / (x_1! * x_2! * ... * x_n!)`, where `n` is the total number of
items input, and each `x_i` is the count of a distinct item in the input
sequence.
"""
def perm_unique_helper(item_counts, perm, i):
"""Internal helper function
:arg item_counts: Stores the unique items in ``iterable`` and how many
times they are repeated
:arg perm: The permutation that is being built for output
:arg i: The index of the permutation being modified
The output permutations are built up recursively; the distinct items
are placed until their repetitions are exhausted.
"""
if i < 0:
yield tuple(perm)
else:
for item in item_counts:
if item_counts[item] <= 0:
continue
perm[i] = item
item_counts[item] -= 1
for x in perm_unique_helper(item_counts, perm, i - 1):
yield x
item_counts[item] += 1
item_counts = Counter(iterable)
length = sum(item_counts.values())
return perm_unique_helper(item_counts, [None] * length, length - 1)
def intersperse(e, iterable, n=1):
"""Intersperse filler element *e* among the items in *iterable*, leaving
*n* items between each filler element.
>>> list(intersperse('!', [1, 2, 3, 4, 5]))
[1, '!', 2, '!', 3, '!', 4, '!', 5]
>>> list(intersperse(None, [1, 2, 3, 4, 5], n=2))
[1, 2, None, 3, 4, None, 5]
"""
if n == 0:
raise ValueError('n must be > 0')
elif n == 1:
        # interleave(repeat(e), iterable) -> e, x_0, e, x_1, e, x_2...
        # islice(..., 1, None) -> x_0, e, x_1, e, x_2...
return islice(interleave(repeat(e), iterable), 1, None)
else:
# interleave(filler, chunks) -> [e], [x_0, x_1], [e], [x_2, x_3]...
# islice(..., 1, None) -> [x_0, x_1], [e], [x_2, x_3]...
# flatten(...) -> x_0, x_1, e, x_2, x_3...
filler = repeat([e])
chunks = chunked(iterable, n)
return flatten(islice(interleave(filler, chunks), 1, None))
def unique_to_each(*iterables):
"""Return the elements from each of the input iterables that aren't in the
other input iterables.
For example, suppose you have a set of packages, each with a set of
dependencies::
{'pkg_1': {'A', 'B'}, 'pkg_2': {'B', 'C'}, 'pkg_3': {'B', 'D'}}
If you remove one package, which dependencies can also be removed?
If ``pkg_1`` is removed, then ``A`` is no longer necessary - it is not
associated with ``pkg_2`` or ``pkg_3``. Similarly, ``C`` is only needed for
``pkg_2``, and ``D`` is only needed for ``pkg_3``::
>>> unique_to_each({'A', 'B'}, {'B', 'C'}, {'B', 'D'})
[['A'], ['C'], ['D']]
If there are duplicates in one input iterable that aren't in the others
they will be duplicated in the output. Input order is preserved::
>>> unique_to_each("mississippi", "missouri")
[['p', 'p'], ['o', 'u', 'r']]
It is assumed that the elements of each iterable are hashable.
"""
pool = [list(it) for it in iterables]
counts = Counter(chain.from_iterable(map(set, pool)))
uniques = {element for element in counts if counts[element] == 1}
return [list(filter(uniques.__contains__, it)) for it in pool]
def windowed(seq, n, fillvalue=None, step=1):
"""Return a sliding window of width *n* over the given iterable.
>>> all_windows = windowed([1, 2, 3, 4, 5], 3)
>>> list(all_windows)
[(1, 2, 3), (2, 3, 4), (3, 4, 5)]
When the window is larger than the iterable, *fillvalue* is used in place
of missing values::
>>> list(windowed([1, 2, 3], 4))
[(1, 2, 3, None)]
Each window will advance in increments of *step*:
>>> list(windowed([1, 2, 3, 4, 5, 6], 3, fillvalue='!', step=2))
[(1, 2, 3), (3, 4, 5), (5, 6, '!')]
"""
if n < 0:
raise ValueError('n must be >= 0')
if n == 0:
yield tuple()
return
if step < 1:
raise ValueError('step must be >= 1')
it = iter(seq)
window = deque([], n)
append = window.append
# Initial deque fill
for _ in range(n):
append(next(it, fillvalue))
yield tuple(window)
# Appending new items to the right causes old items to fall off the left
i = 0
for item in it:
append(item)
i = (i + 1) % step
if i % step == 0:
yield tuple(window)
# If there are items from the iterable in the window, pad with the given
# value and emit them.
if (i % step) and (step - i < n):
for _ in range(step - i):
append(fillvalue)
yield tuple(window)
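# Illustrative sketch (not part of more-itertools): a typical use of
# ``windowed`` -- a simple moving average over a numeric sequence.  The
# helper name and the choice to drop windows containing the fill marker are
# assumptions added for exposition.
def _moving_average_sketch(values, width=3):
    marker = object()
    return [
        sum(window) / width
        for window in windowed(values, width, fillvalue=marker)
        if marker not in window
    ]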
class bucket(object):
    """Wrap *iterable* and return an object that buckets it into child
    iterables based on a *key* function.
>>> iterable = ['a1', 'b1', 'c1', 'a2', 'b2', 'c2', 'b3']
>>> s = bucket(iterable, key=lambda x: x[0])
>>> a_iterable = s['a']
>>> next(a_iterable)
'a1'
>>> next(a_iterable)
'a2'
>>> list(s['b'])
['b1', 'b2', 'b3']
The original iterable will be advanced and its items will be cached until
they are used by the child iterables. This may require significant storage.
By default, attempting to select a bucket to which no items belong will
exhaust the iterable and cache all values.
If you specify a *validator* function, selected buckets will instead be
checked against it.
>>> from itertools import count
>>> it = count(1, 2) # Infinite sequence of odd numbers
>>> key = lambda x: x % 10 # Bucket by last digit
>>> validator = lambda x: x in {1, 3, 5, 7, 9} # Odd digits only
>>> s = bucket(it, key=key, validator=validator)
>>> 2 in s
False
>>> list(s[2])
[]
"""
def __init__(self, iterable, key, validator=None):
self._it = iter(iterable)
self._key = key
self._cache = defaultdict(deque)
self._validator = validator or (lambda x: True)
def __contains__(self, value):
if not self._validator(value):
return False
try:
item = next(self[value])
except StopIteration:
return False
else:
self._cache[value].appendleft(item)
return True
def _get_values(self, value):
"""
Helper to yield items from the parent iterator that match *value*.
Items that don't match are stored in the local cache as they
are encountered.
"""
while True:
# If we've cached some items that match the target value, emit
# the first one and evict it from the cache.
if self._cache[value]:
yield self._cache[value].popleft()
# Otherwise we need to advance the parent iterator to search for
# a matching item, caching the rest.
else:
while True:
try:
item = next(self._it)
except StopIteration:
return
item_value = self._key(item)
if item_value == value:
yield item
break
elif self._validator(item_value):
self._cache[item_value].append(item)
def __getitem__(self, value):
if not self._validator(value):
return iter(())
return self._get_values(value)
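# Illustrative sketch (not part of more-itertools): using ``bucket`` to fan a
# single stream of (level, message) records out into per-level iterators
# without materializing the whole stream up front.  The record format and
# helper name are assumptions added for exposition.
def _bucket_by_level_sketch(records):
    by_level = bucket(records, key=lambda rec: rec[0])
    errors = by_level['ERROR']      # lazily yields records whose level is 'ERROR'
    warnings = by_level['WARNING']  # ditto for 'WARNING'
    return errors, warnings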
def spy(iterable, n=1):
"""Return a 2-tuple with a list containing the first *n* elements of
*iterable*, and an iterator with the same items as *iterable*.
This allows you to "look ahead" at the items in the iterable without
advancing it.
There is one item in the list by default:
>>> iterable = 'abcdefg'
>>> head, iterable = spy(iterable)
>>> head
['a']
>>> list(iterable)
['a', 'b', 'c', 'd', 'e', 'f', 'g']
You may use unpacking to retrieve items instead of lists:
>>> (head,), iterable = spy('abcdefg')
>>> head
'a'
>>> (first, second), iterable = spy('abcdefg', 2)
>>> first
'a'
>>> second
'b'
The number of items requested can be larger than the number of items in
the iterable:
>>> iterable = [1, 2, 3, 4, 5]
>>> head, iterable = spy(iterable, 10)
>>> head
[1, 2, 3, 4, 5]
>>> list(iterable)
[1, 2, 3, 4, 5]
"""
it = iter(iterable)
head = take(n, it)
return head, chain(head, it)
def interleave(*iterables):
"""Return a new iterable yielding from each iterable in turn,
until the shortest is exhausted.
>>> list(interleave([1, 2, 3], [4, 5], [6, 7, 8]))
[1, 4, 6, 2, 5, 7]
For a version that doesn't terminate after the shortest iterable is
exhausted, see :func:`interleave_longest`.
"""
return chain.from_iterable(zip(*iterables))
def interleave_longest(*iterables):
"""Return a new iterable yielding from each iterable in turn,
skipping any that are exhausted.
>>> list(interleave_longest([1, 2, 3], [4, 5], [6, 7, 8]))
[1, 4, 6, 2, 5, 7, 3, 8]
This function produces the same output as :func:`roundrobin`, but may
perform better for some inputs (in particular when the number of iterables
is large).
"""
i = chain.from_iterable(zip_longest(*iterables, fillvalue=_marker))
return (x for x in i if x is not _marker)
def collapse(iterable, base_type=None, levels=None):
"""Flatten an iterable with multiple levels of nesting (e.g., a list of
lists of tuples) into non-iterable types.
>>> iterable = [(1, 2), ([3, 4], [[5], [6]])]
>>> list(collapse(iterable))
[1, 2, 3, 4, 5, 6]
String types are not considered iterable and will not be collapsed.
To avoid collapsing other types, specify *base_type*:
>>> iterable = ['ab', ('cd', 'ef'), ['gh', 'ij']]
>>> list(collapse(iterable, base_type=tuple))
['ab', ('cd', 'ef'), 'gh', 'ij']
Specify *levels* to stop flattening after a certain level:
>>> iterable = [('a', ['b']), ('c', ['d'])]
>>> list(collapse(iterable)) # Fully flattened
['a', 'b', 'c', 'd']
>>> list(collapse(iterable, levels=1)) # Only one level flattened
['a', ['b'], 'c', ['d']]
"""
def walk(node, level):
if (
((levels is not None) and (level > levels)) or
isinstance(node, string_types) or
((base_type is not None) and isinstance(node, base_type))
):
yield node
return
try:
tree = iter(node)
except TypeError:
yield node
return
else:
for child in tree:
for x in walk(child, level + 1):
yield x
for x in walk(iterable, 0):
yield x
def side_effect(func, iterable, chunk_size=None, before=None, after=None):
"""Invoke *func* on each item in *iterable* (or on each *chunk_size* group
of items) before yielding the item.
`func` must be a function that takes a single argument. Its return value
will be discarded.
*before* and *after* are optional functions that take no arguments. They
will be executed before iteration starts and after it ends, respectively.
`side_effect` can be used for logging, updating progress bars, or anything
that is not functionally "pure."
Emitting a status message:
>>> from more_itertools import consume
>>> func = lambda item: print('Received {}'.format(item))
>>> consume(side_effect(func, range(2)))
Received 0
Received 1
Operating on chunks of items:
>>> pair_sums = []
>>> func = lambda chunk: pair_sums.append(sum(chunk))
>>> list(side_effect(func, [0, 1, 2, 3, 4, 5], 2))
[0, 1, 2, 3, 4, 5]
>>> list(pair_sums)
[1, 5, 9]
Writing to a file-like object:
>>> from io import StringIO
>>> from more_itertools import consume
>>> f = StringIO()
>>> func = lambda x: print(x, file=f)
>>> before = lambda: print(u'HEADER', file=f)
>>> after = f.close
>>> it = [u'a', u'b', u'c']
>>> consume(side_effect(func, it, before=before, after=after))
>>> f.closed
True
"""
try:
if before is not None:
before()
if chunk_size is None:
for item in iterable:
func(item)
yield item
else:
for chunk in chunked(iterable, chunk_size):
func(chunk)
for item in chunk:
yield item
finally:
if after is not None:
after()
def sliced(seq, n):
"""Yield slices of length *n* from the sequence *seq*.
>>> list(sliced((1, 2, 3, 4, 5, 6), 3))
[(1, 2, 3), (4, 5, 6)]
If the length of the sequence is not divisible by the requested slice
length, the last slice will be shorter.
>>> list(sliced((1, 2, 3, 4, 5, 6, 7, 8), 3))
[(1, 2, 3), (4, 5, 6), (7, 8)]
This function will only work for iterables that support slicing.
For non-sliceable iterables, see :func:`chunked`.
"""
return takewhile(bool, (seq[i: i + n] for i in count(0, n)))
def split_at(iterable, pred):
"""Yield lists of items from *iterable*, where each list is delimited by
an item where callable *pred* returns ``True``. The lists do not include
the delimiting items.
>>> list(split_at('abcdcba', lambda x: x == 'b'))
[['a'], ['c', 'd', 'c'], ['a']]
>>> list(split_at(range(10), lambda n: n % 2 == 1))
[[0], [2], [4], [6], [8], []]
"""
buf = []
for item in iterable:
if pred(item):
yield buf
buf = []
else:
buf.append(item)
yield buf
def split_before(iterable, pred):
"""Yield lists of items from *iterable*, where each list starts with an
item where callable *pred* returns ``True``:
>>> list(split_before('OneTwo', lambda s: s.isupper()))
[['O', 'n', 'e'], ['T', 'w', 'o']]
>>> list(split_before(range(10), lambda n: n % 3 == 0))
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
"""
buf = []
for item in iterable:
if pred(item) and buf:
yield buf
buf = []
buf.append(item)
yield buf
def split_after(iterable, pred):
"""Yield lists of items from *iterable*, where each list ends with an
item where callable *pred* returns ``True``:
>>> list(split_after('one1two2', lambda s: s.isdigit()))
[['o', 'n', 'e', '1'], ['t', 'w', 'o', '2']]
>>> list(split_after(range(10), lambda n: n % 3 == 0))
[[0], [1, 2, 3], [4, 5, 6], [7, 8, 9]]
"""
buf = []
for item in iterable:
buf.append(item)
if pred(item) and buf:
yield buf
buf = []
if buf:
yield buf
def padded(iterable, fillvalue=None, n=None, next_multiple=False):
"""Yield the elements from *iterable*, followed by *fillvalue*, such that
at least *n* items are emitted.
>>> list(padded([1, 2, 3], '?', 5))
[1, 2, 3, '?', '?']
If *next_multiple* is ``True``, *fillvalue* will be emitted until the
number of items emitted is a multiple of *n*::
>>> list(padded([1, 2, 3, 4], n=3, next_multiple=True))
[1, 2, 3, 4, None, None]
If *n* is ``None``, *fillvalue* will be emitted indefinitely.
"""
it = iter(iterable)
if n is None:
for item in chain(it, repeat(fillvalue)):
yield item
elif n < 1:
raise ValueError('n must be at least 1')
else:
item_count = 0
for item in it:
yield item
item_count += 1
remaining = (n - item_count) % n if next_multiple else n - item_count
for _ in range(remaining):
yield fillvalue
def distribute(n, iterable):
"""Distribute the items from *iterable* among *n* smaller iterables.
>>> group_1, group_2 = distribute(2, [1, 2, 3, 4, 5, 6])
>>> list(group_1)
[1, 3, 5]
>>> list(group_2)
[2, 4, 6]
If the length of *iterable* is not evenly divisible by *n*, then the
length of the returned iterables will not be identical:
>>> children = distribute(3, [1, 2, 3, 4, 5, 6, 7])
>>> [list(c) for c in children]
[[1, 4, 7], [2, 5], [3, 6]]
If the length of *iterable* is smaller than *n*, then the last returned
iterables will be empty:
>>> children = distribute(5, [1, 2, 3])
>>> [list(c) for c in children]
[[1], [2], [3], [], []]
This function uses :func:`itertools.tee` and may require significant
storage. If you need the order items in the smaller iterables to match the
original iterable, see :func:`divide`.
"""
if n < 1:
raise ValueError('n must be at least 1')
children = tee(iterable, n)
return [islice(it, index, None, n) for index, it in enumerate(children)]
def stagger(iterable, offsets=(-1, 0, 1), longest=False, fillvalue=None):
"""Yield tuples whose elements are offset from *iterable*.
The amount by which the `i`-th item in each tuple is offset is given by
the `i`-th item in *offsets*.
>>> list(stagger([0, 1, 2, 3]))
[(None, 0, 1), (0, 1, 2), (1, 2, 3)]
>>> list(stagger(range(8), offsets=(0, 2, 4)))
[(0, 2, 4), (1, 3, 5), (2, 4, 6), (3, 5, 7)]
By default, the sequence will end when the final element of a tuple is the
last item in the iterable. To continue until the first element of a tuple
is the last item in the iterable, set *longest* to ``True``::
>>> list(stagger([0, 1, 2, 3], longest=True))
[(None, 0, 1), (0, 1, 2), (1, 2, 3), (2, 3, None), (3, None, None)]
By default, ``None`` will be used to replace offsets beyond the end of the
sequence. Specify *fillvalue* to use some other value.
"""
children = tee(iterable, len(offsets))
return zip_offset(
*children, offsets=offsets, longest=longest, fillvalue=fillvalue
)
def zip_offset(*iterables, **kwargs):
"""``zip`` the input *iterables* together, but offset the `i`-th iterable
by the `i`-th item in *offsets*.
>>> list(zip_offset('0123', 'abcdef', offsets=(0, 1)))
[('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e')]
This can be used as a lightweight alternative to SciPy or pandas to analyze
    data sets in which some series have a lead or lag relationship.
By default, the sequence will end when the shortest iterable is exhausted.
To continue until the longest iterable is exhausted, set *longest* to
``True``.
>>> list(zip_offset('0123', 'abcdef', offsets=(0, 1), longest=True))
[('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e'), (None, 'f')]
By default, ``None`` will be used to replace offsets beyond the end of the
sequence. Specify *fillvalue* to use some other value.
"""
offsets = kwargs['offsets']
longest = kwargs.get('longest', False)
fillvalue = kwargs.get('fillvalue', None)
if len(iterables) != len(offsets):
raise ValueError("Number of iterables and offsets didn't match")
staggered = []
for it, n in zip(iterables, offsets):
if n < 0:
staggered.append(chain(repeat(fillvalue, -n), it))
elif n > 0:
staggered.append(islice(it, n, None))
else:
staggered.append(it)
if longest:
return zip_longest(*staggered, fillvalue=fillvalue)
return zip(*staggered)
def sort_together(iterables, key_list=(0,), reverse=False):
"""Return the input iterables sorted together, with *key_list* as the
priority for sorting. All iterables are trimmed to the length of the
shortest one.
This can be used like the sorting function in a spreadsheet. If each
iterable represents a column of data, the key list determines which
columns are used for sorting.
By default, all iterables are sorted using the ``0``-th iterable::
>>> iterables = [(4, 3, 2, 1), ('a', 'b', 'c', 'd')]
>>> sort_together(iterables)
[(1, 2, 3, 4), ('d', 'c', 'b', 'a')]
Set a different key list to sort according to another iterable.
    Specifying multiple keys dictates how ties are broken::
>>> iterables = [(3, 1, 2), (0, 1, 0), ('c', 'b', 'a')]
>>> sort_together(iterables, key_list=(1, 2))
[(2, 3, 1), (0, 0, 1), ('a', 'c', 'b')]
Set *reverse* to ``True`` to sort in descending order.
>>> sort_together([(1, 2, 3), ('c', 'b', 'a')], reverse=True)
[(3, 2, 1), ('a', 'b', 'c')]
"""
return list(zip(*sorted(zip(*iterables),
key=itemgetter(*key_list),
reverse=reverse)))
def divide(n, iterable):
"""Divide the elements from *iterable* into *n* parts, maintaining
order.
>>> group_1, group_2 = divide(2, [1, 2, 3, 4, 5, 6])
>>> list(group_1)
[1, 2, 3]
>>> list(group_2)
[4, 5, 6]
If the length of *iterable* is not evenly divisible by *n*, then the
length of the returned iterables will not be identical:
>>> children = divide(3, [1, 2, 3, 4, 5, 6, 7])
>>> [list(c) for c in children]
[[1, 2, 3], [4, 5], [6, 7]]
    If the length of *iterable* is smaller than *n*, then the last returned
iterables will be empty:
>>> children = divide(5, [1, 2, 3])
>>> [list(c) for c in children]
[[1], [2], [3], [], []]
This function will exhaust the iterable before returning and may require
significant storage. If order is not important, see :func:`distribute`,
which does not first pull the iterable into memory.
"""
if n < 1:
raise ValueError('n must be at least 1')
seq = tuple(iterable)
q, r = divmod(len(seq), n)
ret = []
for i in range(n):
start = (i * q) + (i if i < r else r)
stop = ((i + 1) * q) + (i + 1 if i + 1 < r else r)
ret.append(iter(seq[start:stop]))
return ret
def always_iterable(obj, base_type=(text_type, binary_type)):
"""If *obj* is iterable, return an iterator over its items::
>>> obj = (1, 2, 3)
>>> list(always_iterable(obj))
[1, 2, 3]
If *obj* is not iterable, return a one-item iterable containing *obj*::
>>> obj = 1
>>> list(always_iterable(obj))
[1]
If *obj* is ``None``, return an empty iterable:
>>> obj = None
>>> list(always_iterable(None))
[]
By default, binary and text strings are not considered iterable::
>>> obj = 'foo'
>>> list(always_iterable(obj))
['foo']
If *base_type* is set, objects for which ``isinstance(obj, base_type)``
returns ``True`` won't be considered iterable.
>>> obj = {'a': 1}
>>> list(always_iterable(obj)) # Iterate over the dict's keys
['a']
>>> list(always_iterable(obj, base_type=dict)) # Treat dicts as a unit
[{'a': 1}]
Set *base_type* to ``None`` to avoid any special handling and treat objects
Python considers iterable as iterable:
>>> obj = 'foo'
>>> list(always_iterable(obj, base_type=None))
['f', 'o', 'o']
"""
if obj is None:
return iter(())
if (base_type is not None) and isinstance(obj, base_type):
return iter((obj,))
try:
return iter(obj)
except TypeError:
return iter((obj,))
def adjacent(predicate, iterable, distance=1):
"""Return an iterable over `(bool, item)` tuples where the `item` is
drawn from *iterable* and the `bool` indicates whether
that item satisfies the *predicate* or is adjacent to an item that does.
For example, to find whether items are adjacent to a ``3``::
>>> list(adjacent(lambda x: x == 3, range(6)))
[(False, 0), (False, 1), (True, 2), (True, 3), (True, 4), (False, 5)]
Set *distance* to change what counts as adjacent. For example, to find
whether items are two places away from a ``3``:
>>> list(adjacent(lambda x: x == 3, range(6), distance=2))
[(False, 0), (True, 1), (True, 2), (True, 3), (True, 4), (True, 5)]
This is useful for contextualizing the results of a search function.
For example, a code comparison tool might want to identify lines that
have changed, but also surrounding lines to give the viewer of the diff
context.
The predicate function will only be called once for each item in the
iterable.
See also :func:`groupby_transform`, which can be used with this function
to group ranges of items with the same `bool` value.
"""
# Allow distance=0 mainly for testing that it reproduces results with map()
if distance < 0:
raise ValueError('distance must be at least 0')
i1, i2 = tee(iterable)
padding = [False] * distance
selected = chain(padding, map(predicate, i1), padding)
adjacent_to_selected = map(any, windowed(selected, 2 * distance + 1))
return zip(adjacent_to_selected, i2)
def groupby_transform(iterable, keyfunc=None, valuefunc=None):
"""An extension of :func:`itertools.groupby` that transforms the values of
*iterable* after grouping them.
*keyfunc* is a function used to compute a grouping key for each item.
*valuefunc* is a function for transforming the items after grouping.
>>> iterable = 'AaaABbBCcA'
>>> keyfunc = lambda x: x.upper()
>>> valuefunc = lambda x: x.lower()
>>> grouper = groupby_transform(iterable, keyfunc, valuefunc)
>>> [(k, ''.join(g)) for k, g in grouper]
[('A', 'aaaa'), ('B', 'bbb'), ('C', 'cc'), ('A', 'a')]
*keyfunc* and *valuefunc* default to identity functions if they are not
specified.
:func:`groupby_transform` is useful when grouping elements of an iterable
using a separate iterable as the key. To do this, :func:`zip` the iterables
and pass a *keyfunc* that extracts the first element and a *valuefunc*
that extracts the second element::
>>> from operator import itemgetter
>>> keys = [0, 0, 1, 1, 1, 2, 2, 2, 3]
>>> values = 'abcdefghi'
>>> iterable = zip(keys, values)
>>> grouper = groupby_transform(iterable, itemgetter(0), itemgetter(1))
>>> [(k, ''.join(g)) for k, g in grouper]
[(0, 'ab'), (1, 'cde'), (2, 'fgh'), (3, 'i')]
Note that the order of items in the iterable is significant.
Only adjacent items are grouped together, so if you don't want any
duplicate groups, you should sort the iterable by the key function.
"""
valuefunc = (lambda x: x) if valuefunc is None else valuefunc
return ((k, map(valuefunc, g)) for k, g in groupby(iterable, keyfunc))
def numeric_range(*args):
"""An extension of the built-in ``range()`` function whose arguments can
be any orderable numeric type.
With only *stop* specified, *start* defaults to ``0`` and *step*
defaults to ``1``. The output items will match the type of *stop*:
>>> list(numeric_range(3.5))
[0.0, 1.0, 2.0, 3.0]
With only *start* and *stop* specified, *step* defaults to ``1``. The
output items will match the type of *start*:
>>> from decimal import Decimal
>>> start = Decimal('2.1')
>>> stop = Decimal('5.1')
>>> list(numeric_range(start, stop))
[Decimal('2.1'), Decimal('3.1'), Decimal('4.1')]
With *start*, *stop*, and *step* specified the output items will match
the type of ``start + step``:
>>> from fractions import Fraction
>>> start = Fraction(1, 2) # Start at 1/2
>>> stop = Fraction(5, 2) # End at 5/2
>>> step = Fraction(1, 2) # Count by 1/2
>>> list(numeric_range(start, stop, step))
[Fraction(1, 2), Fraction(1, 1), Fraction(3, 2), Fraction(2, 1)]
If *step* is zero, ``ValueError`` is raised. Negative steps are supported:
>>> list(numeric_range(3, -1, -1.0))
[3.0, 2.0, 1.0, 0.0]
Be aware of the limitations of floating point numbers; the representation
of the yielded numbers may be surprising.
"""
argc = len(args)
if argc == 1:
stop, = args
start = type(stop)(0)
step = 1
elif argc == 2:
start, stop = args
step = 1
elif argc == 3:
start, stop, step = args
else:
err_msg = 'numeric_range takes at most 3 arguments, got {}'
raise TypeError(err_msg.format(argc))
values = (start + (step * n) for n in count())
if step > 0:
return takewhile(partial(gt, stop), values)
elif step < 0:
return takewhile(partial(lt, stop), values)
else:
raise ValueError('numeric_range arg 3 must not be zero')
def count_cycle(iterable, n=None):
"""Cycle through the items from *iterable* up to *n* times, yielding
the number of completed cycles along with each item. If *n* is omitted the
process repeats indefinitely.
>>> list(count_cycle('AB', 3))
[(0, 'A'), (0, 'B'), (1, 'A'), (1, 'B'), (2, 'A'), (2, 'B')]
"""
iterable = tuple(iterable)
if not iterable:
return iter(())
counter = count() if n is None else range(n)
return ((i, item) for i in counter for item in iterable)
def locate(iterable, pred=bool):
"""Yield the index of each item in *iterable* for which *pred* returns
``True``.
*pred* defaults to :func:`bool`, which will select truthy items:
>>> list(locate([0, 1, 1, 0, 1, 0, 0]))
[1, 2, 4]
Set *pred* to a custom function to, e.g., find the indexes for a particular
item:
>>> list(locate(['a', 'b', 'c', 'b'], lambda x: x == 'b'))
[1, 3]
Use with :func:`windowed` to find the indexes of a sub-sequence:
>>> from more_itertools import windowed
>>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]
>>> sub = [1, 2, 3]
>>> pred = lambda w: w == tuple(sub) # windowed() returns tuples
>>> list(locate(windowed(iterable, len(sub)), pred=pred))
[1, 5, 9]
Use with :func:`seekable` to find indexes and then retrieve the associated
items:
>>> from itertools import count
>>> from more_itertools import seekable
>>> source = (3 * n + 1 if (n % 2) else n // 2 for n in count())
>>> it = seekable(source)
>>> pred = lambda x: x > 100
>>> indexes = locate(it, pred=pred)
>>> i = next(indexes)
>>> it.seek(i)
>>> next(it)
106
"""
return compress(count(), map(pred, iterable))
def lstrip(iterable, pred):
"""Yield the items from *iterable*, but strip any from the beginning
for which *pred* returns ``True``.
For example, to remove a set of items from the start of an iterable:
>>> iterable = (None, False, None, 1, 2, None, 3, False, None)
>>> pred = lambda x: x in {None, False, ''}
>>> list(lstrip(iterable, pred))
[1, 2, None, 3, False, None]
    This function is analogous to :func:`str.lstrip`, and is essentially
    a wrapper for :func:`itertools.dropwhile`.
"""
return dropwhile(pred, iterable)
def rstrip(iterable, pred):
"""Yield the items from *iterable*, but strip any from the end
for which *pred* returns ``True``.
For example, to remove a set of items from the end of an iterable:
>>> iterable = (None, False, None, 1, 2, None, 3, False, None)
>>> pred = lambda x: x in {None, False, ''}
>>> list(rstrip(iterable, pred))
[None, False, None, 1, 2, None, 3]
This function is analogous to :func:`str.rstrip`.
"""
cache = []
cache_append = cache.append
for x in iterable:
if pred(x):
cache_append(x)
else:
for y in cache:
yield y
del cache[:]
yield x
def strip(iterable, pred):
"""Yield the items from *iterable*, but strip any from the
beginning and end for which *pred* returns ``True``.
For example, to remove a set of items from both ends of an iterable:
>>> iterable = (None, False, None, 1, 2, None, 3, False, None)
>>> pred = lambda x: x in {None, False, ''}
>>> list(strip(iterable, pred))
[1, 2, None, 3]
This function is analogous to :func:`str.strip`.
"""
return rstrip(lstrip(iterable, pred), pred)
def islice_extended(iterable, *args):
"""An extension of :func:`itertools.islice` that supports negative values
for *stop*, *start*, and *step*.
>>> iterable = iter('abcdefgh')
>>> list(islice_extended(iterable, -4, -1))
['e', 'f', 'g']
Slices with negative values require some caching of *iterable*, but this
function takes care to minimize the amount of memory required.
For example, you can use a negative step with an infinite iterator:
>>> from itertools import count
>>> list(islice_extended(count(), 110, 99, -2))
[110, 108, 106, 104, 102, 100]
"""
s = slice(*args)
start = s.start
stop = s.stop
if s.step == 0:
raise ValueError('step argument must be a non-zero integer or None.')
step = s.step or 1
it = iter(iterable)
if step > 0:
start = 0 if (start is None) else start
if (start < 0):
# Consume all but the last -start items
cache = deque(enumerate(it, 1), maxlen=-start)
len_iter = cache[-1][0] if cache else 0
# Adjust start to be positive
i = max(len_iter + start, 0)
# Adjust stop to be positive
if stop is None:
j = len_iter
elif stop >= 0:
j = min(stop, len_iter)
else:
j = max(len_iter + stop, 0)
# Slice the cache
n = j - i
if n <= 0:
return
for index, item in islice(cache, 0, n, step):
yield item
elif (stop is not None) and (stop < 0):
# Advance to the start position
next(islice(it, start, start), None)
# When stop is negative, we have to carry -stop items while
# iterating
cache = deque(islice(it, -stop), maxlen=-stop)
for index, item in enumerate(it):
cached_item = cache.popleft()
if index % step == 0:
yield cached_item
cache.append(item)
else:
# When both start and stop are positive we have the normal case
for item in islice(it, start, stop, step):
yield item
else:
start = -1 if (start is None) else start
if (stop is not None) and (stop < 0):
# Consume all but the last items
n = -stop - 1
cache = deque(enumerate(it, 1), maxlen=n)
len_iter = cache[-1][0] if cache else 0
# If start and stop are both negative they are comparable and
# we can just slice. Otherwise we can adjust start to be negative
# and then slice.
if start < 0:
i, j = start, stop
else:
i, j = min(start - len_iter, -1), None
for index, item in list(cache)[i:j:step]:
yield item
else:
# Advance to the stop position
if stop is not None:
m = stop + 1
next(islice(it, m, m), None)
# stop is positive, so if start is negative they are not comparable
# and we need the rest of the items.
if start < 0:
i = start
n = None
# stop is None and start is positive, so we just need items up to
# the start index.
elif stop is None:
i = None
n = start + 1
# Both stop and start are positive, so they are comparable.
else:
i = None
n = start - stop
if n <= 0:
return
cache = list(islice(it, n))
for item in cache[i::step]:
yield item
def always_reversible(iterable):
"""An extension of :func:`reversed` that supports all iterables, not
just those which implement the ``Reversible`` or ``Sequence`` protocols.
>>> print(*always_reversible(x for x in range(3)))
2 1 0
If the iterable is already reversible, this function returns the
result of :func:`reversed()`. If the iterable is not reversible,
this function will cache the remaining items in the iterable and
yield them in reverse order, which may require significant storage.
"""
try:
return reversed(iterable)
except TypeError:
return reversed(list(iterable))
def consecutive_groups(iterable, ordering=lambda x: x):
"""Yield groups of consecutive items using :func:`itertools.groupby`.
The *ordering* function determines whether two items are adjacent by
returning their position.
By default, the ordering function is the identity function. This is
suitable for finding runs of numbers:
>>> iterable = [1, 10, 11, 12, 20, 30, 31, 32, 33, 40]
>>> for group in consecutive_groups(iterable):
... print(list(group))
[1]
[10, 11, 12]
[20]
[30, 31, 32, 33]
[40]
For finding runs of adjacent letters, try using the :meth:`index` method
of a string of letters:
>>> from string import ascii_lowercase
>>> iterable = 'abcdfgilmnop'
>>> ordering = ascii_lowercase.index
>>> for group in consecutive_groups(iterable, ordering):
... print(list(group))
['a', 'b', 'c', 'd']
['f', 'g']
['i']
['l', 'm', 'n', 'o', 'p']
"""
for k, g in groupby(
enumerate(iterable), key=lambda x: x[0] - ordering(x[1])
):
yield map(itemgetter(1), g)
def difference(iterable, func=sub):
"""By default, compute the first difference of *iterable* using
:func:`operator.sub`.
>>> iterable = [0, 1, 3, 6, 10]
>>> list(difference(iterable))
[0, 1, 2, 3, 4]
This is the opposite of :func:`accumulate`'s default behavior:
>>> from more_itertools import accumulate
>>> iterable = [0, 1, 2, 3, 4]
>>> list(accumulate(iterable))
[0, 1, 3, 6, 10]
>>> list(difference(accumulate(iterable)))
[0, 1, 2, 3, 4]
By default *func* is :func:`operator.sub`, but other functions can be
specified. They will be applied as follows::
A, B, C, D, ... --> A, func(B, A), func(C, B), func(D, C), ...
For example, to do progressive division:
>>> iterable = [1, 2, 6, 24, 120] # Factorial sequence
>>> func = lambda x, y: x // y
>>> list(difference(iterable, func))
[1, 2, 3, 4, 5]
"""
a, b = tee(iterable)
try:
item = next(b)
except StopIteration:
return iter([])
return chain([item], map(lambda x: func(x[1], x[0]), zip(a, b)))
class SequenceView(Sequence):
"""Return a read-only view of the sequence object *target*.
    :class:`SequenceView` objects are analogous to Python's built-in
"dictionary view" types. They provide a dynamic view of a sequence's items,
meaning that when the sequence updates, so does the view.
>>> seq = ['0', '1', '2']
>>> view = SequenceView(seq)
>>> view
SequenceView(['0', '1', '2'])
>>> seq.append('3')
>>> view
SequenceView(['0', '1', '2', '3'])
Sequence views support indexing, slicing, and length queries. They act
like the underlying sequence, except they don't allow assignment:
>>> view[1]
'1'
>>> view[1:-1]
['1', '2']
>>> len(view)
4
Sequence views are useful as an alternative to copying, as they don't
require (much) extra storage.
"""
def __init__(self, target):
if not isinstance(target, Sequence):
raise TypeError
self._target = target
def __getitem__(self, index):
return self._target[index]
def __len__(self):
return len(self._target)
def __repr__(self):
return '{}({})'.format(self.__class__.__name__, repr(self._target))
class seekable(object):
"""Wrap an iterator to allow for seeking backward and forward. This
progressively caches the items in the source iterable so they can be
re-visited.
Call :meth:`seek` with an index to seek to that position in the source
iterable.
To "reset" an iterator, seek to ``0``:
>>> from itertools import count
>>> it = seekable((str(n) for n in count()))
>>> next(it), next(it), next(it)
('0', '1', '2')
>>> it.seek(0)
>>> next(it), next(it), next(it)
('0', '1', '2')
>>> next(it)
'3'
You can also seek forward:
>>> it = seekable((str(n) for n in range(20)))
>>> it.seek(10)
>>> next(it)
'10'
>>> it.seek(20) # Seeking past the end of the source isn't a problem
>>> list(it)
[]
>>> it.seek(0) # Resetting works even after hitting the end
>>> next(it), next(it), next(it)
('0', '1', '2')
The cache grows as the source iterable progresses, so beware of wrapping
very large or infinite iterables.
You may view the contents of the cache with the :meth:`elements` method.
That returns a :class:`SequenceView`, a view that updates automatically:
>>> it = seekable((str(n) for n in range(10)))
>>> next(it), next(it), next(it)
('0', '1', '2')
>>> elements = it.elements()
>>> elements
SequenceView(['0', '1', '2'])
>>> next(it)
'3'
>>> elements
SequenceView(['0', '1', '2', '3'])
"""
def __init__(self, iterable):
self._source = iter(iterable)
self._cache = []
self._index = None
def __iter__(self):
return self
def __next__(self):
if self._index is not None:
try:
item = self._cache[self._index]
except IndexError:
self._index = None
else:
self._index += 1
return item
item = next(self._source)
self._cache.append(item)
return item
next = __next__
def elements(self):
return SequenceView(self._cache)
def seek(self, index):
self._index = index
remainder = index - len(self._cache)
if remainder > 0:
consume(self, remainder)
class run_length(object):
"""
:func:`run_length.encode` compresses an iterable with run-length encoding.
It yields groups of repeated items with the count of how many times they
were repeated:
>>> uncompressed = 'abbcccdddd'
>>> list(run_length.encode(uncompressed))
[('a', 1), ('b', 2), ('c', 3), ('d', 4)]
:func:`run_length.decode` decompresses an iterable that was previously
compressed with run-length encoding. It yields the items of the
decompressed iterable:
>>> compressed = [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
>>> list(run_length.decode(compressed))
['a', 'b', 'b', 'c', 'c', 'c', 'd', 'd', 'd', 'd']
"""
@staticmethod
def encode(iterable):
return ((k, ilen(g)) for k, g in groupby(iterable))
@staticmethod
def decode(iterable):
return chain.from_iterable(repeat(k, n) for k, n in iterable)
def exactly_n(iterable, n, predicate=bool):
"""Return ``True`` if exactly ``n`` items in the iterable are ``True``
according to the *predicate* function.
>>> exactly_n([True, True, False], 2)
True
>>> exactly_n([True, True, False], 1)
False
>>> exactly_n([0, 1, 2, 3, 4, 5], 3, lambda x: x < 3)
True
The iterable will be advanced until ``n + 1`` truthy items are encountered,
so avoid calling it on infinite iterables.
"""
return len(take(n + 1, filter(predicate, iterable))) == n
def circular_shifts(iterable):
"""Return a list of circular shifts of *iterable*.
>>> circular_shifts(range(4))
[(0, 1, 2, 3), (1, 2, 3, 0), (2, 3, 0, 1), (3, 0, 1, 2)]
"""
lst = list(iterable)
return take(len(lst), windowed(cycle(lst), len(lst)))
def make_decorator(wrapping_func, result_index=0):
"""Return a decorator version of *wrapping_func*, which is a function that
modifies an iterable. *result_index* is the position in that function's
signature where the iterable goes.
This lets you use itertools on the "production end," i.e. at function
definition. This can augment what the function returns without changing the
function's code.
For example, to produce a decorator version of :func:`chunked`:
>>> from more_itertools import chunked
>>> chunker = make_decorator(chunked, result_index=0)
>>> @chunker(3)
... def iter_range(n):
... return iter(range(n))
...
>>> list(iter_range(9))
[[0, 1, 2], [3, 4, 5], [6, 7, 8]]
To only allow truthy items to be returned:
>>> truth_serum = make_decorator(filter, result_index=1)
>>> @truth_serum(bool)
... def boolean_test():
... return [0, 1, '', ' ', False, True]
...
>>> list(boolean_test())
[1, ' ', True]
The :func:`peekable` and :func:`seekable` wrappers make for practical
decorators:
>>> from more_itertools import peekable
>>> peekable_function = make_decorator(peekable)
>>> @peekable_function()
... def str_range(*args):
... return (str(x) for x in range(*args))
...
>>> it = str_range(1, 20, 2)
>>> next(it), next(it), next(it)
('1', '3', '5')
>>> it.peek()
'7'
>>> next(it)
'7'
"""
# See https://sites.google.com/site/bbayles/index/decorator_factory for
# notes on how this works.
def decorator(*wrapping_args, **wrapping_kwargs):
def outer_wrapper(f):
def inner_wrapper(*args, **kwargs):
result = f(*args, **kwargs)
wrapping_args_ = list(wrapping_args)
wrapping_args_.insert(result_index, result)
return wrapping_func(*wrapping_args_, **wrapping_kwargs)
return inner_wrapper
return outer_wrapper
return decorator
def map_reduce(iterable, keyfunc, valuefunc=None, reducefunc=None):
"""Return a dictionary that maps the items in *iterable* to categories
defined by *keyfunc*, transforms them with *valuefunc*, and
then summarizes them by category with *reducefunc*.
*valuefunc* defaults to the identity function if it is unspecified.
If *reducefunc* is unspecified, no summarization takes place:
>>> keyfunc = lambda x: x.upper()
>>> result = map_reduce('abbccc', keyfunc)
>>> sorted(result.items())
[('A', ['a']), ('B', ['b', 'b']), ('C', ['c', 'c', 'c'])]
Specifying *valuefunc* transforms the categorized items:
>>> keyfunc = lambda x: x.upper()
>>> valuefunc = lambda x: 1
>>> result = map_reduce('abbccc', keyfunc, valuefunc)
>>> sorted(result.items())
[('A', [1]), ('B', [1, 1]), ('C', [1, 1, 1])]
Specifying *reducefunc* summarizes the categorized items:
>>> keyfunc = lambda x: x.upper()
>>> valuefunc = lambda x: 1
>>> reducefunc = sum
>>> result = map_reduce('abbccc', keyfunc, valuefunc, reducefunc)
>>> sorted(result.items())
[('A', 1), ('B', 2), ('C', 3)]
You may want to filter the input iterable before applying the map/reduce
    procedure:
>>> all_items = range(30)
>>> items = [x for x in all_items if 10 <= x <= 20] # Filter
>>> keyfunc = lambda x: x % 2 # Evens map to 0; odds to 1
>>> categories = map_reduce(items, keyfunc=keyfunc)
>>> sorted(categories.items())
[(0, [10, 12, 14, 16, 18, 20]), (1, [11, 13, 15, 17, 19])]
>>> summaries = map_reduce(items, keyfunc=keyfunc, reducefunc=sum)
>>> sorted(summaries.items())
[(0, 90), (1, 75)]
Note that all items in the iterable are gathered into a list before the
summarization step, which may require significant storage.
The returned object is a :obj:`collections.defaultdict` with the
``default_factory`` set to ``None``, such that it behaves like a normal
dictionary.
"""
valuefunc = (lambda x: x) if (valuefunc is None) else valuefunc
ret = defaultdict(list)
for item in iterable:
key = keyfunc(item)
value = valuefunc(item)
ret[key].append(value)
if reducefunc is not None:
for key, value_list in ret.items():
ret[key] = reducefunc(value_list)
ret.default_factory = None
return ret
|
mpl-2.0
|
johnjosephhorton/create_project
|
templates/templates.py
|
1
|
5731
|
LATEX_BASE_PAPER = """
\\documentclass[11pt]{article}
\\usepackage{booktabs}
\\usepackage{dcolumn}
\\usepackage{epstopdf}
\\usepackage{fourier}
\\usepackage{fullpage}
\\usepackage{graphicx}
\\usepackage{hyperref}
\\usepackage{longtable}
\\usepackage{natbib}
\\usepackage{rotating}
\\usepackage{tabularx}
\\usepackage{amsmath}
\\usepackage{algorithmic}
\\usepackage{algorithm2e}
\\hypersetup{
colorlinks = TRUE,
citecolor=blue,
linkcolor=red,
urlcolor=black
}
\\begin{document}
\\title{Here is a really great title}
\\date{\\today}
\\author{John J. Horton \\\\ oDesk Research \\& Harvard Kennedy
School\\footnote{Author contact information, datasets and code are
currently or will be available at
\\href{http://www.john-joseph-horton.com/}{http://www.john-joseph-horton.com/}.}}
\\maketitle
\\begin{abstract}
\\noindent Here is a really great abstract. \\newline
\\noindent JEL J01, J24, J3
\\end{abstract}
\\section{Introduction}
\\cite{smith1999wealth} had some great ideas!
\\section{Getting some stuff done in R}
According to R's calculations, $1 + 1$ is equal to:
\\input{./numbers/tough_problem.txt}
\\subsection{Plots!}
\\begin{figure}[h]
\\centering
\\includegraphics[scale=0.25]{./plots/hist.png}
\\caption{Here is a figure}
\\label{fig:hist}
\\end{figure}
\\subsection{We can make R get data from our database}
\\input{./numbers/sql_output.txt}
\\section{Inputted Model}
\\input{model.tex}
\\section{Using matplotlib for making figures}
\\begin{figure}[h]
\\centering
\\includegraphics[scale=0.25]{./diagrams/matplotlib.png}
\\caption{Here is a matplotlib-constructed figure}
\\label{fig:matplotlib}
\\end{figure}
\\bibliographystyle{aer}
\\bibliography{%s.bib}
\\end{document}
"""
LATEX_INPUT_FILE="""
\\input{insitustart.tex}
Here is a model
\\input{insituend.tex}
"""
SQLMAKE = """groups:
P1:
setup: one_plus_one.sql
output:
- get_one_plus_one.sql
"""
SQLCODE_SETUP = """
CREATE OR REPLACE VIEW analytics.test as
SELECT 1 + 1;
"""
SQLCODE_OUTPUT = """
SELECT * FROM analytics.test;
"""
RMAKE = """scripts: [%s.R]"""
RCODE = """
library(ggplot2)
library(RPostgreSQL)
sink("../../writeup/numbers/tough_problem.txt")
cat(1+1)
sink()
png("../../writeup/plots/hist.png")
qplot(runif(100))
dev.off()
drv <- dbDriver("PostgreSQL")
con <- dbConnect(drv, %s)
df.raw <- dbGetQuery(con, "select * from analytics.test")
sink("../../writeup/numbers/sql_output.txt")
summary(df.raw)
sink()
"""
LOCAL_MAKE = """
import os
import sys
sys.path.append('%s')
import research_tools.create_paper as cp
if __name__ == '__main__':
options, args = cp.parse_terminal_input()
input_dir = os.getcwd()
cp.main(input_dir, options.output_path, options.flush, options.get_data, options.run_r, options.run_py)
"""
BIBTEX ="""
@book{smith1999wealth,
title={Wealth of nations},
author={Smith, A.},
year={1999},
publisher={Wiley Online Library}
}
"""
TEMPLATE_R = """
"""
LATEX_LOG_FILE_HEADER = """
<html>
<head>
<link rel="stylesheet" href="%s">
</head>
<body>
<a name="top">
"""
TEMPLATE_HTML_INDEX = """
<html>
<head>
<link rel="stylesheet" href="http://twitter.github.com/bootstrap/1.4.0/bootstrap.min.css">
<script>
function copyToClipboard (text) {
window.prompt ("Copy to clipboard: Ctrl+C, Enter", text);
}
</script>
</head>
<body>
%s
<h1>Resources</h1>
<ul>
<li><a href="./submit/%s.pdf" target="_blank">PDF of the paper</a></li>
<li><a href="./writeup/%s.tex.html">HTML of tex source</a></li>
<li><a href=".">Directory listing</a></li>
<li>All LaTeX Stitched Together <a href="./combined_file.tex">(tex)</a><a href="./combined_file.tex.html">(html)</a></li>
<li><a href="./%s">LaTeX Log File</a></li>
</ul>
</body>
<button type="button" onClick="copyToClipboard('%s')">Copy directory path</button>
</html>
"""
MATPLOTLIB_EXAMPLE = """
import numpy as np
import matplotlib.pyplot as plt
a = np.arange(0,3,.02)
b = np.arange(0,3,.02)
c = np.exp(a)
d = c[::-1]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(a,c,'k--',a,d,'k:',a,c+d,'k')
leg = ax.legend(('Model length', 'Data length', 'Total message length'),
'upper center', shadow=True)
ax.set_ylim([-1,20])
ax.grid(False)
ax.set_xlabel('Model complexity --->')
ax.set_ylabel('Message length --->')
ax.set_title('Minimum Message Length')
ax.set_yticklabels([])
ax.set_xticklabels([])
# set some legend properties. All the code below is optional. The
# defaults are usually sensible but if you need more control, this
# shows you how
# the matplotlib.patches.Rectangle instance surrounding the legend
frame = leg.get_frame()
frame.set_facecolor('0.80') # set the frame face color to light gray
# matplotlib.text.Text instances
for t in leg.get_texts():
t.set_fontsize('small') # the legend text fontsize
# matplotlib.lines.Line2D instances
for l in leg.get_lines():
l.set_linewidth(1.5) # the legend line width
#plt.show()
plt.savefig("../../writeup/diagrams/matplotlib.png",
format="png")
"""
LATEX_INSITU_START = """
\\documentclass[11pt]{article}
\\usepackage{booktabs}
\\usepackage{colortbl}
\\usepackage{dcolumn}
\\usepackage{epstopdf}
\\usepackage{fourier}
\\usepackage{fullpage}
\\usepackage{graphicx}
\\usepackage{hyperref}
\\usepackage{longtable}
\\usepackage{natbib}
\\usepackage{rotating}
\\usepackage{setspace}
\\usepackage{Sweave}
\\usepackage{tabularx}
\\hypersetup{
colorlinks,
citecolor=blue,
linkcolor=blue,
urlcolor=blue,
filecolor=white
}
\\newtheorem{proposition}{Proposition}
\\title{Here is a title}
\\begin{document}
\\maketitle
"""
LATEX_INSITU_END = """
\\end{document}
"""
|
gpl-2.0
|
Lamarn/ivoctPy
|
scan.py
|
1
|
7695
|
import os.path
import math
import numpy as np
import matplotlib.pyplot as plt
from skimage import filters, feature
from skimage.measure import CircleModel, ransac
class Scan:
def __init__(self, start_at):
self.matrix = []
self.cut_matrix = []
self.peaks = []
self.polar_views = []
self.debug = False
self.ascan_size = 512
self.start_at = start_at
self.surface_threshold = 0.37
def plot_cut_matrix(self):
plt.figure(), plt.imshow(self.cut_matrix)
def load_data(self, file_name):
"""Load binary data into scaled matrix."""
if os.path.isfile(file_name):
f = open(file_name, "rb")
# Load file from binary file
self.matrix = np.fromfile(f, np.float32, sep="")
# Reshape to matrix dimensions
self.matrix = np.reshape(self.matrix, (self.ascan_size, np.size(self.matrix) // self.ascan_size), order='F')
self.matrix = self.__scale_interval_zero_one(self.matrix)
self.cut_matrix = self.matrix[:, self.start_at:self.start_at + 5000]
self.preprocess_matrix()
print("Loading of data succefully finished.")
else:
print("Error loading file.")
@staticmethod
def __scale_interval_zero_one(matrix):
"""Scale matrix values down to interval [0, 1]."""
matrix = np.array(matrix)
min_v = np.min(matrix)
max_v = np.max(matrix)
quotient = 1.0 / (max_v - min_v)
print("Matrix scaled.")
return matrix * quotient
def find_peaks(self):
"""Find peaks from matrix, showing a sinus curve."""
matrix = self.cut_matrix
min_width = 850
max_width = 1400
skin_layer_cut = matrix[85:120, :]
skin_layer_med = filters.median(skin_layer_cut, np.ones([5, 5]))
skin_layer = feature.canny(skin_layer_med, sigma=1)
if self.debug:
plt.figure(), plt.imshow(skin_layer_med)
plt.figure(), plt.imshow(skin_layer)
plt.figure(), plt.imshow(skin_layer)
skin_layer_shape = np.shape(skin_layer)
peaks = []
min_value = skin_layer_shape[0]
# Find first peak
for c in range(0, 800):
for a in range(skin_layer_shape[0] - 1, 0, -1):
if skin_layer[a, c] and a < min_value:
min_value = a
peak_at = c
peaks.append(peak_at)
# Find following peaks
while peak_at + max_width < skin_layer_shape[1]:
min_value = skin_layer_shape[0]
temp_matrix = skin_layer[:, peaks[-1] + min_width: peaks[-1] + max_width]
for c in range(0, np.shape(temp_matrix)[1]):
for a in range(skin_layer_shape[0] - 1, 0, -1):
if temp_matrix[a, c] and a < min_value:
min_value = a
peak_at = c
peak_at = peaks[-1] + min_width + peak_at
peaks.append(peak_at)
self.peaks = peaks
print("Found peaks: " + str(peaks) + ". Searched from: " + str(self.start_at) + " until: " + str(
self.start_at + 5000))
# Plot vertical line, where peak was found.
if self.debug:
for i in peaks:
skin_layer[:, i] = 1
plt.figure(), plt.imshow(skin_layer)
def create_polar_views(self):
"""Create polar views and save in array."""
polar_vec = []
length_peaks = len(self.peaks)
for i in range(0, length_peaks):
if i + 1 < length_peaks:
matrix = self.cut_matrix[:, self.peaks[i]: self.peaks[i + 1]]
polar_matrix = np.empty([1024, 1024])
matrix_shape = np.shape(matrix)
for x in range(0, matrix_shape[1]):
for y in range(0, matrix_shape[0]):
xp = round(y * math.cos(2 * x * math.pi / matrix_shape[1]) + self.ascan_size)
yp = round(y * math.sin(2 * x * math.pi / matrix_shape[1]) + self.ascan_size)
polar_matrix[xp, yp] = matrix[y, x]
polar_vec.append(polar_matrix)
print(polar_matrix)
if self.debug:
plt.figure(), plt.imshow(polar_matrix)
self.polar_views = polar_vec
print("Succesfully saved all polar images.")
def find_circles(self):
for i in range(0, len(self.polar_views)):
self.polar_views[i] = self.interpolation_polar_view(self.polar_views[i])
polar_view_canny = feature.canny(filters.median(self.polar_views[i], np.ones([5, 5])))
points = np.array(np.nonzero(polar_view_canny)).T
model_robust, inliers = ransac(points, CircleModel, min_samples=3, residual_threshold=2, max_trials=1000)
cy, cx, r = model_robust.params
f, (ax0, ax1) = plt.subplots(1, 2)
ax0.imshow(self.polar_views[i])
ax1.imshow(self.polar_views[i])
ax1.plot(points[inliers, 1], points[inliers, 0], 'b.', markersize=1)
ax1.plot(points[~inliers, 1], points[~inliers, 0], 'g.', markersize=1)
circle = plt.Circle((cx, cy), radius=r, facecolor='none', linewidth=2)
ax0.add_patch(circle)
@staticmethod
def cartesian_coordinates(rho, fi):
# fi is in degrees (as produced by polar_coordinates), so convert before cos/sin
return round(rho * math.cos(math.radians(fi))), round(rho * math.sin(math.radians(fi)))
@staticmethod
def polar_coordinates(x, y):
if x == 0.0:
x = 0.1
rho = round(math.sqrt(x ** 2 + y ** 2))
fi = round(math.degrees(math.atan(y / x)))
# Add degrees adapted to quadrant
if x < 0 and y >= 0: # second quadrant
fi += 180
elif x < 0 and y < 0: # third quadrant
fi += 180
elif x >= 0 and y < 0: # fourth quadrant
fi += 360
return rho, fi
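# Worked example (illustrative): polar_coordinates(1, 1) returns
# (round(sqrt(2)), round(degrees(atan(1)))) == (1, 45); the quadrant
# corrections above only apply when x or y is negative.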
def interpolation_polar_view(self, matrix, width=2):
# width=2 is an assumed default so the single-argument call in find_circles works
for x in range(0, np.shape(matrix)[1]):
for y in range(0, np.shape(matrix)[0]):
x_new = x - 512
y_new = y - 512
if matrix[x, y] == 0 and (x_new ** 2 + y_new ** 2) < 262144:
values_in_range = []
rho, fi = self.polar_coordinates(x_new, y_new)
if self.debug:
print("[" + str(rho) + ", " + str(fi) + "]")
for i in range(-width, width):
for j in range(-2, 2):
x_near, y_near = self.cartesian_coordinates(rho + j, fi + i)
near_value = matrix[x_near, y_near]
if near_value != 0:
values_in_range.append(near_value)
if len(values_in_range) > 0:
matrix[x, y] = np.average(values_in_range)
values_in_range.clear()
return matrix
def preprocess_matrix(self):
"""Preprocess values of matrix, to get a homogeneous image."""
self.cut_matrix[self.cut_matrix > self.surface_threshold] = 1.0
self.cut_matrix[self.cut_matrix < 0] = 0.0
# Quantize the intermediate intensity bands to fixed levels
m1 = np.ma.masked_inside(self.cut_matrix, 0.0, 0.1)
m2 = np.ma.masked_inside(self.cut_matrix, 0.1, 0.2)
m3 = np.ma.masked_inside(self.cut_matrix, 0.2, 0.3)
m4 = np.ma.masked_inside(self.cut_matrix, 0.3, 0.37)
self.cut_matrix[np.ma.getmaskarray(m1)] = 0.1
self.cut_matrix[np.ma.getmaskarray(m2)] = 0.2
self.cut_matrix[np.ma.getmaskarray(m3)] = 0.3
self.cut_matrix[np.ma.getmaskarray(m4)] = 0.37
def load_scan(self, path):
self.load_data(path)
def process_scan(self):
self.find_peaks()
self.create_polar_views()
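# Minimal usage sketch (illustrative; the file name and start offset below are
# hypothetical, not part of the module):
#
#   scan = Scan(start_at=0)
#   scan.load_scan("bscan.bin")   # load raw A-scans and preprocess the matrix
#   scan.process_scan()           # find peaks and build the polar views
#   scan.find_circles()           # fit circles to each polar view with RANSAC
#   plt.show()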
|
gpl-3.0
|
basnijholt/holoviews
|
holoviews/tests/plotting/matplotlib/testplot.py
|
2
|
1128
|
from unittest import SkipTest
from holoviews.core.options import Store
from holoviews.element.comparison import ComparisonTestCase
import pyviz_comms as comms
try:
import holoviews.plotting.mpl # noqa
import matplotlib.pyplot as plt
mpl_renderer = Store.renderers['matplotlib']
except:
mpl_renderer = None
from .. import option_intersections
class TestPlotDefinitions(ComparisonTestCase):
known_clashes = [(('Arrow',), {'fontsize'})]
def test_matplotlib_plot_definitions(self):
self.assertEqual(option_intersections('matplotlib'), self.known_clashes)
class TestMPLPlot(ComparisonTestCase):
def setUp(self):
if not mpl_renderer:
raise SkipTest("Matplotlib required to test plot instantiation")
self.previous_backend = Store.current_backend
self.comm_manager = mpl_renderer.comm_manager
mpl_renderer.comm_manager = comms.CommManager
Store.current_backend = 'matplotlib'
def tearDown(self):
Store.current_backend = self.previous_backend
mpl_renderer.comm_manager = self.comm_manager
plt.close(plt.gcf())
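# These tests are normally collected by the project's test runner; assuming
# pytest is installed, a direct run would look something like:
#   python -m pytest holoviews/tests/plotting/matplotlib/testplot.py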
|
bsd-3-clause
|
ghislainv/roadless
|
Asia/roadless/run_modelling_steps.py
|
1
|
10555
|
#!/usr/bin/python
# ==============================================================================
# author :Ghislain Vieilledent
# email :[email protected], [email protected]
# web :https://ghislainv.github.io
# python_version :2.7
# license :GPLv3
# ==============================================================================
import os
import numpy as np
from patsy import dmatrices
import deforestprob as dfp
import matplotlib.pyplot as plt
import pickle
# run_modelling_steps
def run_modelling_steps(fcc_source="roadless"):
# Make output directory
dfp.make_dir("output_roadless")
# ========================================================
# Sample points
# ========================================================
dataset = dfp.sample(nsamp=10000, Seed=1234, csize=10,
var_dir="data",
input_forest_raster="fcc23.tif",
output_file="output_roadless/sample.txt",
blk_rows=0)
# To import data as pandas DataFrame if necessary
# import pandas as pd
# dataset = pd.read_table("output_roadless/sample.txt", delimiter=",")
# dataset.head(5)
# Descriptive statistics
# Model formulas
formula_1 = "fcc23 ~ dist_road + dist_town + dist_river + \
dist_defor + dist_edge + altitude + slope + aspect - 1"
# Standardized variables (mean=0, std=1)
formula_2 = "fcc23 ~ scale(dist_road) + scale(dist_town) + \
scale(dist_river) + scale(dist_defor) + scale(dist_edge) + \
scale(altitude) + scale(slope) + scale(aspect) - 1"
formulas = (formula_1, formula_2)
# Remove NA from data-set (otherwise scale() and
# model_binomial_iCAR don't work)
dataset = dataset.dropna(axis=0)
# Loop on formulas
for f in range(len(formulas)):
# Output file
of = "output_roadless/correlation_" + str(f) + ".pdf"
# Data
y, data = dmatrices(formulas[f], data=dataset,
return_type="dataframe")
# Plots
figs = dfp.plot.correlation(y=y, data=data,
plots_per_page=3,
figsize=(7, 8),
dpi=300,
output_file=of)
plt.close("all")
# ========================================================
# hSDM model
# ========================================================
# Set number of trials to one
dataset["trial"] = 1
# Spatial cells for spatial-autocorrelation
nneigh, adj = dfp.cellneigh(raster="data/fcc23.tif", csize=10, rank=1)
# List of variables
variables = ["C(pa)", "scale(altitude)", "scale(slope)",
"scale(dist_defor)", "scale(dist_edge)", "scale(dist_road)",
"scale(dist_town)", "scale(dist_river)"]
variables = np.array(variables)
# Run model while there are non-significant variables
var_remove = True
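# Backward selection: after each fit, variables whose posterior mean effect
# is >= 0 are dropped and the model is refit; the loop ends once no such
# variable remains.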
while(np.any(var_remove)):
# Formula
right_part = " + ".join(variables) + " + cell"
left_part = "I(1-fcc23) + trial ~ "
formula = left_part + right_part
# Model
mod_binomial_iCAR = dfp.model_binomial_iCAR(
# Observations
suitability_formula=formula, data=dataset,
# Spatial structure
n_neighbors=nneigh, neighbors=adj,
# Chains
burnin=1000, mcmc=1000, thin=1,
# Starting values
beta_start=-99)
# Ecological and statistical significance
effects = mod_binomial_iCAR.betas[1:]
# MCMC = mod_binomial_iCAR.mcmc
# CI_low = np.percentile(MCMC, 2.5, axis=0)[1:-2]
# CI_high = np.percentile(MCMC, 97.5, axis=0)[1:-2]
positive_effects = (effects >= 0)
# zero_in_CI = ((CI_low * CI_high) <= 0)
# Keeping only significant variables
var_remove = positive_effects
# var_remove = np.logical_or(positive_effects, zero_in_CI)
var_keep = np.logical_not(var_remove)
variables = variables[var_keep]
# Re-run the model with longer MCMC and estimated initial values
mod_binomial_iCAR = dfp.model_binomial_iCAR(
# Observations
suitability_formula=formula, data=dataset,
# Spatial structure
n_neighbors=nneigh, neighbors=adj,
# Chains
burnin=5000, mcmc=5000, thin=5,
# Starting values
beta_start=mod_binomial_iCAR.betas)
# Summary
print(mod_binomial_iCAR)
# Write summary in file
f = open("output_roadless/summary_hSDM.txt", "w")
f.write(str(mod_binomial_iCAR))
f.close()
# Plot
figs = mod_binomial_iCAR.plot(output_file="output_roadless/mcmc.pdf",
plots_per_page=3,
figsize=(9, 6),
dpi=300)
plt.close("all")
# ========================================================
# Resampling spatial random effects
# ========================================================
# Spatial random effects
rho = mod_binomial_iCAR.rho
# Resample
dfp.resample_rho(rho=rho, input_raster="data/fcc23.tif",
output_file="output_roadless/rho.tif",
csize_orig=10, csize_new=1)
# ========================================================
# Predicting spatial probability of deforestation
# ========================================================
# We assume dist_edge and dist_defor don't change between t2
# and t3 (deforestation ~1%). No need to recompute them.
# Rename aspect.tif in data directory to avoid NA where slope=0
os.rename("data/aspect.tif", "data/aspect.tif.bak")
# Compute predictions
dfp.predict(mod_binomial_iCAR, var_dir="data",
input_cell_raster="output_roadless/rho.tif",
input_forest_raster="data/forest/forest_t3.tif",
output_file="output_roadless/prob.tif",
blk_rows=128)
# Rename aspect.tif.bak
os.rename("data/aspect.tif.bak", "data/aspect.tif")
# ========================================================
# Mean annual deforestation rate (ha.yr-1)
# ========================================================
# Forest cover
fc = list()
for i in range(4):
rast = "data/forest/forest_t" + str(i) + ".tif"
val = dfp.countpix(input_raster=rast,
value=1)
fc.append(val["area"])
# Save results to disk
f = open("output_roadless/forest_cover.txt", "w")
for i in fc:
f.write(str(i) + "\n")
f.close()
# Annual deforestation
T = 10.0 if (fcc_source == "roadless") else 9.0
annual_defor = (fc[1] - fc[3]) / T
# Amount of deforestation (ha)
defor_10yr = np.rint(annual_defor * 10)
defor_35yr = np.rint(annual_defor * 35)
# ========================================================
# Predicting forest cover change
# ========================================================
# Compute future forest cover
stats = dfp.deforest(input_raster="output_roadless/prob.tif",
hectares=defor_35yr,
output_file="output_roadless/fcc_35yr.tif",
blk_rows=128)
# Save stats to disk with pickle
pickle.dump(stats, open("output_roadless/stats.pickle", "wb"))
# Plot histograms of probabilities
fig_freq = dfp.plot.freq_prob(stats,
output_file="output_roadless/freq_prob.png")
plt.close(fig_freq)
# Forest cover change with half deforestation
stats = dfp.deforest(input_raster="output_roadless/prob.tif",
hectares=np.rint(defor_35yr / 2.0),
output_file="output_roadless/fcc_35yr_half.tif",
blk_rows=128)
# Forest cover change after 10 years
stats = dfp.deforest(input_raster="output_roadless/prob.tif",
hectares=defor_10yr,
output_file="output_roadless/fcc_10yr.tif",
blk_rows=128)
# ========================================================
# Figures
# ========================================================
# Forest in 2015
fig_forest = dfp.plot.forest("data/forest/forest_t3.tif",
borders="data/ctry_PROJ.shp",
output_file="output_roadless/forest_t3.png")
plt.close(fig_forest)
# Forest-cover change 2005-2015
fig_fcc = dfp.plot.fcc("data/forest/fcc13.tif",
borders="data/ctry_PROJ.shp",
output_file="output_roadless/fcc13.png")
plt.close(fig_fcc)
# Original spatial random effects
fig_rho_orig = dfp.plot.rho("output_roadless/rho_orig.tif",
borders="data/ctry_PROJ.shp",
output_file="output_roadless/rho_orig.png")
plt.close(fig_rho_orig)
# Interpolated spatial random effects
fig_rho = dfp.plot.rho("output_roadless/rho.tif",
borders="data/ctry_PROJ.shp",
output_file="output_roadless/rho.png")
plt.close(fig_rho)
# Spatial probability of deforestation
fig_prob = dfp.plot.prob("output_roadless/prob.tif",
borders="data/ctry_PROJ.shp",
output_file="output_roadless/prob.png")
plt.close(fig_prob)
# Forest-cover change 2015-2050
fig_fcc_35yr = dfp.plot.fcc("output_roadless/fcc_35yr.tif",
borders="data/ctry_PROJ.shp",
output_file="output_roadless/fcc_35yr.png")
plt.close(fig_fcc_35yr)
# Forest-cover change 2015-2025
fig_fcc_10yr = dfp.plot.fcc("output_roadless/fcc_10yr.tif",
borders="data/ctry_PROJ.shp",
output_file="output_roadless/fcc_10yr.png")
plt.close(fig_fcc_10yr)
# Forest-cover change 2015-2050 with half deforestation
fig_fcc_35yr_half = dfp.plot.fcc("output_roadless/fcc_35yr_half.tif",
borders="data/ctry_PROJ.shp",
output_file="output_roadless/fcc_35yr_half.png")
plt.close(fig_fcc_35yr_half)
# End
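# Example invocation (illustrative only):
#
# if __name__ == "__main__":
#     run_modelling_steps(fcc_source="roadless")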
|
gpl-3.0
|
RPGOne/Skynet
|
imbalanced-learn-master/imblearn/under_sampling/random_under_sampler.py
|
1
|
5608
|
"""Class to perform random under-sampling."""
from __future__ import print_function
from __future__ import division
import numpy as np
from collections import Counter
from sklearn.utils import check_random_state
from ..base import SamplerMixin
class RandomUnderSampler(SamplerMixin):
"""Class to perform random under-sampling.
Under-sample the majority class(es) by randomly picking samples
with or without replacement.
Parameters
----------
ratio : str or float, optional (default='auto')
If 'auto', the ratio will be defined automatically to balance
the dataset. Otherwise, the ratio is defined as the number
of samples in the minority class over the number of samples
in the majority class.
return_indices : bool, optional (default=False)
Whether or not to return the indices of the samples randomly selected
from the majority class.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by np.random.
replacement : bool, optional (default=True)
Whether the under-sampling is performed with or without replacement.
Attributes
----------
min_c_ : str or int
The identifier of the minority class.
max_c_ : str or int
The identifier of the majority class.
stats_c_ : dict of str/int : int
A dictionary containing the number of occurrences of each class.
X_shape_ : tuple of int
Shape of the data `X` during fitting.
Notes
-----
This class supports multi-class.
Examples
--------
>>> from collections import Counter
>>> from sklearn.datasets import make_classification
>>> from imblearn.under_sampling import RandomUnderSampler
>>> X, y = make_classification(n_classes=2, class_sep=2, weights=[0.1, 0.9],
... n_informative=3, n_redundant=1, flip_y=0,
... n_features=20, n_clusters_per_class=1,
... n_samples=1000, random_state=10)
>>> print('Original dataset shape {}'.format(Counter(y)))
Original dataset shape Counter({1: 900, 0: 100})
>>> rus = RandomUnderSampler(random_state=42)
>>> X_res, y_res = rus.fit_sample(X, y)
>>> print('Resampled dataset shape {}'.format(Counter(y_res)))
Resampled dataset shape Counter({0: 100, 1: 100})
"""
def __init__(self, ratio='auto', return_indices=False, random_state=None,
replacement=True):
super(RandomUnderSampler, self).__init__(ratio=ratio)
self.return_indices = return_indices
self.random_state = random_state
self.replacement = replacement
def _sample(self, X, y):
"""Resample the dataset.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Matrix containing the data to be sampled.
y : ndarray, shape (n_samples, )
Corresponding label for each sample in X.
Returns
-------
X_resampled : ndarray, shape (n_samples_new, n_features)
The array containing the resampled data.
y_resampled : ndarray, shape (n_samples_new)
The corresponding label of `X_resampled`
idx_under : ndarray, shape (n_samples_new, )
If `return_indices` is `True`, an array containing the indices of
the samples that were selected is also returned.
"""
random_state = check_random_state(self.random_state)
# Compute the number of majority-class samples to keep
if self.ratio == 'auto':
num_samples = self.stats_c_[self.min_c_]
else:
num_samples = int(self.stats_c_[self.min_c_] / self.ratio)
# All the minority class samples will be preserved
X_resampled = X[y == self.min_c_]
y_resampled = y[y == self.min_c_]
# If we need to offer support for the indices
if self.return_indices:
idx_under = np.nonzero(y == self.min_c_)[0]
# Loop over the other classes under-picking at random
for key in self.stats_c_.keys():
# Skip the minority class; all of its samples are kept
if key == self.min_c_:
continue
# Pick some elements at random
indx = range(np.count_nonzero(y == key))
indx = random_state.choice(indx, size=num_samples,
replace=self.replacement)
# If we need to offer support for the indices selected
if self.return_indices:
idx_tmp = np.nonzero(y == key)[0][indx]
idx_under = np.concatenate((idx_under, idx_tmp), axis=0)
# Concatenate to the minority class
X_resampled = np.concatenate((X_resampled, X[y == key][indx]),
axis=0)
y_resampled = np.concatenate((y_resampled, y[y == key][indx]),
axis=0)
self.logger.info('Under-sampling performed: %s', Counter(y_resampled))
# Check if the indices of the samples selected should be returned as
# well
if self.return_indices:
# Return the indices of interest
return X_resampled, y_resampled, idx_under
else:
return X_resampled, y_resampled
|
bsd-3-clause
|
jseabold/scikit-learn
|
sklearn/externals/joblib/parallel.py
|
17
|
35626
|
"""
Helpers for embarrassingly parallel code.
"""
# Author: Gael Varoquaux < gael dot varoquaux at normalesup dot org >
# Copyright: 2010, Gael Varoquaux
# License: BSD 3 clause
from __future__ import division
import os
import sys
import gc
import warnings
from math import sqrt
import functools
import time
import threading
import itertools
from numbers import Integral
try:
import cPickle as pickle
except:
import pickle
from ._multiprocessing_helpers import mp
if mp is not None:
from .pool import MemmapingPool
from multiprocessing.pool import ThreadPool
from .format_stack import format_exc, format_outer_frames
from .logger import Logger, short_format_time
from .my_exceptions import TransportableException, _mk_exception
from .disk import memstr_to_kbytes
from ._compat import _basestring
VALID_BACKENDS = ['multiprocessing', 'threading']
# Environment variables to protect against bad situations when nesting
JOBLIB_SPAWNED_PROCESS = "__JOBLIB_SPAWNED_PARALLEL__"
# In seconds, should be big enough to hide multiprocessing dispatching
# overhead.
# This setting was found by running benchmarks/bench_auto_batching.py
# with various parameters on various platforms.
MIN_IDEAL_BATCH_DURATION = .2
# Should not be too high to avoid stragglers: long jobs running alone
# on a single worker while other workers have no work to process any more.
MAX_IDEAL_BATCH_DURATION = 2
# Under Linux or OS X the default start method of multiprocessing
# can cause third party libraries to crash. Under Python 3.4+ it is possible
# to set an environment variable to switch the default start method from
# 'fork' to 'forkserver' or 'spawn' to avoid this issue albeit at the cost
# of causing semantic changes and some additional pool instantiation overhead.
if hasattr(mp, 'get_context'):
method = os.environ.get('JOBLIB_START_METHOD', '').strip() or None
DEFAULT_MP_CONTEXT = mp.get_context(method=method)
else:
DEFAULT_MP_CONTEXT = None
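# For example (illustrative), launching Python with the environment variable
# JOBLIB_START_METHOD=forkserver set makes DEFAULT_MP_CONTEXT a 'forkserver'
# context on Python 3.4+, instead of the platform default ('fork' on Unix).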
class BatchedCalls(object):
"""Wrap a sequence of (func, args, kwargs) tuples as a single callable"""
def __init__(self, iterator_slice):
self.items = list(iterator_slice)
self._size = len(self.items)
def __call__(self):
return [func(*args, **kwargs) for func, args, kwargs in self.items]
def __len__(self):
return self._size
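# Illustrative example (not part of the library):
#   BatchedCalls([(len, ('abc',), {}), (sum, ([1, 2, 3],), {})])()
# evaluates both tasks in order and returns [3, 6].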
###############################################################################
# CPU count that works also when multiprocessing has been disabled via
# the JOBLIB_MULTIPROCESSING environment variable
def cpu_count():
""" Return the number of CPUs.
"""
if mp is None:
return 1
return mp.cpu_count()
###############################################################################
# For verbosity
def _verbosity_filter(index, verbose):
""" Returns False for indices increasingly apart, the distance
depending on the value of verbose.
We use a lag increasing as the square of index
"""
if not verbose:
return True
elif verbose > 10:
return False
if index == 0:
return False
verbose = .5 * (11 - verbose) ** 2
scale = sqrt(index / verbose)
next_scale = sqrt((index + 1) / verbose)
return (int(next_scale) == int(scale))
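# Worked example (illustrative): with verbose=5 the lag factor is
# .5 * (11 - 5) ** 2 == 18, so the filter returns False (i.e. a message is
# printed) at indices 0, 17, 71, 161, ... -- whenever int(sqrt((index + 1) / 18))
# exceeds int(sqrt(index / 18)).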
###############################################################################
class WorkerInterrupt(Exception):
""" An exception that is not KeyboardInterrupt to allow subprocesses
to be interrupted.
"""
pass
###############################################################################
class SafeFunction(object):
""" Wraps a function to make it exception with full traceback in
their representation.
Useful for parallel computing with multiprocessing, for which
exceptions cannot be captured.
"""
def __init__(self, func):
self.func = func
def __call__(self, *args, **kwargs):
try:
return self.func(*args, **kwargs)
except KeyboardInterrupt:
# We capture the KeyboardInterrupt and reraise it as
# something different, as multiprocessing does not
# interrupt processing for a KeyboardInterrupt
raise WorkerInterrupt()
except:
e_type, e_value, e_tb = sys.exc_info()
text = format_exc(e_type, e_value, e_tb, context=10,
tb_offset=1)
if issubclass(e_type, TransportableException):
raise
else:
raise TransportableException(text, e_type)
###############################################################################
def delayed(function, check_pickle=True):
"""Decorator used to capture the arguments of a function.
Pass `check_pickle=False` when:
- performing a possibly repeated check is too costly and has been done
already once outside of the call to delayed.
- when used in conjunction with `Parallel(backend='threading')`.
"""
# Try to pickle the input function, to catch the problems early when
# using with multiprocessing:
if check_pickle:
pickle.dumps(function)
def delayed_function(*args, **kwargs):
return function, args, kwargs
try:
delayed_function = functools.wraps(function)(delayed_function)
except AttributeError:
" functools.wraps fails on some callable objects "
return delayed_function
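# Illustrative example (not part of the library): delayed(abs)(-3) returns the
# tuple (abs, (-3,), {}), which Parallel later unpacks and calls.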
###############################################################################
class ImmediateComputeBatch(object):
"""Sequential computation of a batch of tasks.
This replicates the async computation API but actually does not delay
the computations when joblib.Parallel runs in sequential mode.
"""
def __init__(self, batch):
# Don't delay the application, to avoid keeping the input
# arguments in memory
self.results = batch()
def get(self):
return self.results
###############################################################################
class BatchCompletionCallBack(object):
"""Callback used by joblib.Parallel's multiprocessing backend.
This callable is executed by the parent process whenever a worker process
has returned the results of a batch of tasks.
It is used for progress reporting, to update the estimate of the batch
processing duration and to schedule the next batch of tasks to be
processed.
"""
def __init__(self, dispatch_timestamp, batch_size, parallel):
self.dispatch_timestamp = dispatch_timestamp
self.batch_size = batch_size
self.parallel = parallel
def __call__(self, out):
self.parallel.n_completed_tasks += self.batch_size
this_batch_duration = time.time() - self.dispatch_timestamp
if (self.parallel.batch_size == 'auto'
and self.batch_size == self.parallel._effective_batch_size):
# Update the smoothed streaming estimate of the duration of a batch
# from dispatch to completion
old_duration = self.parallel._smoothed_batch_duration
if old_duration == 0:
# First record of duration for this batch size after the last
# reset.
new_duration = this_batch_duration
else:
# Update the exponentially weighted average of the duration of
# batch for the current effective size.
new_duration = 0.8 * old_duration + 0.2 * this_batch_duration
self.parallel._smoothed_batch_duration = new_duration
self.parallel.print_progress()
if self.parallel._original_iterator is not None:
self.parallel.dispatch_next()
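# Worked example of the smoothing above (illustrative, not from the original
# source): if the previous smoothed batch duration was 0.40s and the batch
# that just completed took 0.60s, the new estimate is
# 0.8 * 0.40 + 0.2 * 0.60 == 0.44s.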
###############################################################################
class Parallel(Logger):
''' Helper class for readable parallel mapping.
Parameters
-----------
n_jobs: int, default: 1
The maximum number of concurrently running jobs, such as the number
of Python worker processes when backend="multiprocessing"
or the size of the thread-pool when backend="threading".
If -1 all CPUs are used. If 1 is given, no parallel computing code
is used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all
CPUs but one are used.
backend: str or None, default: 'multiprocessing'
Specify the parallelization backend implementation.
Supported backends are:
- "multiprocessing" used by default, can induce some
communication and memory overhead when exchanging input and
            output data with the worker Python processes.
- "threading" is a very low-overhead backend but it suffers
from the Python Global Interpreter Lock if the called function
relies a lot on Python objects. "threading" is mostly useful
when the execution bottleneck is a compiled extension that
explicitly releases the GIL (for instance a Cython loop wrapped
in a "with nogil" block or an expensive call to a library such
as NumPy).
verbose: int, optional
The verbosity level: if non zero, progress messages are
printed. Above 50, the output is sent to stdout.
The frequency of the messages increases with the verbosity level.
        If it is more than 10, all iterations are reported.
pre_dispatch: {'all', integer, or expression, as in '3*n_jobs'}
The number of batches (of tasks) to be pre-dispatched.
        Default is '2*n_jobs'. When batch_size="auto" this is a reasonable
        default and the multiprocessing workers should never starve.
batch_size: int or 'auto', default: 'auto'
The number of atomic tasks to dispatch at once to each
worker. When individual evaluations are very fast, multiprocessing
can be slower than sequential computation because of the overhead.
Batching fast computations together can mitigate this.
The ``'auto'`` strategy keeps track of the time it takes for a batch
to complete, and dynamically adjusts the batch size to keep the time
on the order of half a second, using a heuristic. The initial batch
size is 1.
``batch_size="auto"`` with ``backend="threading"`` will dispatch
batches of a single task at a time as the threading backend has
very little overhead and using larger batch size has not proved to
bring any gain in that case.
temp_folder: str, optional
Folder to be used by the pool for memmaping large arrays
for sharing memory with worker processes. If None, this will try in
order:
- a folder pointed by the JOBLIB_TEMP_FOLDER environment variable,
- /dev/shm if the folder exists and is writable: this is a RAMdisk
filesystem available by default on modern Linux distributions,
- the default system temporary folder that can be overridden
with TMP, TMPDIR or TEMP environment variables, typically /tmp
under Unix operating systems.
Only active when backend="multiprocessing".
    max_nbytes: int, str, or None, optional, 1M by default
Threshold on the size of arrays passed to the workers that
triggers automated memory mapping in temp_folder. Can be an int
in Bytes, or a human-readable string, e.g., '1M' for 1 megabyte.
Use None to disable memmaping of large arrays.
Only active when backend="multiprocessing".
Notes
-----
This object uses the multiprocessing module to compute in
parallel the application of a function to many different
    arguments. The main features it brings in addition to
    using the raw multiprocessing API are (see examples for details):
* More readable code, in particular since it avoids
constructing list of arguments.
* Easier debugging:
- informative tracebacks even when the error happens on
the client side
            - using 'n_jobs=1' makes it possible to turn off parallel computing
for debugging without changing the codepath
- early capture of pickling errors
* An optional progress meter.
        * Interruption of multiprocess jobs with 'Ctrl-C'
* Flexible pickling control for the communication to and from
the worker processes.
* Ability to use shared memory efficiently with worker
processes for large numpy-based datastructures.
Examples
--------
A simple example:
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
Reshaping the output when the function has several return
values:
>>> from math import modf
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=1)(delayed(modf)(i/2.) for i in range(10))
>>> res, i = zip(*r)
>>> res
(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5)
>>> i
(0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0)
The progress meter: the higher the value of `verbose`, the more
messages::
>>> from time import sleep
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=2, verbose=5)(delayed(sleep)(.1) for _ in range(10)) #doctest: +SKIP
[Parallel(n_jobs=2)]: Done 1 out of 10 | elapsed: 0.1s remaining: 0.9s
[Parallel(n_jobs=2)]: Done 3 out of 10 | elapsed: 0.2s remaining: 0.5s
[Parallel(n_jobs=2)]: Done 6 out of 10 | elapsed: 0.3s remaining: 0.2s
[Parallel(n_jobs=2)]: Done 9 out of 10 | elapsed: 0.5s remaining: 0.1s
[Parallel(n_jobs=2)]: Done 10 out of 10 | elapsed: 0.5s finished
Traceback example, note how the line of the error is indicated
as well as the values of the parameter passed to the function that
triggered the exception, even though the traceback happens in the
child process::
>>> from heapq import nlargest
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=2)(delayed(nlargest)(2, n) for n in (range(4), 'abcde', 3)) #doctest: +SKIP
#...
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
TypeError Mon Nov 12 11:37:46 2012
PID: 12934 Python 2.7.3: /usr/bin/python
...........................................................................
/usr/lib/python2.7/heapq.pyc in nlargest(n=2, iterable=3, key=None)
419 if n >= size:
420 return sorted(iterable, key=key, reverse=True)[:n]
421
422 # When key is none, use simpler decoration
423 if key is None:
--> 424 it = izip(iterable, count(0,-1)) # decorate
425 result = _nlargest(n, it)
426 return map(itemgetter(0), result) # undecorate
427
428 # General case, slowest method
TypeError: izip argument #1 must support iteration
___________________________________________________________________________
Using pre_dispatch in a producer/consumer situation, where the
data is generated on the fly. Note how the producer is first
    called 3 times before the parallel loop is initiated, and then
called to generate new data on the fly. In this case the total
number of iterations cannot be reported in the progress messages::
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> def producer():
... for i in range(6):
... print('Produced %s' % i)
... yield i
>>> out = Parallel(n_jobs=2, verbose=100, pre_dispatch='1.5*n_jobs')(
... delayed(sqrt)(i) for i in producer()) #doctest: +SKIP
Produced 0
Produced 1
Produced 2
[Parallel(n_jobs=2)]: Done 1 jobs | elapsed: 0.0s
Produced 3
[Parallel(n_jobs=2)]: Done 2 jobs | elapsed: 0.0s
Produced 4
[Parallel(n_jobs=2)]: Done 3 jobs | elapsed: 0.0s
Produced 5
[Parallel(n_jobs=2)]: Done 4 jobs | elapsed: 0.0s
[Parallel(n_jobs=2)]: Done 5 out of 6 | elapsed: 0.0s remaining: 0.0s
[Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s finished
'''
def __init__(self, n_jobs=1, backend='multiprocessing', verbose=0,
pre_dispatch='2 * n_jobs', batch_size='auto',
temp_folder=None, max_nbytes='1M', mmap_mode='r'):
self.verbose = verbose
self._mp_context = DEFAULT_MP_CONTEXT
if backend is None:
# `backend=None` was supported in 0.8.2 with this effect
backend = "multiprocessing"
elif hasattr(backend, 'Pool') and hasattr(backend, 'Lock'):
# Make it possible to pass a custom multiprocessing context as
# backend to change the start method to forkserver or spawn or
# preload modules on the forkserver helper process.
self._mp_context = backend
backend = "multiprocessing"
if backend not in VALID_BACKENDS:
raise ValueError("Invalid backend: %s, expected one of %r"
% (backend, VALID_BACKENDS))
self.backend = backend
self.n_jobs = n_jobs
if (batch_size == 'auto'
or isinstance(batch_size, Integral) and batch_size > 0):
self.batch_size = batch_size
else:
raise ValueError(
"batch_size must be 'auto' or a positive integer, got: %r"
% batch_size)
self.pre_dispatch = pre_dispatch
self._temp_folder = temp_folder
if isinstance(max_nbytes, _basestring):
self._max_nbytes = 1024 * memstr_to_kbytes(max_nbytes)
else:
self._max_nbytes = max_nbytes
self._mmap_mode = mmap_mode
# Not starting the pool in the __init__ is a design decision, to be
# able to close it ASAP, and not burden the user with closing it
# unless they choose to use the context manager API with a with block.
self._pool = None
self._output = None
self._jobs = list()
self._managed_pool = False
        # This lock is used to coordinate the main thread of this process with
        # the async callback thread of the pool.
self._lock = threading.Lock()
def __enter__(self):
self._managed_pool = True
self._initialize_pool()
return self
def __exit__(self, exc_type, exc_value, traceback):
self._terminate_pool()
self._managed_pool = False
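    # Hedged usage sketch (added note, not part of the original source): the
    # __enter__/__exit__ pair above lets one pool be reused across calls, e.g.
    #
    #     >>> from math import sqrt
    #     >>> with Parallel(n_jobs=2) as parallel:
    #     ...     out = parallel(delayed(sqrt)(i) for i in range(4))
    #     ...     out += parallel(delayed(sqrt)(i) for i in range(4, 8))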
def _effective_n_jobs(self):
n_jobs = self.n_jobs
if n_jobs == 0:
raise ValueError('n_jobs == 0 in Parallel has no meaning')
elif mp is None or n_jobs is None:
# multiprocessing is not available or disabled, fallback
# to sequential mode
return 1
elif n_jobs < 0:
n_jobs = max(mp.cpu_count() + 1 + n_jobs, 1)
return n_jobs
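    # Illustrative arithmetic (added note, not from the original source):
    # assuming an 8-CPU machine, n_jobs=-1 gives max(8 + 1 - 1, 1) == 8
    # workers, n_jobs=-2 gives 7, and n_jobs=None (or mp unavailable) falls
    # back to sequential mode with a single worker.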
def _initialize_pool(self):
"""Build a process or thread pool and return the number of workers"""
n_jobs = self._effective_n_jobs()
# The list of exceptions that we will capture
self.exceptions = [TransportableException]
if n_jobs == 1:
# Sequential mode: do not use a pool instance to avoid any
# useless dispatching overhead
self._pool = None
elif self.backend == 'threading':
self._pool = ThreadPool(n_jobs)
elif self.backend == 'multiprocessing':
if mp.current_process().daemon:
# Daemonic processes cannot have children
self._pool = None
warnings.warn(
'Multiprocessing-backed parallel loops cannot be nested,'
' setting n_jobs=1',
stacklevel=3)
return 1
elif threading.current_thread().name != 'MainThread':
                # Prevent posix fork inside non-main posix threads
self._pool = None
warnings.warn(
'Multiprocessing backed parallel loops cannot be nested'
' below threads, setting n_jobs=1',
stacklevel=3)
return 1
else:
already_forked = int(os.environ.get(JOBLIB_SPAWNED_PROCESS, 0))
if already_forked:
raise ImportError('[joblib] Attempting to do parallel computing '
'without protecting your import on a system that does '
'not support forking. To use parallel-computing in a '
'script, you must protect your main loop using "if '
"__name__ == '__main__'"
'". Please see the joblib documentation on Parallel '
'for more information'
)
# Set an environment variable to avoid infinite loops
os.environ[JOBLIB_SPAWNED_PROCESS] = '1'
# Make sure to free as much memory as possible before forking
gc.collect()
poolargs = dict(
max_nbytes=self._max_nbytes,
mmap_mode=self._mmap_mode,
temp_folder=self._temp_folder,
verbose=max(0, self.verbose - 50),
context_id=0, # the pool is used only for one call
)
if self._mp_context is not None:
# Use Python 3.4+ multiprocessing context isolation
poolargs['context'] = self._mp_context
self._pool = MemmapingPool(n_jobs, **poolargs)
# We are using multiprocessing, we also want to capture
# KeyboardInterrupts
self.exceptions.extend([KeyboardInterrupt, WorkerInterrupt])
else:
raise ValueError("Unsupported backend: %s" % self.backend)
return n_jobs
def _terminate_pool(self):
if self._pool is not None:
self._pool.close()
self._pool.terminate() # terminate does a join()
self._pool = None
if self.backend == 'multiprocessing':
os.environ.pop(JOBLIB_SPAWNED_PROCESS, 0)
def _dispatch(self, batch):
"""Queue the batch for computing, with or without multiprocessing
WARNING: this method is not thread-safe: it should be only called
indirectly via dispatch_one_batch.
"""
# If job.get() catches an exception, it closes the queue:
if self._aborting:
return
if self._pool is None:
job = ImmediateComputeBatch(batch)
self._jobs.append(job)
self.n_dispatched_batches += 1
self.n_dispatched_tasks += len(batch)
self.n_completed_tasks += len(batch)
if not _verbosity_filter(self.n_dispatched_batches, self.verbose):
self._print('Done %3i tasks | elapsed: %s',
(self.n_completed_tasks,
short_format_time(time.time() - self._start_time)
))
else:
dispatch_timestamp = time.time()
cb = BatchCompletionCallBack(dispatch_timestamp, len(batch), self)
job = self._pool.apply_async(SafeFunction(batch), callback=cb)
self._jobs.append(job)
self.n_dispatched_tasks += len(batch)
self.n_dispatched_batches += 1
def dispatch_next(self):
"""Dispatch more data for parallel processing
This method is meant to be called concurrently by the multiprocessing
callback. We rely on the thread-safety of dispatch_one_batch to protect
against concurrent consumption of the unprotected iterator.
"""
if not self.dispatch_one_batch(self._original_iterator):
self._iterating = False
self._original_iterator = None
def dispatch_one_batch(self, iterator):
"""Prefetch the tasks for the next batch and dispatch them.
The effective size of the batch is computed here.
If there are no more jobs to dispatch, return False, else return True.
The iterator consumption and dispatching is protected by the same
lock so calling this function should be thread safe.
"""
if self.batch_size == 'auto' and self.backend == 'threading':
# Batching is never beneficial with the threading backend
batch_size = 1
elif self.batch_size == 'auto':
old_batch_size = self._effective_batch_size
batch_duration = self._smoothed_batch_duration
if (0 < batch_duration < MIN_IDEAL_BATCH_DURATION):
# The current batch size is too small: the duration of the
# processing of a batch of task is not large enough to hide
# the scheduling overhead.
ideal_batch_size = int(
old_batch_size * MIN_IDEAL_BATCH_DURATION / batch_duration)
                # Multiply by two to limit oscillations between min and max.
batch_size = max(2 * ideal_batch_size, 1)
self._effective_batch_size = batch_size
if self.verbose >= 10:
self._print("Batch computation too fast (%.4fs.) "
"Setting batch_size=%d.", (
batch_duration, batch_size))
elif (batch_duration > MAX_IDEAL_BATCH_DURATION and
old_batch_size >= 2):
# The current batch size is too big. If we schedule overly long
# running batches some CPUs might wait with nothing left to do
                # while a couple of CPUs are left processing a few long running
# batches. Better reduce the batch size a bit to limit the
# likelihood of scheduling such stragglers.
self._effective_batch_size = batch_size = old_batch_size // 2
if self.verbose >= 10:
self._print("Batch computation too slow (%.2fs.) "
"Setting batch_size=%d.", (
batch_duration, batch_size))
else:
# No batch size adjustment
batch_size = old_batch_size
if batch_size != old_batch_size:
# Reset estimation of the smoothed mean batch duration: this
# estimate is updated in the multiprocessing apply_async
# CallBack as long as the batch_size is constant. Therefore
# we need to reset the estimate whenever we re-tune the batch
# size.
self._smoothed_batch_duration = 0
else:
# Fixed batch size strategy
batch_size = self.batch_size
with self._lock:
tasks = BatchedCalls(itertools.islice(iterator, batch_size))
if not tasks:
# No more tasks available in the iterator: tell caller to stop.
return False
else:
self._dispatch(tasks)
return True
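    # Worked example of the 'auto' strategy above (illustrative only,
    # assuming MIN_IDEAL_BATCH_DURATION == 0.2s for the sake of the
    # arithmetic): with old_batch_size == 1 and a smoothed duration of 0.01s,
    # ideal_batch_size == int(1 * 0.2 / 0.01) == 20, so the next batches are
    # dispatched with batch_size == max(2 * 20, 1) == 40, and the smoothed
    # duration estimate is reset to 0 because the size changed.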
def _print(self, msg, msg_args):
"""Display the message on stout or stderr depending on verbosity"""
# XXX: Not using the logger framework: need to
# learn to use logger better.
if not self.verbose:
return
if self.verbose < 50:
writer = sys.stderr.write
else:
writer = sys.stdout.write
msg = msg % msg_args
writer('[%s]: %s\n' % (self, msg))
def print_progress(self):
"""Display the process of the parallel execution only a fraction
of time, controlled by self.verbose.
"""
if not self.verbose:
return
elapsed_time = time.time() - self._start_time
        # This is heuristic code to print messages only about 'verbose' times
# The challenge is that we may not know the queue length
if self._original_iterator:
if _verbosity_filter(self.n_dispatched_batches, self.verbose):
return
self._print('Done %3i tasks | elapsed: %s',
(self.n_completed_tasks,
short_format_time(elapsed_time),
))
else:
index = self.n_dispatched_batches
# We are finished dispatching
total_tasks = self.n_dispatched_tasks
# We always display the first loop
if not index == 0:
# Display depending on the number of remaining items
# A message as soon as we finish dispatching, cursor is 0
cursor = (total_tasks - index + 1
- self._pre_dispatch_amount)
frequency = (total_tasks // self.verbose) + 1
is_last_item = (index + 1 == total_tasks)
if (is_last_item or cursor % frequency):
return
remaining_time = (elapsed_time / (index + 1) *
(self.n_dispatched_tasks - index - 1.))
self._print('Done %3i out of %3i | elapsed: %s remaining: %s',
(index + 1,
total_tasks,
short_format_time(elapsed_time),
short_format_time(remaining_time),
))
def retrieve(self):
self._output = list()
while self._iterating or len(self._jobs) > 0:
if len(self._jobs) == 0:
# Wait for an async callback to dispatch new jobs
time.sleep(0.01)
continue
# We need to be careful: the job list can be filling up as
            # we empty it and Python lists are not thread-safe by default, hence
# the use of the lock
with self._lock:
job = self._jobs.pop(0)
try:
self._output.extend(job.get())
except tuple(self.exceptions) as exception:
# Stop dispatching any new job in the async callback thread
self._aborting = True
if isinstance(exception, TransportableException):
# Capture exception to add information on the local
# stack in addition to the distant stack
this_report = format_outer_frames(context=10,
stack_start=1)
report = """Multiprocessing exception:
%s
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
%s""" % (this_report, exception.message)
# Convert this to a JoblibException
exception_type = _mk_exception(exception.etype)[0]
exception = exception_type(report)
# Kill remaining running processes without waiting for
# the results as we will raise the exception we got back
# to the caller instead of returning any result.
with self._lock:
self._terminate_pool()
if self._managed_pool:
# In case we had to terminate a managed pool, let
# us start a new one to ensure that subsequent calls
# to __call__ on the same Parallel instance will get
# a working pool as they expect.
self._initialize_pool()
raise exception
def __call__(self, iterable):
if self._jobs:
raise ValueError('This Parallel instance is already running')
# A flag used to abort the dispatching of jobs in case an
# exception is found
self._aborting = False
if not self._managed_pool:
n_jobs = self._initialize_pool()
else:
n_jobs = self._effective_n_jobs()
if self.batch_size == 'auto':
self._effective_batch_size = 1
iterator = iter(iterable)
pre_dispatch = self.pre_dispatch
if pre_dispatch == 'all' or n_jobs == 1:
# prevent further dispatch via multiprocessing callback thread
self._original_iterator = None
self._pre_dispatch_amount = 0
else:
self._original_iterator = iterator
if hasattr(pre_dispatch, 'endswith'):
pre_dispatch = eval(pre_dispatch)
self._pre_dispatch_amount = pre_dispatch = int(pre_dispatch)
# The main thread will consume the first pre_dispatch items and
# the remaining items will later be lazily dispatched by async
# callbacks upon task completions.
iterator = itertools.islice(iterator, pre_dispatch)
self._start_time = time.time()
self.n_dispatched_batches = 0
self.n_dispatched_tasks = 0
self.n_completed_tasks = 0
self._smoothed_batch_duration = 0.0
try:
self._iterating = True
while self.dispatch_one_batch(iterator):
pass
if pre_dispatch == "all" or n_jobs == 1:
                # The iterable was consumed all at once by the dispatch loop above.
# No need to wait for async callbacks to trigger to
# consumption.
self._iterating = False
self.retrieve()
# Make sure that we get a last message telling us we are done
elapsed_time = time.time() - self._start_time
self._print('Done %3i out of %3i | elapsed: %s finished',
(len(self._output), len(self._output),
short_format_time(elapsed_time)))
finally:
if not self._managed_pool:
self._terminate_pool()
self._jobs = list()
output = self._output
self._output = None
return output
def __repr__(self):
return '%s(n_jobs=%s)' % (self.__class__.__name__, self.n_jobs)
|
bsd-3-clause
|
WilsonWangTHU/clothesDetection
|
lib/fast_rcnn/test.py
|
1
|
12551
|
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Test a Fast R-CNN network on an imdb (image database)."""
from fast_rcnn.config import cfg, get_output_dir
import argparse
from utils.timer import Timer
import numpy as np
import cv2
import caffe
from utils.cython_nms import nms
import cPickle
import heapq
from utils.blob import im_list_to_blob
import os
def _get_image_blob(im):
"""Converts an image into a network input.
Arguments:
im (ndarray): a color image in BGR order
Returns:
blob (ndarray): a data blob holding an image pyramid
im_scale_factors (list): list of image scales (relative to im) used
in the image pyramid
"""
im_orig = im.astype(np.float32, copy=True)
im_orig -= cfg.PIXEL_MEANS
im_shape = im_orig.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
processed_ims = []
im_scale_factors = []
for target_size in cfg.TEST.SCALES:
im_scale = float(target_size) / float(im_size_min)
# Prevent the biggest axis from being more than MAX_SIZE
if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
interpolation=cv2.INTER_LINEAR)
im_scale_factors.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, np.array(im_scale_factors)
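# Illustrative scaling example (added note, not part of the original source),
# assuming the usual Fast R-CNN settings TEST.SCALES = (600,) and
# TEST.MAX_SIZE = 1000: for a 480x640 BGR image, im_scale = 600 / 480 = 1.25
# and the long side becomes 640 * 1.25 = 800 <= 1000, so the single pyramid
# level is the image resized to 600x800 with scale factor 1.25.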
def _get_rois_blob(im_rois, im_scale_factors):
"""Converts RoIs into network inputs.
Arguments:
im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
im_scale_factors (list): scale factors as returned by _get_image_blob
Returns:
blob (ndarray): R x 5 matrix of RoIs in the image pyramid
"""
rois, levels = _project_im_rois(im_rois, im_scale_factors)
rois_blob = np.hstack((levels, rois))
return rois_blob.astype(np.float32, copy=False)
def _project_im_rois(im_rois, scales):
"""Project image RoIs into the image pyramid built by _get_image_blob.
Arguments:
im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
scales (list): scale factors as returned by _get_image_blob
Returns:
rois (ndarray): R x 4 matrix of projected RoI coordinates
levels (list): image pyramid levels used by each projected RoI
"""
im_rois = im_rois.astype(np.float, copy=False)
if len(scales) > 1:
widths = im_rois[:, 2] - im_rois[:, 0] + 1
heights = im_rois[:, 3] - im_rois[:, 1] + 1
areas = widths * heights
scaled_areas = areas[:, np.newaxis] * (scales[np.newaxis, :] ** 2)
diff_areas = np.abs(scaled_areas - 224 * 224)
levels = diff_areas.argmin(axis=1)[:, np.newaxis]
else:
levels = np.zeros((im_rois.shape[0], 1), dtype=np.int)
rois = im_rois * scales[levels]
return rois, levels
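# Worked example of the level selection above (illustrative): a 100x100 RoI
# with scales [1.25, 2.5] has scaled areas 10000 * 1.25**2 = 15625 and
# 10000 * 2.5**2 = 62500; their distances to 224 * 224 = 50176 are 34551 and
# 12324, so the RoI is assigned to pyramid level 1 and its coordinates are
# multiplied by 2.5.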
def _get_blobs(im, rois):
"""Convert an image and RoIs within that image into network inputs."""
blobs = {'data' : None, 'rois' : None}
blobs['data'], im_scale_factors = _get_image_blob(im)
blobs['rois'] = _get_rois_blob(rois, im_scale_factors)
return blobs, im_scale_factors
def _bbox_pred(boxes, box_deltas):
"""Transform the set of class-agnostic boxes into class-specific boxes
by applying the predicted offsets (box_deltas)
"""
if boxes.shape[0] == 0:
return np.zeros((0, box_deltas.shape[1]))
boxes = boxes.astype(np.float, copy=False)
widths = boxes[:, 2] - boxes[:, 0] + cfg.EPS
heights = boxes[:, 3] - boxes[:, 1] + cfg.EPS
ctr_x = boxes[:, 0] + 0.5 * widths
ctr_y = boxes[:, 1] + 0.5 * heights
dx = box_deltas[:, 0::4]
dy = box_deltas[:, 1::4]
dw = box_deltas[:, 2::4]
dh = box_deltas[:, 3::4]
pred_ctr_x = dx * widths[:, np.newaxis] + ctr_x[:, np.newaxis]
pred_ctr_y = dy * heights[:, np.newaxis] + ctr_y[:, np.newaxis]
pred_w = np.exp(dw) * widths[:, np.newaxis]
pred_h = np.exp(dh) * heights[:, np.newaxis]
pred_boxes = np.zeros(box_deltas.shape)
# x1
pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w
# y1
pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h
# x2
pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w
# y2
pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h
return pred_boxes
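# Hedged numeric sketch of the transform above (cfg.EPS treated as
# negligible): a proposal (0, 0, 10, 10) has width = height = 10 and center
# (5, 5); with deltas (dx, dy, dw, dh) = (0.1, 0, 0, 0) the center moves to
# (6, 5) while exp(0) keeps the size at 10, giving the predicted box
# (1, 0, 11, 10).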
def _clip_boxes(boxes, im_shape):
"""Clip boxes to image boundaries."""
# x1 >= 0
boxes[:, 0::4] = np.maximum(boxes[:, 0::4], 0)
# y1 >= 0
boxes[:, 1::4] = np.maximum(boxes[:, 1::4], 0)
# x2 < im_shape[1]
boxes[:, 2::4] = np.minimum(boxes[:, 2::4], im_shape[1] - 1)
# y2 < im_shape[0]
boxes[:, 3::4] = np.minimum(boxes[:, 3::4], im_shape[0] - 1)
return boxes
def im_detect(net, im, boxes):
"""Detect object classes in an image given object proposals.
Arguments:
net (caffe.Net): Fast R-CNN network to use
im (ndarray): color image to test (in BGR order)
boxes (ndarray): R x 4 array of object proposals
Returns:
scores (ndarray): R x K array of object class scores (K includes
background as object category 0)
boxes (ndarray): R x (4*K) array of predicted bounding boxes
"""
blobs, unused_im_scale_factors = _get_blobs(im, boxes)
# When mapping from image ROIs to feature map ROIs, there's some aliasing
# (some distinct image ROIs get mapped to the same feature ROI).
# Here, we identify duplicate feature ROIs, so we only compute features
# on the unique subset.
if cfg.DEDUP_BOXES > 0:
v = np.array([1, 1e3, 1e6, 1e9, 1e12])
hashes = np.round(blobs['rois'] * cfg.DEDUP_BOXES).dot(v)
_, index, inv_index = np.unique(hashes, return_index=True,
return_inverse=True)
blobs['rois'] = blobs['rois'][index, :]
boxes = boxes[index, :]
# reshape network inputs
net.blobs['data'].reshape(*(blobs['data'].shape))
net.blobs['rois'].reshape(*(blobs['rois'].shape))
blobs_out = net.forward(data=blobs['data'].astype(np.float32, copy=False),
rois=blobs['rois'].astype(np.float32, copy=False))
if cfg.TEST.SVM:
# use the raw scores before softmax under the assumption they
# were trained as linear SVMs
scores = net.blobs['cls_score'].data
else:
# use softmax estimated probabilities
scores = blobs_out['cls_prob']
if cfg.TEST.BBOX_REG:
# Apply bounding-box regression deltas
box_deltas = blobs_out['bbox_pred']
pred_boxes = _bbox_pred(boxes, box_deltas)
pred_boxes = _clip_boxes(pred_boxes, im.shape)
else:
# Simply repeat the boxes, once for each class
pred_boxes = np.tile(boxes, (1, scores.shape[1]))
if cfg.DEDUP_BOXES > 0:
# Map scores and predictions back to the original set of boxes
scores = scores[inv_index, :]
pred_boxes = pred_boxes[inv_index, :]
if not cfg.MULTI_LABEL:
return scores, pred_boxes
if not cfg.MULTI_LABEL_SOFTMAX:
labels = blobs_out['multi_label_score']
labels = labels[inv_index, :]
return scores, pred_boxes, labels
labels = \
np.hstack((blobs_out['texture_prob'][:,1:blobs_out['texture_prob'].shape[1]],
blobs_out['neckband_prob'][:,1:blobs_out['neckband_prob'].shape[1]],
blobs_out['sleeve_prob'][:,1:blobs_out['sleeve_prob'].shape[1]])).astype(np.float32)
labels = labels[inv_index, :]
return scores, pred_boxes, labels
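# Illustrative note on the deduplication above (added, not from the original
# source): each RoI row [level, x1, y1, x2, y2] is scaled by cfg.DEDUP_BOXES
# (e.g. 1. / 16, the feature stride), rounded, and dotted with
# v = [1, 1e3, 1e6, 1e9, 1e12]; for instance
# np.round(np.array([0, 16, 32, 48, 64]) / 16.).dot(v) == 1e3 + 2e6 + 3e9 + 4e12,
# so RoIs that land on the same feature-map cell collapse to one hash and are
# forwarded through the network only once.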
def vis_detections(im, class_name, dets, thresh=0.3):
"""Visual debugging of detections."""
import matplotlib.pyplot as plt
im = im[:, :, (2, 1, 0)]
for i in xrange(np.minimum(10, dets.shape[0])):
bbox = dets[i, :4]
score = dets[i, -1]
if score > thresh:
plt.cla()
plt.imshow(im)
plt.gca().add_patch(
plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='g', linewidth=3)
)
plt.title('{} {:.3f}'.format(class_name, score))
plt.show()
def apply_nms(all_boxes, thresh):
"""Apply non-maximum suppression to all predicted boxes output by the
test_net method.
"""
num_classes = len(all_boxes)
num_images = len(all_boxes[0])
nms_boxes = [[[] for _ in xrange(num_images)]
for _ in xrange(num_classes)]
for cls_ind in xrange(num_classes):
for im_ind in xrange(num_images):
dets = all_boxes[cls_ind][im_ind]
if dets == []:
continue
keep = nms(dets, thresh)
if len(keep) == 0:
continue
nms_boxes[cls_ind][im_ind] = dets[keep, :].copy()
return nms_boxes
def test_net(net, imdb):
"""Test a Fast R-CNN network on an image database."""
num_images = len(imdb.image_index)
    # heuristic: keep an average of 40 detections per class per image prior
# to NMS
max_per_set = 40 * num_images
    # heuristic: keep at most 100 detections per class per image prior to NMS
max_per_image = 100
    # detection threshold for each class (this is adaptively set based on the
# max_per_set constraint)
thresh = -np.inf * np.ones(imdb.num_classes)
# top_scores will hold one minheap of scores per class (used to enforce
# the max_per_set constraint)
top_scores = [[] for _ in xrange(imdb.num_classes)]
# all detections are collected into:
# all_boxes[cls][image] = N x 5 array of detections in
# (x1, y1, x2, y2, score)
all_boxes = [[[] for _ in xrange(num_images)]
for _ in xrange(imdb.num_classes)]
output_dir = get_output_dir(imdb, net)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# timers
_t = {'im_detect' : Timer(), 'misc' : Timer()}
roidb = imdb.roidb
for i in xrange(num_images):
im = cv2.imread(imdb.image_path_at(i))
_t['im_detect'].tic()
scores, boxes = im_detect(net, im, roidb[i]['boxes'])
_t['im_detect'].toc()
_t['misc'].tic()
for j in xrange(1, imdb.num_classes):
inds = np.where((scores[:, j] > thresh[j]) &
(roidb[i]['gt_classes'] == 0))[0]
cls_scores = scores[inds, j]
cls_boxes = boxes[inds, j*4:(j+1)*4]
top_inds = np.argsort(-cls_scores)[:max_per_image]
cls_scores = cls_scores[top_inds]
cls_boxes = cls_boxes[top_inds, :]
# push new scores onto the minheap
for val in cls_scores:
heapq.heappush(top_scores[j], val)
            # if we've collected more than the max number of detections,
# then pop items off the minheap and update the class threshold
if len(top_scores[j]) > max_per_set:
while len(top_scores[j]) > max_per_set:
heapq.heappop(top_scores[j])
thresh[j] = top_scores[j][0]
all_boxes[j][i] = \
np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
.astype(np.float32, copy=False)
if 0:
keep = nms(all_boxes[j][i], 0.3)
vis_detections(im, imdb.classes[j], all_boxes[j][i][keep, :])
_t['misc'].toc()
print 'im_detect: {:d}/{:d} {:.3f}s {:.3f}s' \
.format(i + 1, num_images, _t['im_detect'].average_time,
_t['misc'].average_time)
for j in xrange(1, imdb.num_classes):
for i in xrange(num_images):
inds = np.where(all_boxes[j][i][:, -1] > thresh[j])[0]
all_boxes[j][i] = all_boxes[j][i][inds, :]
det_file = os.path.join(output_dir, 'detections.pkl')
with open(det_file, 'wb') as f:
cPickle.dump(all_boxes, f, cPickle.HIGHEST_PROTOCOL)
print 'Applying NMS to all detections'
nms_dets = apply_nms(all_boxes, cfg.TEST.NMS)
print 'Evaluating detections'
imdb.evaluate_detections(nms_dets, output_dir)
|
mit
|
Chris7/pyquant
|
pyquant/worker.py
|
1
|
86728
|
from __future__ import division, unicode_literals, print_function
import sys
import os
import copy
import operator
import traceback
from functools import cmp_to_key
import pandas as pd
import numpy as np
from itertools import groupby, combinations
from collections import OrderedDict, defaultdict
from sklearn.covariance import EllipticEnvelope
from sklearn.svm import OneClassSVM
from multiprocessing import Process
try:
from profilestats import profile # noqa: F401
from memory_profiler import profile as memory_profiler # noqa: F401
except ImportError:
pass
from scipy import integrate
from scipy.ndimage.filters import gaussian_filter1d
from pythomics.proteomics import config
from . import PEAK_RESOLUTION_RT_MODE
from . import peaks
from .utils import (
calculate_theoretical_distribution,
find_scan,
find_prior_scan,
find_next_scan,
nanmean,
find_common_peak_mean,
get_scan_resolution,
)
class Worker(Process):
def __init__(
self,
queue=None,
results=None,
precision=6,
raw_name=None,
mass_labels=None,
isotope_ppms=None,
debug=False,
html=False,
mono=False,
precursor_ppm=5.0,
isotope_ppm=2.5,
quant_method="integrate",
reader_in=None,
reader_out=None,
thread=None,
fitting_run=False,
msn_rt_map=None,
reporter_mode=False,
spline=None,
isotopologue_limit=-1,
labels_needed=1,
overlapping_mz=False,
min_resolution=0,
min_scans=3,
quant_msn_map=None,
mrm=False,
mrm_pair_info=None,
peak_cutoff=0.05,
ratio_cutoff=0,
replicate=False,
ref_label=None,
max_peaks=4,
parser_args=None,
scans_to_skip=None,
):
super(Worker, self).__init__()
self.precision = precision
self.precursor_ppm = precursor_ppm
self.isotope_ppm = isotope_ppm
self.queue = queue
self.reader_in, self.reader_out = reader_in, reader_out
self.msn_rt_map = pd.Series(msn_rt_map)
self.msn_rt_map.sort_values(inplace=True)
self.results = results
self.mass_labels = {"Light": {}} if mass_labels is None else mass_labels
self.shifts = {0: "Light"}
self.shifts.update(
{
sum(silac_masses.keys()): silac_label
for silac_label, silac_masses in self.mass_labels.items()
}
)
self.raw_name = raw_name
self.filename = os.path.split(self.raw_name)[1]
self.rt_tol = 0.2 # for fitting
self.debug = debug
self.html = html
self.mono = mono
self.thread = thread
self.fitting_run = fitting_run
self.isotope_ppms = isotope_ppms
self.quant_method = quant_method
self.reporter_mode = reporter_mode
self.spline = spline
self.isotopologue_limit = isotopologue_limit
self.labels_needed = labels_needed
self.overlapping_mz = overlapping_mz
self.min_resolution = min_resolution
self.min_scans = min_scans
self.quant_msn_map = quant_msn_map
self.mrm = mrm
self.mrm_pair_info = mrm_pair_info
self.peak_cutoff = peak_cutoff
self.replicate = replicate
self.ratio_cutoff = ratio_cutoff
self.ref_label = ref_label
self.max_peaks = max_peaks
self.parser_args = parser_args
if mrm:
self.quant_mrm_map = {
label: list(group)
for label, group in groupby(
self.quant_msn_map, key=operator.itemgetter(0)
)
}
self.peaks_n = self.parser_args.peaks_n
self.rt_guide = not self.parser_args.no_rt_guide
self.filter_peaks = not self.parser_args.disable_peak_filtering
self.report_ratios = not self.parser_args.no_ratios
self.bigauss_stepsize = 6 if self.parser_args.fit_baseline else 4
self.xic_missing_ion_count = self.parser_args.xic_missing_ion_count
self.scans_to_skip = scans_to_skip or {}
# This is a convenience object to pass to the findAllPeaks function since it is called quite a few times
self.peak_finding_kwargs = {
"max_peaks": self.max_peaks,
"debug": self.debug,
"snr": self.parser_args.snr_filter,
"amplitude_filter": self.parser_args.intensity_filter,
"min_dist": self.parser_args.min_peak_separation,
"fit_baseline": self.parser_args.fit_baseline,
"zscore": self.parser_args.zscore_filter,
"local_filter_size": self.parser_args.filter_width,
"percentile_filter": self.parser_args.percentile_filter,
"smooth": self.parser_args.xic_smooth,
"r2_cutoff": self.parser_args.r2_cutoff,
"gap_interpolation": self.parser_args.gap_interpolation,
"fit_mode": self.parser_args.peak_find_mode,
}
def get_calibrated_mass(self, mass):
return mass / (1 - self.spline(mass) / 1e6) if self.spline else mass
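    # Illustrative arithmetic (added note, not from the original source): the
    # formula above treats self.spline(mass) as a ppm-scale error, so with
    # spline(1000.0) == 5 the calibrated mass is 1000.0 / (1 - 5 / 1e6),
    # i.e. roughly 1000.005.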
def low_snr(self, scan_intensities, thresh=0.3):
std = np.std(scan_intensities)
last_point = nanmean(scan_intensities[-3:])
# check the SNR of the last points, if its bad, get out
return (last_point / std) < thresh
def replaceOutliers(self, common_peaks, combined_data, debug=False):
x = []
y = []
tx = []
ty = []
ty2 = []
hx = []
hy = []
keys = []
hkeys = []
y2 = []
hy2 = []
for i, v in common_peaks.items():
for isotope, found_peaks in v.items():
for peak_index, peak in enumerate(found_peaks):
keys.append((i, isotope, peak_index))
mean, std, std2 = peak["mean"], peak["std"], peak["std2"]
x.append(mean)
y.append(std)
y2.append(std2)
if peak.get("valid"):
tx.append(mean)
ty.append(std)
ty2.append(std2)
if self.mrm and i != "Light":
hx.append(mean)
hy.append(std)
hy2.append(std2)
hkeys.append((i, isotope, peak_index))
classifier = EllipticEnvelope(support_fraction=0.75, random_state=0)
if len(x) == 1:
return x[0]
data = np.array([x, y, y2]).T
true_data = np.array([tx, ty, ty2]).T
false_pred = (False, -1)
true_pred = (True, 1)
to_delete = set([])
fitted = False
true_data = (
np.vstack({tuple(row) for row in true_data}) if true_data.shape[0] else None
)
if true_data is not None and true_data.shape[0] >= 3:
fit_data = true_data
else:
fit_data = np.vstack({tuple(row) for row in data})
if len(hx) >= 3 or fit_data.shape[0] >= 3:
if debug:
print(common_peaks)
try:
classifier.fit(np.array([hx, hy, hy2]).T if self.mrm else fit_data)
fitted = True
# x_mean, x_std1, x_std2 = classifier.location_
except Exception as e:
try:
classifier = OneClassSVM(
nu=0.95 * 0.15 + 0.05,
kernel=str("linear"),
degree=1,
random_state=0,
)
classifier.fit(np.array([hx, hy, hy2]).T if self.mrm else fit_data)
fitted = True
except Exception as e:
if debug:
print(traceback.format_exc(), data)
x_mean, x_std1, x_std2 = np.median(data, axis=0)
if fitted:
classes = classifier.predict(data)
try:
if hasattr(classifier, "location_"):
x_mean, x_std1, x_std2 = classifier.location_
else:
x_mean, x_std1, x_std2 = np.median(data[classes == 1], axis=0)
except IndexError:
x_mean, x_std1, x_std2 = np.median(data, axis=0)
else:
x_inlier_indices = [
i
for i, v in enumerate(classes)
if v in true_pred
or common_peaks[keys[i][0]][keys[i][1]][keys[i][2]].get("valid")
]
x_inliers = set([keys[i][:2] for i in sorted(x_inlier_indices)])
x_outliers = [
i
for i, v in enumerate(classes)
if keys[i][:2] not in x_inliers
and (
v in false_pred
or common_peaks[keys[i][0]][keys[i][1]][keys[i][2]].get(
"interpolate"
)
)
]
if debug:
print("inliers", x_inliers)
print("outliers", x_outliers)
# print('x1o', x1_outliers)
min_x = x_mean - x_std1
max_x = x_mean + x_std2
for index in x_inlier_indices:
indexer = keys[index]
peak_info = common_peaks[indexer[0]][indexer[1]][indexer[2]]
peak_min = peak_info["mean"] - peak_info["std"]
peak_max = peak_info["mean"] + peak_info["std2"]
if peak_min < min_x:
min_x = peak_min
if peak_max > max_x:
max_x = peak_max
if x_inliers:
for index in x_outliers:
indexer = keys[index]
if x_inliers is not None and indexer[:2] in x_inliers:
                                # this outlier has a valid inlying value in x_inliers, so we delete it
to_delete.add(indexer)
else:
# there is no non-outlying data point. If this data point is > 1 sigma away, delete it
peak_info = common_peaks[indexer[0]][indexer[1]][indexer[2]]
if debug:
print(indexer, peak_info, x_mean, x_std1, x_std2)
if not (min_x < peak_info["mean"] < max_x):
to_delete.add(indexer)
else:
            # we do not have enough data for ML; if we have scenarios with a 'valid' peak, keep those over the others
for quant_label, isotope_peaks in common_peaks.items():
for isotope, found_peaks in isotope_peaks.items():
keys.append((i, isotope, peak_index))
to_keep = []
to_remove = []
for peak_index, peak in enumerate(found_peaks):
if peak.get("valid"):
to_keep.append(peak_index)
else:
to_remove.append(peak_index)
if to_keep:
for i in sorted(to_remove, reverse=True):
                            del found_peaks[i]
if debug:
print("to remove", to_delete)
for i in sorted(set(to_delete), key=operator.itemgetter(0, 1, 2), reverse=True):
del common_peaks[i[0]][i[1]][i[2]]
return x_mean
def convertScan(self, scan):
import numpy as np
scan_vals = scan["vals"]
res = pd.Series(
scan_vals[:, 1].astype(np.uint64),
index=np.round(scan_vals[:, 0], self.precision),
name=int(scan["title"]) if self.mrm else scan["rt"],
dtype="uint64",
)
# mz values can sometimes be not sorted -- rare but it happens
res = res.sort_index()
del scan_vals
# due to precision, we have multiple m/z values at the same place. We can eliminate this by grouping them and summing them.
# Summation is the correct choice here because we are combining values of a precision higher than we care about.
try:
return res.groupby(level=0).sum() if not res.empty else None
except Exception as e:
print(
"Converting scan error {}\n{}\n{}\n".format(
traceback.format_exc(), res, scan
)
)
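    # Small hedged example of the grouping above (added note): with
    # precision=6 the m/z values 100.1234564 and 100.1234561 both round to
    # 100.123456, so their intensities (say 10 and 15) are summed into a
    # single entry of 25 by the groupby(level=0).sum() call.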
def getScan(self, ms1, start=None, end=None):
self.reader_in.put((self.thread, ms1, start, end))
scan = self.reader_out.get()
if scan is None:
print("Unable to fetch scan {}.\n".format(ms1))
return (
(self.convertScan(scan), {"centroid": scan.get("centroid", False)})
if scan is not None
else (None, {})
)
# @memory_profiler
# @line_profiler(extra_view=[peaks.findEnvelope, peaks.findAllPeaks, peaks.findMicro])
def quantify_peaks(self, params):
result_dict = {}
try:
html_images = {}
scan_info = params.get("scan_info")
target_scan = scan_info.get("id_scan")
quant_scan = scan_info.get("quant_scan")
scanId = target_scan.get("id")
ms1 = quant_scan["id"]
scans_to_quant = quant_scan.get("scans")
if scans_to_quant:
scans_to_quant.pop(scans_to_quant.index(ms1))
charge = target_scan["charge"]
mass = target_scan["mass"]
combine_xics = scan_info.get("combine_xics")
precursor = target_scan["precursor"]
calibrated_precursor = self.get_calibrated_mass(precursor)
theor_mass = target_scan.get("theor_mass", calibrated_precursor)
# this will be the RT of the target_scan, which is not always equal to the RT of the quant_scan
rt = target_scan["rt"]
peptide = target_scan.get("peptide")
if self.debug:
sys.stderr.write(
"thread {4} on ms {0} {1} {2} {3}\n".format(
ms1, rt, precursor, scan_info, id(self)
)
)
result_dict.update(
{
"peptide": target_scan.get("mod_peptide")
or target_scan.get("peptide"),
"scan": scanId,
"ms1": ms1,
"charge": charge,
"modifications": target_scan.get("modifications"),
"rt": rt,
"accession": target_scan.get("accession"),
}
)
if float(charge) == 0:
# We cannot proceed with a zero charge
self.results.put(result_dict)
return
precursors = defaultdict(dict)
silac_dict = {
"data": None,
"df": pd.DataFrame(),
"precursor": "NA",
"isotopes": {},
"peaks": OrderedDict(),
"intensity": "NA",
}
data = OrderedDict()
# data['Light'] = copy.deepcopy(silac_dict)
combined_data = pd.DataFrame()
if self.mrm:
mrm_labels = [
i
for i in self.mrm_pair_info.columns
if i.lower() not in ("retention time")
]
mrm_info = None
for index, values in self.mrm_pair_info.iterrows():
if values["Light"] == mass:
mrm_info = values
for ion in target_scan.get("ion_set", []):
precursors[str(ion)]["uncalibrated_mz"] = ion
precursors[str(ion)]["calibrated_mz"] = self.get_calibrated_mass(ion)
precursors[str(ion)]["theoretical_mz"] = ion
data[str(ion)] = copy.deepcopy(silac_dict)
for silac_label, silac_masses in self.mass_labels.items():
silac_shift = 0
global_mass = None
added_residues = set([])
cterm_mass = 0
nterm_mass = 0
mass_keys = list(silac_masses.keys())
if self.reporter_mode:
silac_shift = sum(mass_keys)
label_mz = silac_shift
theo_mz = silac_shift
else:
if peptide:
for label_mass, label_masses in silac_masses.items():
if "X" in label_masses:
global_mass = label_mass
if "]" in label_masses:
cterm_mass = label_mass
if "[" in label_masses:
nterm_mass = label_mass
added_residues = added_residues.union(label_masses)
labels = [
label_mass
for mod_aa in peptide
if mod_aa in label_masses
]
silac_shift += sum(labels)
else:
# no mass, just assume we have one of the labels
silac_shift += mass_keys[0]
if global_mass is not None:
silac_shift += sum(
[
global_mass
for mod_aa in peptide
if mod_aa not in added_residues
]
)
silac_shift += cterm_mass + nterm_mass
label_mz = precursor + (silac_shift / float(charge))
theo_mz = theor_mass + (silac_shift / float(charge))
precursors[silac_label]["uncalibrated_mz"] = label_mz
precursors[silac_label]["calibrated_mz"] = self.get_calibrated_mass(
label_mz
)
precursors[silac_label]["theoretical_mz"] = theo_mz
data[silac_label] = copy.deepcopy(silac_dict)
if not precursors:
precursors[""]["uncalibrated_mz"] = precursor
precursors[""]["calibrated_mz"] = self.get_calibrated_mass(precursor)
precursors[""]["theoretical_mz"] = precursor
data[""] = copy.deepcopy(silac_dict)
precursors = OrderedDict(
sorted(
precursors.items(),
key=cmp_to_key(
lambda x, y: int(
x[1]["uncalibrated_mz"] - y[1]["uncalibrated_mz"]
)
),
)
)
shift_maxes = {
i: max([j["uncalibrated_mz"], j["calibrated_mz"], j["theoretical_mz"]])
for i, j in zip(precursors.keys(), list(precursors.values())[1:])
}
lowest_precursor_mz = min(
[
label_val
for label, label_info in precursors.items()
for label_info_key, label_val in label_info.items()
if label_info_key.endswith("mz")
]
)
highest_precursor_mz = (
max(shift_maxes.values()) if shift_maxes else lowest_precursor_mz
)
# do these here, remember when you tried to do this in one line with () and spent an hour debugging it?
lowest_precursor_mz -= 5
highest_precursor_mz += 5
finished_isotopes = {i: set([]) for i in precursors.keys()}
ms_index = 0
delta = -1
theo_dist = (
calculate_theoretical_distribution(peptide=peptide.upper())
if peptide
else None
)
spacing = config.NEUTRON / float(charge)
isotope_labels = {}
isotopes_chosen = {}
last_precursors = {-1: {}, 1: {}}
# our rt might sometimes be an approximation, such as from X!Tandem which requires some transformations
initial_scan = find_scan(self.quant_msn_map, ms1)
current_scan = None
not_found = 0
if self.mrm:
mrm_label = mrm_labels.pop() if mrm_info is not None else "Light"
mass = mass if mrm_info is None else mrm_info[mrm_label]
last_peak_height = {i: defaultdict(int) for i in precursors.keys()}
low_int_isotopes = defaultdict(int)
all_data_intensity = {-1: [], 1: []}
while True:
map_to_search = (
self.quant_mrm_map[mass] if self.mrm else self.quant_msn_map
)
if current_scan is None:
current_scan = initial_scan
else:
if scans_to_quant:
current_scan = scans_to_quant.pop(0)
elif scans_to_quant is None:
current_scan = (
find_prior_scan(map_to_search, current_scan)
if delta == -1
else find_next_scan(map_to_search, current_scan)
)
else:
# we've exhausted the scans we are supposed to quantify
break
found = set([])
current_scan_intensity = 0
if current_scan is not None:
if current_scan in self.scans_to_skip:
continue
else:
if self.min_resolution:
full_scan, scan_params = self.getScan(current_scan)
# check if it's a low res scan, if so skip it
if full_scan is not None:
scan_resolution = get_scan_resolution(full_scan)
if scan_resolution < self.min_resolution:
self.scans_to_skip[current_scan] = True
continue
if self.mrm:
df = full_scan
else:
df = full_scan.ix[
(full_scan.index >= lowest_precursor_mz)
& (full_scan.index <= highest_precursor_mz)
]
else:
df, scan_params = self.getScan(
current_scan,
start=None if self.mrm else lowest_precursor_mz,
end=None if self.mrm else highest_precursor_mz,
)
if df is not None:
labels_found = set([])
xdata = df.index.values.astype(float)
ydata = df.fillna(0).values.astype(float)
iterator = (
precursors.items() if not self.mrm else [(mrm_label, 0)]
)
for precursor_label, precursor_info in iterator:
selected = {}
if self.mrm:
labels_found.add(precursor_label)
for i, j in zip(xdata, ydata):
selected[i] = j
isotope_labels[df.name] = {
"label": precursor_label,
"isotope_index": target_scan.get("product_ion", 0),
}
key = (df.name, xdata[-1])
isotopes_chosen[key] = {
"label": precursor_label,
"isotope_index": target_scan.get("product_ion", 0),
"amplitude": ydata[-1],
}
else:
uncalibrated_precursor = precursor_info[
"uncalibrated_mz"
]
measured_precursor = precursor_info["calibrated_mz"]
theoretical_precursor = precursor_info["theoretical_mz"]
data[precursor_label][
"calibrated_precursor"
] = measured_precursor
data[precursor_label][
"precursor"
] = uncalibrated_precursor
shift_max = (
shift_maxes.get(precursor_label)
if self.overlapping_mz is False
else None
)
is_fragmented_scan = (
current_scan == initial_scan
) and (precursor == measured_precursor)
envelope = peaks.findEnvelope(
xdata,
ydata,
measured_mz=measured_precursor,
theo_mz=theoretical_precursor,
max_mz=shift_max,
charge=charge,
contaminant_search=not self.parser_args.no_contaminant_detection,
precursor_ppm=self.precursor_ppm,
isotope_ppm=self.isotope_ppm,
reporter_mode=self.reporter_mode,
isotope_ppms=self.isotope_ppms
if self.fitting_run
else None,
quant_method=self.quant_method,
debug=self.debug,
theo_dist=theo_dist
if (self.mono or precursor_label not in shift_maxes)
else None,
label=precursor_label,
skip_isotopes=finished_isotopes[precursor_label],
last_precursor=last_precursors[delta].get(
precursor_label, measured_precursor
),
isotopologue_limit=self.isotopologue_limit,
fragment_scan=is_fragmented_scan,
centroid=scan_params.get("centroid", False),
)
if not envelope["envelope"]:
if self.debug:
print(
"envelope empty",
envelope,
measured_precursor,
initial_scan,
current_scan,
last_precursors,
)
if self.parser_args.msn_all_scans:
selected[measured_precursor] = 0
isotope_labels[measured_precursor] = {
"label": precursor_label,
"isotope_index": 0,
}
isotopes_chosen[
(df.name, measured_precursor)
] = {
"label": precursor_label,
"isotope_index": 0,
"amplitude": 0,
}
else:
continue
if (
not self.parser_args.msn_all_scans
and 0 in envelope["micro_envelopes"]
and envelope["micro_envelopes"][0].get("int")
):
if ms_index == 0:
last_precursors[delta * -1][
precursor_label
] = envelope["micro_envelopes"][0]["params"][1]
last_precursors[delta][precursor_label] = envelope[
"micro_envelopes"
][0]["params"][1]
added_keys = []
for isotope, vals in envelope[
"micro_envelopes"
].items():
if isotope in finished_isotopes[precursor_label]:
continue
peak_intensity = vals.get("int")
if peak_intensity == 0 or (
self.peak_cutoff
and peak_intensity
< last_peak_height[precursor_label][isotope]
* self.peak_cutoff
):
low_int_isotopes[
(precursor_label, isotope)
] += 1
if (
not self.parser_args.msn_all_scans
and low_int_isotopes[
(precursor_label, isotope)
]
>= 2
):
if self.debug:
print(
"finished with isotope",
precursor_label,
envelope,
)
finished_isotopes[precursor_label].add(
isotope
)
else:
labels_found.add(precursor_label)
continue
else:
low_int_isotopes[(precursor_label, isotope)] = 0
found.add(precursor_label)
labels_found.add(precursor_label)
if (
current_scan == initial_scan
or last_peak_height[precursor_label][isotope]
== 0
):
last_peak_height[precursor_label][
isotope
] = peak_intensity
selected[
measured_precursor + isotope * spacing
] = peak_intensity
current_scan_intensity += peak_intensity
vals["isotope"] = isotope
isotope_labels[
measured_precursor + isotope * spacing
] = {
"label": precursor_label,
"isotope_index": isotope,
}
key = (
df.name,
measured_precursor + isotope * spacing,
)
added_keys.append(key)
isotopes_chosen[key] = {
"label": precursor_label,
"isotope_index": isotope,
"amplitude": peak_intensity,
}
del envelope
selected = pd.Series(selected, name=df.name).to_frame()
if df.name in combined_data.columns:
combined_data = combined_data.add(
selected, axis="index", fill_value=0
)
else:
combined_data = pd.concat(
[combined_data, selected], axis=1, sort=True
).fillna(0)
del selected
if not self.mrm and (
(len(labels_found) < self.labels_needed)
or (
self.parser_args.require_all_ions
and len(labels_found) < len(precursors)
)
):
if self.parser_args.msn_all_scans:
if self.parser_args.require_all_ions:
if self.debug:
print(
"Not all ions found, setting",
df.name,
"to zero",
)
combined_data[df.name] = 0
else:
found.discard(precursor_label)
if df is not None and df.name in combined_data.columns:
del combined_data[df.name]
for i in isotopes_chosen.keys():
if i[0] == df.name:
del isotopes_chosen[i]
del df
all_data_intensity[delta].append(current_scan_intensity)
if not found or (
(
np.abs(ms_index) > 7
and self.low_snr(
all_data_intensity[delta], thresh=self.parser_args.xic_snr
)
)
or (
self.parser_args.xic_window_size != -1
and np.abs(ms_index) >= self.parser_args.xic_window_size
)
):
not_found += 1
if current_scan is None or (
not_found > self.xic_missing_ion_count
and not self.parser_args.msn_all_scans
):
not_found = 0
if delta == -1:
delta = 1
current_scan = initial_scan
finished_isotopes = {i: set([]) for i in precursors.keys()}
last_peak_height = {
i: defaultdict(int) for i in precursors.keys()
}
ms_index = 0
else:
if self.mrm:
if mrm_info is not None and mrm_labels:
mrm_label = (
mrm_labels.pop()
if mrm_info is not None
else "Light"
)
mass = (
mass
if mrm_info is None
else mrm_info[mrm_label]
)
delta = -1
current_scan = self.quant_mrm_map[mass][0][1]
last_peak_height = {
i: defaultdict(int) for i in precursors.keys()
}
initial_scan = current_scan
finished_isotopes = {
i: set([]) for i in precursors.keys()
}
ms_index = 0
else:
break
else:
break
else:
not_found = 0
if self.reporter_mode:
break
ms_index += delta
rt_figure = {}
isotope_figure = {}
if self.parser_args.merge_isotopes:
new_labels = {}
labels = set(v["label"] for i, v in isotope_labels.items())
for label in labels:
to_merge = [
(i, v["isotope_index"], v)
for i, v in isotope_labels.items()
if v["label"] == label
]
to_merge.sort(key=operator.itemgetter(1))
new_labels[to_merge[0][0]] = to_merge[0][2]
if len(to_merge) > 1:
combined_data.loc[to_merge[0][0], :] = combined_data.loc[
[i[0] for i in to_merge], :
].sum(axis=0)
combined_data.drop([i[0] for i in to_merge[1:]], inplace=True)
isotope_labels = new_labels
if self.parser_args.merge_labels or combine_xics:
label_name = "_".join(map(str, combined_data.index))
combined_data = combined_data.sum(axis=0).to_frame(name=label_name).T
isotope_labels = {
label_name: {"isotope_index": 0, "label": label_name,}
}
data[label_name] = {}
data[label_name]["calibrated_precursor"] = "_".join(
map(
str,
(
data[i].get("calibrated_precursor")
for i in sorted(data.keys())
if i != label_name
),
)
)
data[label_name]["precursor"] = "_".join(
map(
str,
(
data[i].get("precursor")
for i in sorted(data.keys())
if i != label_name
),
)
)
if isotopes_chosen and isotope_labels and not combined_data.empty:
if self.mrm:
combined_data = combined_data.T
# bookend with zeros if there aren't any, do the right end first because pandas will by default append there
combined_data = combined_data.sort_index().sort_index(axis="columns")
start_rt = rt
rt_guide = self.rt_guide and start_rt
if len(combined_data.columns) == 1:
if combined_data.columns[-1] == self.msn_rt_map.iloc[-1]:
new_col = combined_data.columns[-1] + (
combined_data.columns[-1] - self.msn_rt_map.iloc[-2]
)
else:
new_col = self.msn_rt_map.iloc[
self.msn_rt_map.searchsorted(combined_data.columns[-1]) + 1
]
else:
new_col = combined_data.columns[-1] + (
combined_data.columns[-1] - combined_data.columns[-2]
)
combined_data[new_col] = 0
new_col = combined_data.columns[0] - (
combined_data.columns[1] - combined_data.columns[0]
)
combined_data[new_col] = 0
combined_data = combined_data[sorted(combined_data.columns)]
combined_data = combined_data.sort_index().sort_index(axis="columns")
quant_vals = defaultdict(dict)
isotope_labels = pd.DataFrame(isotope_labels).T
isotopes_chosen = pd.DataFrame(isotopes_chosen).T
isotopes_chosen.index.names = ["RT", "MZ"]
if self.html:
# make the figure of our isotopes selected
all_x = sorted(
isotopes_chosen.index.get_level_values("MZ").drop_duplicates()
)
isotope_group = isotopes_chosen.groupby(level="RT")
isotope_figure = {
"data": [],
"plot-multi": True,
"common-x": ["x"] + all_x,
"max-y": isotopes_chosen["amplitude"].max(),
}
isotope_figure_mapper = {}
rt_figure = {
"data": [],
"plot-multi": True,
"common-x": ["x"]
+ ["{0:0.4f}".format(i) for i in combined_data.columns],
"rows": len(precursors),
"max-y": combined_data.max().max(),
}
rt_figure_mapper = {}
for counter, (index, row) in enumerate(isotope_group):
try:
title = "Scan {} RT {}".format(
self.msn_rt_map[self.msn_rt_map == index].index[0],
index,
)
except Exception as e:
title = "{}".format(index)
if index in isotope_figure_mapper:
isotope_base = isotope_figure_mapper[index]
else:
isotope_base = {
"data": {"x": "x", "columns": [], "type": "bar"},
"axis": {
"x": {"label": "M/Z"},
"y": {"label": "Intensity"},
},
}
isotope_figure_mapper[index] = isotope_base
isotope_figure["data"].append(isotope_base)
for group in precursors.keys():
label_df = row[row["label"] == group]
x = (
label_df["amplitude"]
.index.get_level_values("MZ")
.tolist()
)
y = label_df["amplitude"].values.tolist()
isotope_base["data"]["columns"].append(
["{} {}".format(title, group)]
+ [y[x.index(i)] if i in x else 0 for i in all_x]
)
if not self.reporter_mode:
combined_peaks = defaultdict(dict)
peak_location = None
# If we are searching for a particular RT, we look for it in the data and remove other larger peaks
# until we find it. To help with cases where we are fitting multiple datasets for the same XIC, we
# combine the data to increase the SNR in case some XICs of a given ion are weak
if rt_guide and not self.parser_args.msn_all_scans:
merged_data = combined_data.sum(axis=0)
merged_x = merged_data.index.astype(float).values
merged_y = merged_data.values.astype(float)
res, residual = peaks.targeted_search(
merged_x,
merged_y,
start_rt,
attempts=4,
peak_finding_kwargs=self.peak_finding_kwargs,
)
if self.debug:
if res is not None:
print("peak used for sub-fitting", res)
else:
print(peptide, "is dead")
if res is not None:
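                            # the flattened fit vector packs [amplitude, mean, std, std2] for each peak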
rt_means = res[1 :: self.bigauss_stepsize]
rt_amps = res[:: self.bigauss_stepsize]
rt_std = res[2 :: self.bigauss_stepsize]
rt_std2 = res[3 :: self.bigauss_stepsize]
m_std = np.std(merged_y)
m_mean = nanmean(merged_y)
valid_peaks = [
{
"mean": i,
"amp": j,
"std": l,
"std2": k,
"total": merged_y.sum(),
"snr": m_mean / m_std,
"residual": residual,
}
for i, j, l, k in zip(
rt_means, rt_amps, rt_std, rt_std2
)
]
valid_peaks.sort(key=lambda x: np.abs(x["mean"] - start_rt))
peak_index = peaks.find_nearest_index(
merged_x, valid_peaks[0]["mean"]
)
peak_location = merged_x[peak_index]
if self.debug:
print("peak location is", peak_location)
merged_lb = peaks.find_nearest_index(
merged_x,
valid_peaks[0]["mean"] - valid_peaks[0]["std"] * 2,
)
merged_rb = peaks.find_nearest_index(
merged_x,
valid_peaks[0]["mean"] + valid_peaks[0]["std2"] * 2,
)
merged_rb = (
len(merged_x) if merged_rb == -1 else merged_rb + 1
)
else:
merged_lb = 0
merged_rb = combined_data.shape[1]
peak_location = start_rt
else:
merged_x = xdata
merged_y = ydata
merged_lb = 0
merged_rb = combined_data.shape[1]
potential_peaks = defaultdict(list)
for row_num, (index, values) in enumerate(combined_data.iterrows()):
quant_label = isotope_labels.loc[index, "label"]
xdata = values.index.values.astype(float)
ydata = values.fillna(0).values.astype(float)
# Setup the HTML first in case we do not fit any peaks, we still want to report the raw data
if self.html:
# ax = fig.add_subplot(subplot_rows, subplot_columns, fig_index)
if quant_label in rt_figure_mapper:
rt_base = rt_figure_mapper[(quant_label, index)]
else:
rt_base = {
"data": {"x": "x", "columns": []},
"grid": {
"x": {
"lines": [
{
"value": rt,
"text": "Initial RT {0:0.4f}".format(
rt
),
"position": "middle",
}
]
}
},
"subchart": {"show": True},
"axis": {
"x": {"label": "Retention Time"},
"y": {"label": "Intensity"},
},
}
rt_figure_mapper[(quant_label, index)] = rt_base
rt_figure["data"].append(rt_base)
rt_base["data"]["columns"].append(
["{0} {1} raw".format(quant_label, index)]
+ ydata.tolist()
)
if sum(ydata > 0) >= self.min_scans:
# this step is to add in a term on the border if possible
# otherwise, there are no penalties on the variance if it is
# at the border since the data does not exist. We only add for lower values to avoid
# including monster peaks we may be explicitly excluding above
fit_lb = merged_lb
fit_rb = merged_rb
while (
fit_rb + 1 < len(ydata)
and ydata[fit_rb + 1] <= ydata[fit_rb - 1]
):
fit_rb += 1
while fit_lb != 0 and ydata[fit_lb] >= ydata[fit_lb - 1]:
fit_lb -= 1
peak_x = np.copy(xdata[fit_lb:fit_rb])
peak_y = np.copy(ydata[fit_lb:fit_rb])
if peak_x.size <= 1 or sum(peak_y > 0) < self.min_scans:
continue
if rt_guide:
peak_positive_y = peak_y > 0
if (
peak_location is None
and self.parser_args.msn_all_scans
):
peak_location = start_rt
nearest_positive_peak = peaks.find_nearest(
peak_x[peak_positive_y], peak_location
)
sub_peak_location = peaks.find_nearest_index(
peak_x, nearest_positive_peak
)
sub_peak_index = (
sub_peak_location
if peak_y[sub_peak_location]
else np.argmax(peak_y)
)
else:
nearest_positive_peak = None
# fit, residual = peaks.fixedMeanFit2(peak_x, peak_y, peak_index=sub_peak_index, debug=self.debug)
if self.debug:
print("fitting XIC for", quant_label, index)
print("raw data is", xdata.tolist(), ydata.tolist())
fit, residual = peaks.findAllPeaks(
xdata,
ydata,
bigauss_fit=True,
filter=self.filter_peaks,
rt_peak=nearest_positive_peak,
**self.peak_finding_kwargs
)
if not fit.any():
continue
rt_amps = fit[:: self.bigauss_stepsize] # * ydata.max()
rt_means = fit[1 :: self.bigauss_stepsize]
rt_std = fit[2 :: self.bigauss_stepsize]
rt_std2 = fit[3 :: self.bigauss_stepsize]
xic_peaks = []
positive_y = ydata[ydata > 0]
if len(positive_y) > 5:
positive_y = gaussian_filter1d(
positive_y, 3, mode="constant"
)
for i, j, l, k in zip(rt_means, rt_amps, rt_std, rt_std2):
d = {
"mean": i,
"amp": j,
"std": l,
"std2": k,
"total": values.sum(),
"residual": residual,
}
mean_index = peaks.find_nearest_index(
xdata[ydata > 0], i
)
window_size = (
5
if len(positive_y) < 15
else int(len(positive_y) / 3)
)
lb, rb = (
mean_index - window_size,
mean_index + window_size + 1,
)
if lb < 0:
lb = 0
if rb > len(positive_y):
rb = -1
data_window = positive_y[lb:rb]
if data_window.any():
try:
background = np.percentile(data_window, 0.8)
except Exception as e:
background = np.percentile(ydata, 0.8)
mean = nanmean(data_window)
if background < mean:
background = mean
d["sbr"] = nanmean(
j
/ (
np.array(
sorted(data_window, reverse=True)[:5]
)
)
) # (j-np.mean(positive_y[lb:rb]))/np.std(positive_y[lb:rb])
if len(data_window) > 2:
d["snr"] = (j - background) / np.std(
data_window
)
else:
d["snr"] = np.NaN
else:
d["sbr"] = np.NaN
d["snr"] = np.NaN
xic_peaks.append(d)
potential_peaks[(quant_label, index)] = xic_peaks
# if we have a peaks containing our retention time, keep them and throw out ones not containing it
if (
self.parser_args.peak_resolution_mode
== PEAK_RESOLUTION_RT_MODE
):
to_remove = []
to_keep = []
if rt_guide:
peak_location_index = peaks.find_nearest_index(
merged_x, peak_location
)
for i, v in enumerate(xic_peaks):
mu = v["mean"]
s1 = v["std"]
s2 = v["std2"]
if mu - s1 * 2 < start_rt < mu + s2 * 2:
# these peaks are considered true and will help with the machine learning
if mu - s1 * 1.5 < start_rt < mu + s2 * 1.5:
v["valid"] = True
to_keep.append(i)
elif (
np.abs(
peaks.find_nearest_index(merged_x, mu)
- peak_location_index
)
> 2
):
to_remove.append(i)
if not to_keep:
# we have no peaks with our RT, there are contaminating peaks, remove all the noise but the closest to our RT
if not self.mrm:
# for i in to_remove:
# xic_peaks[i]['interpolate'] = True
valid_peak = sorted(
[
(i, np.abs(i["mean"] - start_rt))
for i in xic_peaks
],
key=operator.itemgetter(1),
)[0][0]
for i in reversed(range(len(xic_peaks))):
if xic_peaks[i] == valid_peak:
continue
else:
del xic_peaks[i]
# valid_peak['interpolate'] = True
# else:
# valid_peak = [j[0] for j in sorted([(i, i['amp']) for i in xic_peaks], key=operator.itemgetter(1), reverse=True)[:3]]
else:
# if not to_remove:
# xic_peaks = [xic_peaks[i] for i in to_keep]
# else:
for i in reversed(to_remove):
del xic_peaks[i]
if self.debug:
print(quant_label, index)
print(fit)
print(to_remove, to_keep, xic_peaks)
combined_peaks[quant_label][
index
] = xic_peaks # if valid_peak is None else [valid_peak]
peak_info = (
{i: {} for i in self.mrm_pair_info.columns}
if self.mrm
else {i: {} for i in precursors.keys()}
)
if self.reporter_mode or combined_peaks:
if self.reporter_mode:
for row_num, (index, values) in enumerate(
combined_data.iterrows()
):
quant_label = isotope_labels.loc[index, "label"]
isotope_index = isotope_labels.loc[index, "isotope_index"]
int_val = sum(values)
quant_vals[quant_label][isotope_index] = int_val
else:
# common_peak = self.replaceOutliers(combined_peaks, combined_data, debug=self.debug)
common_peak = find_common_peak_mean(
combined_peaks, tie_breaker_time=start_rt
)
common_loc = peaks.find_nearest_index(
xdata, common_peak
) # np.where(xdata==common_peak)[0][0]
for quant_label, quan_values in combined_peaks.items():
for index, values in quan_values.items():
if not values:
continue
isotope_index = isotope_labels.loc[
index, "isotope_index"
]
rt_values = combined_data.loc[index]
xdata = rt_values.index.values.astype(float)
ydata = rt_values.fillna(0).values.astype(float)
# pick the biggest within a rt cutoff of 0.2, otherwise pick closest
# closest_rts = sorted([(i, i['amp']) for i in values if np.abs(i['peak']-common_peak) < 0.2], key=operator.itemgetter(1), reverse=True)
closest_rts = sorted(
[
(i, np.abs(i["mean"] - common_peak))
for i in values
],
key=operator.itemgetter(1),
)
xic_peaks = [i[0] for i in closest_rts]
pos_x = xdata[ydata > 0]
if rt_guide:
xic_peaks = [xic_peaks[0]]
else:
# unguided, sort by amplitude
xic_peaks.sort(
key=operator.itemgetter("amp"), reverse=True
)
for xic_peak_index, xic_peak in enumerate(xic_peaks):
if (
self.peaks_n != -1
and xic_peak_index >= self.peaks_n
): # xic_peak index is 0 based, peaks_n is 1 based, hence the >=
break
# if we move more than a # of ms1 to the dominant peak, update to our known peak
gc = "k"
nearest = peaks.find_nearest_index(
pos_x, xic_peak["mean"]
)
peak_loc = np.where(xdata == pos_x[nearest])[0][0]
mean = xic_peak["mean"]
amp = xic_peak["amp"]
mean_diff = mean - xdata[common_loc]
mean_diff = np.abs(
mean_diff / xic_peak["std"]
if mean_diff < 0
else mean_diff / xic_peak["std2"]
)
std = xic_peak["std"]
std2 = xic_peak["std2"]
snr = xic_peak["snr"]
sbr = xic_peak["sbr"]
residual = xic_peak["residual"]
if (
False
and len(xdata) >= 3
and (
mean_diff > 2
or (
np.abs(peak_loc - common_loc) > 2
and mean_diff > 2
)
)
):
# fixed mean fit
if self.debug:
print(quant_label, index)
print(common_loc, peak_loc)
nearest = peaks.find_nearest_index(pos_x, mean)
nearest_index = np.where(
xdata == pos_x[nearest]
)[0][0]
res = peaks.fixedMeanFit(
xdata,
ydata,
peak_index=nearest_index,
debug=self.debug,
)
if res is None:
if self.debug:
print(
quant_label,
index,
"has no values here",
)
continue
amp, mean, std, std2 = res
amp *= ydata.max()
gc = "g"
# var_rat = closest_rt['var']/common_var
peak_params = np.array([amp, mean, std, std2])
# int_args = (res.x[rt_index]*mval, res.x[rt_index+1], res.x[rt_index+2])
left, right = (
xdata[0] - 4 * std,
xdata[-1] + 4 * std2,
)
xr = np.linspace(left, right, 1000)
left_index, right_index = (
peaks.find_nearest_index(xdata, left),
peaks.find_nearest_index(xdata, right) + 1,
)
if left_index < 0:
left_index = 0
if right_index >= len(xdata) or right_index <= 0:
right_index = len(xdata)
# check that we have at least 2 positive values
if sum(ydata[left_index:right_index] > 0) < 2:
continue
try:
int_val = (
integrate.simps(
peaks.bigauss_ndim(xr, peak_params),
x=xr,
)
if self.quant_method == "integrate"
else ydata[
(xdata > left) & (xdata < right)
].sum()
)
except Exception as e:
if self.debug:
print(traceback.format_exc())
print(xr, peak_params)
try:
total_int = integrate.simps(
ydata[left_index:right_index],
x=xdata[left_index:right_index],
)
except Exception as e:
if self.debug:
print(traceback.format_exc())
print(left_index, right_index, xdata, ydata)
sdr = np.log2(int_val * 1.0 / total_int + 1.0)
if int_val and not pd.isnull(int_val) and gc != "c":
try:
quant_vals[quant_label][
isotope_index
] += int_val
except KeyError:
try:
quant_vals[quant_label][
isotope_index
] = int_val
except KeyError:
quant_vals[quant_label] = {
isotope_index: int_val
}
cleft, cright = mean - 2 * std, mean + 2 * std2
curve_indices = (xdata >= cleft) & (xdata <= cright)
cf_data = ydata[curve_indices]
# Buffer cf_data with 0's to reflect that the data is nearly zero outside the fit
# and to prevent areas with 2 data points from having negative R^2
cf_data = np.hstack((0, cf_data, 0))
ss_tot = np.sum((cf_data - nanmean(cf_data)) ** 2)
if ss_tot == 0:
continue
ss_res = np.sum(
(
cf_data
- np.hstack(
(
0,
peaks.bigauss_ndim(
xdata[curve_indices],
peak_params,
),
0,
)
)
)
** 2
)
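                                    # coefficient of determination: R^2 = 1 - SS_res / SS_tot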
coef_det = 1 - ss_res / ss_tot
peak_info_dict = {
"peak_mean": mean,
"std": std,
"std2": std2,
"amp": amp,
"mean_diff": mean_diff,
"snr": snr,
"sbr": sbr,
"sdr": sdr,
"auc": int_val,
"peak_width": std + std2,
"coef_det": coef_det,
"residual": residual,
"label": quant_label,
}
try:
peak_info[quant_label][isotope_index][
xic_peak_index
] = peak_info_dict
except KeyError:
try:
peak_info[quant_label][isotope_index] = {
xic_peak_index: peak_info_dict
}
except KeyError:
peak_info[quant_label] = {
isotope_index: {
xic_peak_index: peak_info_dict
}
}
try:
data[quant_label]["residual"].append(residual)
except KeyError:
data[quant_label]["residual"] = [residual]
if self.html:
rt_base = rt_figure_mapper[(quant_label, index)]
key = "{} {}".format(quant_label, index)
for i, v in enumerate(
rt_base["data"]["columns"]
):
if key in v[0]:
break
rt_base["data"]["columns"].insert(
i,
[
"{0} {1} fit {2}".format(
quant_label, index, xic_peak_index
)
]
+ np.nan_to_num(
peaks.bigauss_ndim(xdata, peak_params)
).tolist(),
)
del combined_peaks
write_html = True if self.ratio_cutoff == 0 else False
# # Some experimental code that tries to compare the XIC with the theoretical distribution
# # Currently disabled as it reduces the number of datapoints to infer SILAC ratios and results in poorer
# # comparisons -- though there might be merit to intensity based estimates with this.
if self.parser_args.theo_xic and self.mono and theo_dist is not None:
# Compare the extracted XIC with the theoretical abundance of each isotope:
# To do this, we take the residual of all combinations of isotopes
for quant_label in quant_vals:
isotopes = quant_vals[quant_label].keys()
isotope_ints = {i: quant_vals[quant_label][i] for i in isotopes}
isotope_residuals = []
for num_of_isotopes in range(2, len(isotopes) + 1):
for combo in combinations(isotopes, num_of_isotopes):
chosen_isotopes = np.array(
[isotope_ints[i] for i in combo]
)
chosen_isotopes /= chosen_isotopes.max()
chosen_dist = np.array([theo_dist[i] for i in combo])
chosen_dist /= chosen_dist.max()
res = sum((chosen_dist - chosen_isotopes) ** 2)
isotope_residuals.append((res, combo))
# this weird sorting is to put the favorable values as the lowest values
if isotope_residuals:
kept_keys = sorted(
isotope_residuals,
key=lambda x: (
0 if x[0] < 0.1 else 1,
len(isotopes) - len(x[1]),
x[0],
),
)[0][1]
# print(quant_label, kept_keys)
for i in isotopes:
if i not in kept_keys:
del quant_vals[quant_label][i]
for silac_label1 in data.keys():
# TODO: check if peaks overlap before taking ratio
qv1 = quant_vals.get(silac_label1, {})
result_dict.update(
{"{}_intensity".format(silac_label1): sum(qv1.values())}
)
if self.report_ratios:
for silac_label2 in data.keys():
if (
self.ref_label is not None
and str(silac_label2.lower()) != self.ref_label.lower()
):
continue
if silac_label1 == silac_label2:
continue
qv2 = quant_vals.get(silac_label2, {})
ratio = "NA"
if qv1 is not None and qv2 is not None:
if self.mono:
common_isotopes = set(qv1.keys()).intersection(
qv2.keys()
)
x = []
y = []
l1, l2 = 0, 0
for i in common_isotopes:
q1 = qv1.get(i)
q2 = qv2.get(i)
if (
q1 > 100
and q2 > 100
and q1 > l1 * 0.15
and q2 > l2 * 0.15
):
x.append(i)
y.append(q1 / q2)
l1, l2 = q1, q2
                                # fit an outlier model to the log2 ratios and average the inlier ratios
if len(x) >= 3 and np.std(np.log2(y)) > 0.3:
classifier = EllipticEnvelope(
contamination=0.25, random_state=0
)
fit_data = np.log2(
np.array(y).reshape(len(y), 1)
)
true_pred = (True, 1)
classifier.fit(fit_data)
ratio = nanmean(
[
y[i]
for i, v in enumerate(
classifier.predict(fit_data)
)
if v in true_pred
]
)
else:
ratio = nanmean(np.array(y))
else:
common_isotopes = set(qv1.keys()).union(qv2.keys())
quant1 = sum(
[qv1.get(i, 0) for i in common_isotopes]
)
quant2 = sum(
[qv2.get(i, 0) for i in common_isotopes]
)
ratio = (
quant1 / quant2 if quant1 and quant2 else "NA"
)
try:
if (
self.ratio_cutoff
and not pd.isnull(ratio)
and np.abs(np.log2(ratio)) > self.ratio_cutoff
):
write_html = True
except Exception as e:
pass
result_dict.update(
{
"{}_{}_ratio".format(
silac_label1, silac_label2
): ratio
}
)
if write_html:
result_dict.update({"html_info": html_images})
for peak_label, peak_data in peak_info.items():
result_dict.update(
{
"{}_peaks".format(peak_label): peak_data,
"{}_isotopes".format(peak_label): sum(
(isotopes_chosen["label"] == peak_label)
& (isotopes_chosen["amplitude"] > 0)
),
}
)
for silac_label, silac_data in data.items():
precursor = silac_data["precursor"]
calc_precursor = silac_data.get(
"calibrated_precursor", silac_data["precursor"]
)
result_dict.update(
{
"{}_residual".format(silac_label): nanmean(
pd.Series(silac_data.get("residual", [])).replace(
[np.inf, -np.inf, np.nan], 0
)
),
"{}_precursor".format(silac_label): precursor,
"{}_calibrated_precursor".format(silac_label): calc_precursor,
}
)
result_dict.update(
{
"ions_found": target_scan.get("ions_found"),
"html": {"xic": rt_figure, "isotope": isotope_figure,},
}
)
self.results.put(result_dict)
except Exception as e:
print(
"ERROR encountered. Please report at https://github.com/Chris7/pyquant/issues:\n {}\nParameters: {}".format(
traceback.format_exc(), params
)
)
try:
self.results.put(result_dict)
except Exception as e:
pass
return
def run(self):
for index, params in enumerate(iter(self.queue.get, None)):
self.params = params
self.quantify_peaks(params)
self.results.put(None)
|
mit
|
taylorhxu/neurosynth
|
neurosynth/analysis/cluster.py
|
1
|
10067
|
from copy import deepcopy
import numpy as np
from six import string_types
from sklearn import decomposition as sk_decomp
from sklearn import cluster as sk_cluster
from sklearn.metrics import pairwise_distances
from os.path import exists, join
from os import makedirs
from nibabel import nifti1
from neurosynth.analysis import meta
class Clusterable(object):
'''
Args:
dataset: The Dataset instance to extract data from.
mask: A mask defining the voxels to cluster. Can be a filename,
nibabel image, or numpy array (see Masker.mask() for details).
features (str or list): Optional string or list of strings specifying
any feature names to use for study selection. E.g., passing
['emotion', 'reward'] would retain for analysis only those studies
associated with the features emotion or reward at a frequency
greater than feature_threshold.
feature_threshold (float): The threshold to use when selecting studies
on the basis of features.
min_voxels_per_study (int): Minimum number of active voxels a study
must report in order to be retained in the dataset. By default,
all studies are used.
min_studies_per_voxel (int): Minimum number of studies a voxel must be
active in in order to be retained in analysis. By default, all
voxels are used.
'''
def __init__(self, dataset, mask=None, features=None,
feature_threshold=None, min_voxels=None, min_studies=None):
self.dataset = dataset
self.masker = deepcopy(dataset.masker)
# Condition study inclusion on specific features
if features is not None:
ids = dataset.get_studies(features=features,
frequency_threshold=feature_threshold)
data = dataset.get_image_data(ids, dense=False)
else:
data = dataset.image_table.data
# Trim data based on minimum number of voxels or studies
if min_studies is not None:
av = self.masker.unmask(
data.sum(1) >= min_studies, output='vector')
self.masker.add({'voxels': av})
if min_voxels is not None:
data = data[:, np.array(data.sum(0) >= min_voxels).squeeze()]
if mask is not None:
self.masker.add({'roi': mask})
self.data = data[self.masker.get_mask(['voxels', 'roi']), :].toarray()
def transform(self, transformer, transpose=False):
''' Apply a transformation to the Clusterable instance. Accepts any
scikit-learn-style class that implements a fit_transform() method. '''
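        # e.g. transformer=sk_decomp.RandomizedPCA(100) replaces self.data (or its
        # transpose) with a 100-component projection; the component count is illustrative.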
data = self.data.T if transpose else self.data
self.data = transformer.fit_transform(data)
return self
def magic(dataset, method='coactivation', roi_mask=None,
coactivation_mask=None, features=None, feature_threshold=0.05,
min_voxels_per_study=None, min_studies_per_voxel=None,
reduce_reference='pca', n_components=100,
distance_metric='correlation', clustering_algorithm='kmeans',
n_clusters=5, clustering_kwargs={}, output_dir=None, filename=None,
coactivation_images=False, coactivation_threshold=0.1):
''' Execute a full clustering analysis pipeline.
Args:
dataset: a Dataset instance to extract all data from.
method (str): the overall clustering approach to use. Valid options:
'coactivation' (default): Clusters voxel within the ROI mask based
on shared pattern of coactivation with the rest of the brain.
'studies': Treat each study as a feature in an n-dimensional space.
I.e., voxels will be assigned to the same cluster if they tend
to be co-reported in similar studies.
roi_mask: A string, nibabel image, or numpy array providing an
inclusion mask of voxels to cluster. If None, the default mask
in the Dataset instance is used (typically, all in-brain voxels).
coactivation_mask: If method='coactivation', this mask defines the
voxels to use when generating the pairwise distance matrix. For
example, if a PFC mask is passed, all voxels in the roi_mask will
be clustered based on how similar their patterns of coactivation
with PFC voxels are. Can be a str, nibabel image, or numpy array.
features (str or list): Optional string or list of strings specifying
any feature names to use for study selection. E.g., passing
['emotion', 'reward'] would retain for analysis only those studies
associated with the features emotion or reward at a frequency
greater than feature_threshold.
feature_threshold (float): The threshold to use when selecting studies
on the basis of features.
min_voxels_per_study (int): Minimum number of active voxels a study
must report in order to be retained in the dataset. By default,
all studies are used.
min_studies_per_voxel (int): Minimum number of studies a voxel must be
active in in order to be retained in analysis. By default, all
voxels are used.
reduce_reference (str, scikit-learn object or None): The dimensionality
reduction algorithm to apply to the feature space prior to the
computation of pairwise distances. If a string is passed (either
'pca' or 'ica'), n_components must be specified. If None, no
dimensionality reduction will be applied. Otherwise, must be a
scikit-learn-style object that exposes a transform() method.
n_components (int): Number of components to extract during the
dimensionality reduction step. Only used if reduce_reference is
a string.
distance_metric (str): The distance metric to use when computing
pairwise distances on the to-be-clustered voxels. Can be any of the
metrics supported by sklearn.metrics.pairwise_distances.
clustering_algorithm (str or scikit-learn object): the clustering
algorithm to use. If a string, must be one of 'kmeans' or 'minik'.
Otherwise, any sklearn class that exposes a fit_predict() method.
n_clusters (int): If clustering_algorithm is a string, the number of
clusters to extract.
clustering_kwargs (dict): Additional keywords to pass to the clustering
object.
output_dir (str): The directory to write results to. If None (default),
returns the cluster label image rather than saving to disk.
filename (str): Name of cluster label image file. Defaults to
cluster_labels_k{k}.nii.gz, where k is the number of clusters.
coactivation_images (bool): If True, saves a meta-analytic coactivation
map for every ROI in the resulting cluster map.
coactivation_threshold (float or int): If coactivation_images is True,
this is the threshold used to define whether or not a study is
considered to activation within a cluster ROI. Integer values are
interpreted as minimum number of voxels within the ROI; floats
are interpreted as the proportion of voxels. Defaults to 0.1 (i.e.,
10% of all voxels within ROI must be active).
'''
roi = Clusterable(dataset, roi_mask, min_voxels=min_voxels_per_study,
min_studies=min_studies_per_voxel, features=features,
feature_threshold=feature_threshold)
if method == 'coactivation':
reference = Clusterable(dataset, coactivation_mask,
min_voxels=min_voxels_per_study,
min_studies=min_studies_per_voxel,
features=features,
feature_threshold=feature_threshold)
elif method == 'studies':
reference = roi
if reduce_reference is not None:
if isinstance(reduce_reference, string_types):
reduce_reference = {
'pca': sk_decomp.RandomizedPCA,
'ica': sk_decomp.FastICA
}[reduce_reference](n_components)
transpose = (method == 'coactivation')
reference = reference.transform(reduce_reference, transpose=transpose)
if transpose:
reference.data = reference.data.T
distances = pairwise_distances(roi.data, reference.data,
metric=distance_metric)
# TODO: add additional clustering methods
if isinstance(clustering_algorithm, string_types):
clustering_algorithm = {
'kmeans': sk_cluster.KMeans,
'minik': sk_cluster.MiniBatchKMeans
}[clustering_algorithm](n_clusters, **clustering_kwargs)
labels = clustering_algorithm.fit_predict(distances) + 1.
header = roi.masker.get_header()
header['cal_max'] = labels.max()
header['cal_min'] = labels.min()
voxel_labels = roi.masker.unmask(labels)
img = nifti1.Nifti1Image(voxel_labels, None, header)
if output_dir is not None:
if not exists(output_dir):
makedirs(output_dir)
if filename is None:
filename = 'cluster_labels_k%d.nii.gz' % n_clusters
outfile = join(output_dir, filename)
img.to_filename(outfile)
# Write coactivation images
if coactivation_images:
for l in np.unique(voxel_labels):
roi_mask = np.copy(voxel_labels)
roi_mask[roi_mask != l] = 0
ids = dataset.get_studies(
mask=roi_mask, activation_threshold=coactivation_threshold)
ma = meta.MetaAnalysis(dataset, ids)
ma.save_results(output_dir=join(output_dir, 'coactivation'),
prefix='cluster_%d_coactivation' % l)
else:
return img
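# A minimal usage sketch of the pipeline above, assuming a pickled Neurosynth
# Dataset is available at 'dataset.pkl' (hypothetical path); the method and
# cluster count below are illustrative.
if __name__ == '__main__':
    from neurosynth.base.dataset import Dataset
    dataset = Dataset.load('dataset.pkl')
    # Cluster all in-brain voxels into 8 parcels by whole-brain coactivation and
    # return the label image in memory (output_dir=None is the default).
    labels_img = magic(dataset, method='coactivation', n_clusters=8)
    labels_img.to_filename('cluster_labels_k8.nii.gz')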
|
mit
|
talespaiva/folium
|
examples/folium_vincent_markers.py
|
1
|
1799
|
# -*- coding: utf-8 -*-
'''Folium Vincent plotting'''
import pandas as pd
import vincent
import folium
NOAA_46041 = pd.read_csv(r'NOAA_46041.csv', index_col=3,
parse_dates=True)
NOAA_46050 = pd.read_csv(r'NOAA_46050_WS.csv', index_col=3,
parse_dates=True)
NOAA_46243 = pd.read_csv(r'NOAA_46243.csv', index_col=3,
parse_dates=True)
NOAA_46041 = NOAA_46041.dropna()
# Binned wind speeds for NOAA 46050.
bins = range(0, 13, 1)
cuts = pd.cut(NOAA_46050['wind_speed_cwind (m/s)'], bins)
ws_binned = pd.value_counts(cuts).reindex(cuts.levels)
# NOAA 46401 Wave Period.
vis1 = vincent.Line(NOAA_46041['dominant_wave_period (s)'],
width=400, height=200)
vis1.axis_titles(x='Time', y='Dominant Wave Period (s)')
vis1.to_json('vis1.json')
# NOAA 46050 Binned Wind Speed.
vis2 = vincent.Bar(ws_binned, width=400, height=200)
vis2.axis_titles(x='Wind Speed (m/s)', y='# of Obs')
vis2.to_json('vis2.json')
# NOAA 46243 Wave Height.
vis3 = vincent.Area(NOAA_46243['significant_wave_height (m)'],
width=400, height=200)
vis3.axis_titles(x='Time', y='Significant Wave Height (m)')
vis3.to_json('vis3.json')
# Map all buoys.
buoy_map = folium.Map(location=[46.3014, -123.7390], zoom_start=7,
tiles='Stamen Terrain')
buoy_map.polygon_marker(location=[47.3489, -124.708], fill_color='#43d9de',
radius=12, popup=(vis1, 'vis1.json'))
buoy_map.polygon_marker(location=[44.639, -124.5339], fill_color='#43d9de',
radius=12, popup=(vis2, 'vis2.json'))
buoy_map.polygon_marker(location=[46.216, -124.1280], fill_color='#43d9de',
radius=12, popup=(vis3, 'vis3.json'))
buoy_map.save(outfile='NOAA_buoys.html')
|
mit
|
gyglim/Recipes
|
papers/preactivation_and_wide_resnet/test_model.py
|
3
|
2454
|
"""
Lasagne implementation of CIFAR-10 examples from "Identity Mappings in Deep Residual Networks" (https://arxiv.org/abs/1603.05027) and "Wide Residual Networks" (https://arxiv.org/abs/1605.07146)
"""
import os
import sys
import gzip
import time
import pickle
import datetime
import random
import numpy as np
import pandas as pd
import theano
from theano import tensor as T
import lasagne
from lasagne.updates import nesterov_momentum, adam
from lasagne.layers import helper
from utils import load_pickle_data_test
variant = sys.argv[1] if len(sys.argv) > 1 else 'normal'
depth = int(sys.argv[2]) if len(sys.argv) > 2 else 18
width = int(sys.argv[3]) if len(sys.argv) > 3 else 1
print 'Using %s ResNet with depth %d and width %d.'%(variant,depth,width)
if variant == 'normal':
from models import ResNet_FullPreActivation as ResNet
elif variant == 'bottleneck':
from models import ResNet_BottleNeck_FullPreActivation as ResNet
elif variant == 'wide':
from models import ResNet_FullPre_Wide as ResNet
else:
    print ('Unsupported model %s' % variant)
    sys.exit(1)  # no ResNet variant was imported; bail out before it is used below
BATCHSIZE = 1
'''
Set up all theano functions
'''
X = T.tensor4('X')
Y = T.ivector('y')
# set up theano functions to generate output by feeding data through network, any test outputs should be deterministic
# load model
if width > 1:
output_layer = ResNet(X, n=depth, k=width)
else:
output_layer = ResNet(X, n=depth)
output_test = lasagne.layers.get_output(output_layer, deterministic=True)
output_class = T.argmax(output_test, axis=1)
# set up training and prediction functions
predict_proba = theano.function(inputs=[X], outputs=output_test)
predict_class = theano.function(inputs=[X], outputs=output_class)
'''
Load data and make predictions
'''
test_X, test_y = load_pickle_data_test()
# load network weights
f = gzip.open('data/weights/%s%d_resnet.pklz'%(variant,depth), 'rb')
all_params = pickle.load(f)
f.close()
helper.set_all_param_values(output_layer, all_params)
#make predictions
pred_labels = []
for j in range((test_X.shape[0] + BATCHSIZE - 1) // BATCHSIZE):
sl = slice(j * BATCHSIZE, (j + 1) * BATCHSIZE)
X_batch = test_X[sl]
pred_labels.extend(predict_class(X_batch))
pred_labels = np.array(pred_labels)
print pred_labels.shape
'''
Compare differences
'''
same = 0
for i in range(pred_labels.shape[0]):
if test_y[i] == pred_labels[i]:
same += 1
print 'Accuracy on the testing set: ', (float(same) / float(pred_labels.shape[0]))
|
mit
|
southpaw94/MachineLearning
|
HPTuning/roc_curve.py
|
1
|
3220
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.cross_validation import StratifiedKFold
from sklearn.metrics import roc_curve, auc, roc_auc_score, \
accuracy_score
from scipy import interp
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
# This program builds a scaler -> PCA -> logistic regression pipeline and plots
# per-fold ROC curves by iterating over StratifiedKFold splits, together with
# the mean ROC curve and its AUC.
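# For reference, a cross-validated AUC could also be computed in a single call
# (sketch, assuming the pipe_lr pipeline defined further below):
#
#     from sklearn.cross_validation import cross_val_score
#     auc_scores = cross_val_score(pipe_lr, X_train, y_train, cv=3, scoring='roc_auc')
#     print('Mean CV AUC: %.3f' % auc_scores.mean())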
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases'+\
'/breast-cancer-wisconsin/wdbc.data', header=None)
X = df.loc[:, 2:].values
y = df.loc[:, 1].values
# All malignant tumors will be represented as class 1, otherwise, class 0
le = LabelEncoder()
y = le.fit_transform(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, \
test_size=0.20, random_state=1)
X_train2 = X_train[:, [4, 14]]
cv = StratifiedKFold(y_train, \
n_folds = 3, \
random_state = 1)
fig = plt.figure(figsize = (7, 5))
mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)
all_tpr = []
# Create a logistic regression pipeline: the data is first scaled, then reduced
# by principal component analysis to its first two principal components, and
# finally classified by logistic regression.
pipe_lr = Pipeline([('scl', StandardScaler()), ('pca', PCA(n_components=2)), \
('clf', LogisticRegression(random_state=1))])
pipe_lr.fit(X_train, y_train)
for i, (train, test) in enumerate(cv):
probas = pipe_lr.fit(X_train2[train], y_train[train] \
).predict_proba(X_train2[test])
fpr, tpr, thresholds = roc_curve(y_train[test], \
probas[:, 1], \
pos_label = 1)
mean_tpr += interp(mean_fpr, fpr, tpr)
mean_tpr[0] = 0.0
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, lw=1, label = 'ROC fold %d (area = %0.2f)' \
% (i + 1, roc_auc))
plt.plot([0, 1], \
[0, 1], \
linestyle = '--', \
color=(0.6, 0.6, 0.6), \
label = 'random guessing')
mean_tpr /= len(cv)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
plt.plot(mean_fpr, mean_tpr, 'k--', \
label='mean ROC (area = %0.2f)' % mean_auc, lw=2)
plt.plot([0, 0, 1], \
[0, 1, 1], \
lw = 2, \
linestyle=':', \
color='black', \
label='perfect performance')
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('false positive rate')
plt.ylabel('true positive rate')
plt.title('Receiver Operating Characteristic')
plt.legend(loc="lower right")
plt.show()
pipe_svc = Pipeline([('scl', StandardScaler()), \
('clf', SVC(random_state=1))])
pipe_svc = pipe_svc.fit(X_train2, y_train)
y_pred2 = pipe_svc.predict(X_test[:, [4, 14]])
print('ROC AUC: %.3f' % roc_auc_score( \
y_true=y_test, y_score=y_pred2))
print('Accuracy: %.3f' % accuracy_score( \
y_true = y_test, y_pred = y_pred2))
|
gpl-2.0
|
vizual54/MissionPlanner
|
Lib/site-packages/numpy/core/function_base.py
|
82
|
5474
|
__all__ = ['logspace', 'linspace']
import numeric as _nx
from numeric import array
def linspace(start, stop, num=50, endpoint=True, retstep=False):
"""
Return evenly spaced numbers over a specified interval.
Returns `num` evenly spaced samples, calculated over the
interval [`start`, `stop` ].
The endpoint of the interval can optionally be excluded.
Parameters
----------
start : scalar
The starting value of the sequence.
stop : scalar
The end value of the sequence, unless `endpoint` is set to False.
In that case, the sequence consists of all but the last of ``num + 1``
evenly spaced samples, so that `stop` is excluded. Note that the step
size changes when `endpoint` is False.
num : int, optional
Number of samples to generate. Default is 50.
endpoint : bool, optional
If True, `stop` is the last sample. Otherwise, it is not included.
Default is True.
retstep : bool, optional
If True, return (`samples`, `step`), where `step` is the spacing
between samples.
Returns
-------
samples : ndarray
There are `num` equally spaced samples in the closed interval
``[start, stop]`` or the half-open interval ``[start, stop)``
(depending on whether `endpoint` is True or False).
step : float (only if `retstep` is True)
Size of spacing between samples.
See Also
--------
    arange : Similar to `linspace`, but uses a step size (instead of the
number of samples).
logspace : Samples uniformly distributed in log space.
Examples
--------
>>> np.linspace(2.0, 3.0, num=5)
array([ 2. , 2.25, 2.5 , 2.75, 3. ])
>>> np.linspace(2.0, 3.0, num=5, endpoint=False)
array([ 2. , 2.2, 2.4, 2.6, 2.8])
>>> np.linspace(2.0, 3.0, num=5, retstep=True)
(array([ 2. , 2.25, 2.5 , 2.75, 3. ]), 0.25)
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 8
>>> y = np.zeros(N)
>>> x1 = np.linspace(0, 10, N, endpoint=True)
>>> x2 = np.linspace(0, 10, N, endpoint=False)
>>> plt.plot(x1, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2, y + 0.5, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
"""
num = int(num)
if num <= 0:
return array([], float)
if endpoint:
if num == 1:
return array([float(start)])
step = (stop-start)/float((num-1))
y = _nx.arange(0, num) * step + start
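        # overwrite the final sample so the endpoint is exact despite floating-point rounding of the step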
y[-1] = stop
else:
step = (stop-start)/float(num)
y = _nx.arange(0, num) * step + start
if retstep:
return y, step
else:
return y
def logspace(start,stop,num=50,endpoint=True,base=10.0):
"""
Return numbers spaced evenly on a log scale.
In linear space, the sequence starts at ``base ** start``
(`base` to the power of `start`) and ends with ``base ** stop``
(see `endpoint` below).
Parameters
----------
start : float
``base ** start`` is the starting value of the sequence.
stop : float
``base ** stop`` is the final value of the sequence, unless `endpoint`
is False. In that case, ``num + 1`` values are spaced over the
interval in log-space, of which all but the last (a sequence of
length ``num``) are returned.
num : integer, optional
Number of samples to generate. Default is 50.
endpoint : boolean, optional
If true, `stop` is the last sample. Otherwise, it is not included.
Default is True.
base : float, optional
The base of the log space. The step size between the elements in
``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.
Default is 10.0.
Returns
-------
samples : ndarray
`num` samples, equally spaced on a log scale.
See Also
--------
    arange : Similar to linspace, with the step size specified instead of the
number of samples. Note that, when used with a float endpoint, the
endpoint may or may not be included.
linspace : Similar to logspace, but with the samples uniformly distributed
in linear space, instead of log space.
Notes
-----
Logspace is equivalent to the code
>>> y = np.linspace(start, stop, num=num, endpoint=endpoint)
... # doctest: +SKIP
>>> power(base, y)
... # doctest: +SKIP
Examples
--------
>>> np.logspace(2.0, 3.0, num=4)
array([ 100. , 215.443469 , 464.15888336, 1000. ])
>>> np.logspace(2.0, 3.0, num=4, endpoint=False)
array([ 100. , 177.827941 , 316.22776602, 562.34132519])
>>> np.logspace(2.0, 3.0, num=4, base=2.0)
array([ 4. , 5.0396842 , 6.34960421, 8. ])
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 10
>>> x1 = np.logspace(0.1, 1, N, endpoint=True)
>>> x2 = np.logspace(0.1, 1, N, endpoint=False)
>>> y = np.zeros(N)
>>> plt.plot(x1, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2, y + 0.5, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
"""
y = linspace(start,stop,num=num,endpoint=endpoint)
return _nx.power(base,y)
|
gpl-3.0
|
ds-hwang/deeplearning_udacity
|
udacity_notebook/5_word2vec.py
|
1
|
15284
|
# coding: utf-8
# Deep Learning
# =============
#
# Assignment 5
# ------------
#
# The goal of this assignment is to train a Word2Vec skip-gram model over [Text8](http://mattmahoney.net/dc/textdata) data.
# In[ ]:
# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
import collections
import math
import numpy as np
import os
import random
import tensorflow as tf
import zipfile
from matplotlib import pylab
from six.moves import range
from six.moves.urllib.request import urlretrieve
from sklearn.manifold import TSNE
# Download the data from the source website if necessary.
# In[ ]:
url = 'http://mattmahoney.net/dc/'
def maybe_download(filename, expected_bytes):
"""Download a file if not present, and make sure it's the right size."""
if not os.path.exists(filename):
filename, _ = urlretrieve(url + filename, filename)
statinfo = os.stat(filename)
if statinfo.st_size == expected_bytes:
print('Found and verified %s' % filename)
else:
print(statinfo.st_size)
raise Exception(
'Failed to verify ' + filename + '. Can you get to it with a browser?')
return filename
filename = maybe_download('text8.zip', 31344016)
# Read the data into a string.
# In[ ]:
def read_data(filename):
"""Extract the first file enclosed in a zip file as a list of words"""
with zipfile.ZipFile(filename) as f:
data = tf.compat.as_str(f.read(f.namelist()[0])).split()
return data
words = read_data(filename)
# 17005207
print('Data size %d' % len(words))
# Build the dictionary and replace rare words with UNK token.
# In[ ]:
vocabulary_size = 50000
def build_dataset(words):
count = [['UNK', -1]]
count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
if word in dictionary:
index = dictionary[word]
else:
index = 0 # dictionary['UNK']
unk_count = unk_count + 1
data.append(index)
count[0][1] = unk_count
reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reverse_dictionary
data, count, dictionary, reverse_dictionary = build_dataset(words)
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10])
del words # Hint to reduce memory.
# Function to generate a training batch for the skip-gram model.
# In[ ]:
data_index = 0
def generate_batch(batch_size, num_skips, skip_window):
global data_index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
buffer = collections.deque(maxlen=span)
for _ in range(span):
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
for i in range(batch_size // num_skips):
target = skip_window # target label at the center of the buffer
targets_to_avoid = [ skip_window ]
for j in range(num_skips):
while target in targets_to_avoid:
target = random.randint(0, span - 1)
targets_to_avoid.append(target)
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[target]
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
return batch, labels
print('data:', [reverse_dictionary[di] for di in data[:8]])
for num_skips, skip_window in [(2, 1), (4, 2)]:
data_index = 0
batch, labels = generate_batch(batch_size=8, num_skips=num_skips, skip_window=skip_window)
print('\nwith num_skips = %d and skip_window = %d:' % (num_skips, skip_window))
print(' batch:', [reverse_dictionary[bi] for bi in batch])
print(' labels:', [reverse_dictionary[li] for li in labels.reshape(8)])
# ('Most common words (+UNK)', [['UNK', 418391], ('the', 1061396), ('of', 593677), ('and', 416629), ('one', 411764)])
# ('Sample data', [5239, 3084, 12, 6, 195, 2, 3137, 46, 59, 156])
# ('data:', ['anarchism', 'originated', 'as', 'a', 'term', 'of', 'abuse', 'first'])
#
# with num_skips = 2 and skip_window = 1:
# (' batch:', ['originated', 'originated', 'as', 'as', 'a', 'a', 'term', 'term'])
# (' labels:', ['anarchism', 'as', 'originated', 'a', 'as', 'term', 'a', 'of'])
#
# with num_skips = 4 and skip_window = 2:
# (' batch:', ['as', 'as', 'as', 'as', 'a', 'a', 'a', 'a'])
# (' labels:', ['originated', 'term', 'anarchism', 'a', 'originated', 'of', 'as', 'term'])
# Train a skip-gram model.
# In[ ]:
def run_skip_gram():
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
  # We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.array(random.sample(range(valid_window), valid_size))
num_sampled = 64 # Number of negative examples to sample.
graph = tf.Graph()
with graph.as_default(), tf.device('/cpu:0'):
# Input data.
train_dataset = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
# random 16 samples
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Variables.
# vocabulary_size = 50000, embedding_size = 128
# embeddings == U (i.e. input vector)
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
# weights == V (i.e. output vector)
softmax_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
softmax_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Model.
# Look up embeddings for inputs.
embed = tf.nn.embedding_lookup(embeddings, train_dataset)
# Compute the softmax loss, using a sample of the negative labels each time.
loss = tf.reduce_mean(
tf.nn.sampled_softmax_loss(softmax_weights, softmax_biases, embed,
train_labels, num_sampled, vocabulary_size))
# Optimizer.
optimizer = tf.train.AdagradOptimizer(1.0).minimize(loss)
# Compute the similarity between minibatch examples and all embeddings.
# We use the cosine distance:
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
# cosine distance
similarity = tf.matmul(valid_embeddings, tf.transpose(normalized_embeddings))
# In[ ]:
  global data_index
  data_index = 0
num_steps = 100001
with tf.Session(graph=graph) as session:
tf.initialize_all_variables().run()
print('Initialized')
average_loss = 0
for step in range(num_steps):
batch_data, batch_labels = generate_batch(
batch_size, num_skips, skip_window)
feed_dict = {train_dataset : batch_data, train_labels : batch_labels}
_, l = session.run([optimizer, loss], feed_dict=feed_dict)
average_loss += l
if step % 2000 == 0:
if step > 0:
average_loss = average_loss / 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print('Average loss at step %d: %f' % (step, average_loss))
average_loss = 0
# note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in range(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k+1] # - for descending
log = 'Nearest to %s:' % valid_word
for k in range(top_k):
close_word = reverse_dictionary[nearest[k]]
log = '%s %s,' % (log, close_word)
print(log)
final_embeddings = normalized_embeddings.eval()
# In[ ]:
num_points = 400
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
two_d_embeddings = tsne.fit_transform(final_embeddings[1:num_points+1, :])
# In[ ]:
def plot(embeddings, labels):
assert embeddings.shape[0] >= len(labels), 'More labels than embeddings'
pylab.figure(figsize=(15,15)) # in inches
for i, label in enumerate(labels):
x, y = embeddings[i,:]
pylab.scatter(x, y)
pylab.annotate(label, xy=(x, y), xytext=(5, 2), textcoords='offset points',
ha='right', va='bottom')
pylab.show()
words = [reverse_dictionary[i] for i in range(1, num_points+1)]
plot(two_d_embeddings, words)
# run_skip_gram()
# ---
#
# Problem
# -------
#
# An alternative to skip-gram is another Word2Vec model called [CBOW](http://arxiv.org/abs/1301.3781) (Continuous Bag of Words).
# In the CBOW model, instead of predicting a context word from a word vector, you predict a word from the sum of all the word vectors in its context.
# Implement and evaluate a CBOW model trained on the text8 dataset.
#
# ---
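# One possible CBOW batching/model sketch (an illustration of summing context
# embeddings; run_CBOW below instead reuses the skip-gram batches with the
# (center, context) roles swapped):
#
#     context_dataset = tf.placeholder(tf.int32, shape=[batch_size, 2 * skip_window])
#     context_embed = tf.nn.embedding_lookup(embeddings, context_dataset)
#     cbow_input = tf.reduce_sum(context_embed, 1)  # sum the context word vectors
#     loss = tf.reduce_mean(tf.nn.sampled_softmax_loss(
#         softmax_weights, softmax_biases, cbow_input,
#         train_labels, num_sampled, vocabulary_size))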
def run_CBOW():
# according to http://cs224d.stanford.edu/lectures/CS224d-Lecture3.pdf
# 17005207 (i.e 17M) -> 1B
skip_window = 3
num_skips = 6
embedding_size = 300
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
  # We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.array(random.sample(range(valid_window), valid_size))
num_sampled = 64 # Number of negative examples to sample.
graph = tf.Graph()
with graph.as_default(), tf.device('/cpu:0'):
# Input data.
train_dataset = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
# random 16 samples
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Variables.
# vocabulary_size = 50000, embedding_size = 128
# embeddings == U (i.e. input vector)
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
# weights == V (i.e. output vector)
softmax_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
softmax_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Model.
# Look up embeddings for inputs.
embed = tf.nn.embedding_lookup(embeddings, train_dataset)
# Compute the softmax loss, using a sample of the negative labels each time.
loss = tf.reduce_mean(
tf.nn.sampled_softmax_loss(softmax_weights, softmax_biases, embed,
train_labels, num_sampled, vocabulary_size))
# Optimizer.
optimizer = tf.train.AdagradOptimizer(1.0).minimize(loss)
# Compute the similarity between minibatch examples and all embeddings.
# We use the cosine distance:
norm = tf.sqrt(tf.reduce_sum(tf.square(softmax_weights), 1, keep_dims=True))
normalized_embeddings = softmax_weights / norm
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
# cosine distance
similarity = tf.matmul(valid_embeddings, tf.transpose(normalized_embeddings))
# In[ ]:
  global data_index
  data_index = 0
num_steps = 100001
with tf.Session(graph=graph) as session:
tf.initialize_all_variables().run()
print('Initialized')
average_loss = 0
for step in range(num_steps):
batch_center, batch_surround = generate_batch(
batch_size, num_skips, skip_window)
batch_surround = batch_surround.reshape(batch_size)
batch_center = batch_center[:,None]
feed_dict = {train_dataset : batch_surround, train_labels : batch_center}
_, l = session.run([optimizer, loss], feed_dict=feed_dict)
average_loss += l
if step % 2000 == 0:
if step > 0:
average_loss = average_loss / 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print('Average loss at step %d: %f' % (step, average_loss))
average_loss = 0
# note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in range(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k+1] # - for descending
log = 'Nearest to %s:' % valid_word
for k in range(top_k):
close_word = reverse_dictionary[nearest[k]]
log = '%s %s,' % (log, close_word)
print(log)
final_embeddings = normalized_embeddings.eval()
# In[ ]:
num_points = 400
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
two_d_embeddings = tsne.fit_transform(final_embeddings[1:num_points+1, :])
# In[ ]:
def plot(embeddings, labels):
assert embeddings.shape[0] >= len(labels), 'More labels than embeddings'
pylab.figure(figsize=(15,15)) # in inches
for i, label in enumerate(labels):
x, y = embeddings[i,:]
pylab.scatter(x, y)
pylab.annotate(label, xy=(x, y), xytext=(5, 2), textcoords='offset points',
ha='right', va='bottom')
pylab.show()
words = [reverse_dictionary[i] for i in range(1, num_points+1)]
plot(two_d_embeddings, words)
run_CBOW()
|
mit
|
rsivapr/scikit-learn
|
examples/cluster/plot_mean_shift.py
|
12
|
1773
|
"""
=============================================
A demo of the mean-shift clustering algorithm
=============================================
Reference:
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.datasets.samples_generator import make_blobs
###############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, _ = make_blobs(n_samples=10000, centers=centers, cluster_std=0.6)
###############################################################################
# Compute clustering with MeanShift
# The following bandwidth can be automatically detected using estimate_bandwidth
bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=500)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print("number of estimated clusters : %d" % n_clusters_)
###############################################################################
# Plot result
import pylab as pl
from itertools import cycle
pl.figure(1)
pl.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
my_members = labels == k
cluster_center = cluster_centers[k]
pl.plot(X[my_members, 0], X[my_members, 1], col + '.')
pl.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
pl.title('Estimated number of clusters: %d' % n_clusters_)
pl.show()
|
bsd-3-clause
|
nfsli926/tushare
|
tushare/datayes/equity.py
|
17
|
6857
|
# -*- coding:utf-8 -*-
"""
DataYes (通联数据) data API
Created on 2015/08/24
@author: Jimmy Liu
@group : waditu
@contact: [email protected]
"""
from pandas.compat import StringIO
import pandas as pd
from tushare.util import vars as vs
from tushare.util.common import Client
from tushare.util import upass as up
class Equity():
def __init__(self, client=None):
if client is None:
self.client = Client(up.get_token())
else:
self.client = client
def Equ(self, equTypeCD='', secID='', ticker='', listStatusCD='', field=''):
"""
        Get basic stock information, including the ticker code and its short name, stock type, listing status, listing board, and listing date; the listing status reflects the latest data and historical changes are not shown.
"""
code, result = self.client.getData(vs.EQU%(equTypeCD, secID, ticker, listStatusCD, field))
return _ret_data(code, result)
def EquAllot(self, isAllotment='', secID='', ticker='', beginDate='', endDate='', field=''):
"""
        Get basic information on the stock's historical rights issues, including the content of each rights-issue plan, its progress, the number of published rights-issue proposals, and whether the rights issue ultimately succeeded.
"""
code, result = self.client.getData(vs.EQUALLOT%(isAllotment, secID, ticker,
beginDate, endDate, field))
return _ret_data(code, result)
def EquDiv(self, eventProcessCD='', exDivDate='', secID='', ticker='', beginDate='',
endDate='', field=''):
"""
        Get basic information on the stock's historical dividends (cash dividends, bonus shares, capitalization issues), including the content of each dividend proposal, its implementation progress, and the number of historically declared dividends.
"""
code, result = self.client.getData(vs.EQUDIV%(eventProcessCD, exDivDate,
secID, ticker, beginDate, endDate, field))
return _ret_data(code, result)
def EquIndustry(self, industry='', industryID='', industryVersionCD='', secID='',
ticker='', intoDate='', field=''):
"""
        Given a security ID or ticker code, get the stock's industry classification.
"""
code, result = self.client.getData(vs.EQUINDUSTRY%(industry, industryID, industryVersionCD,
secID, ticker, intoDate, field))
return _ret_data(code, result)
def EquIPO(self, eventProcessCD='', secID='', ticker='', field=''):
"""
        Get basic information on the stock's initial public offering, including the IPO process and issuance results.
"""
code, result = self.client.getData(vs.EQUIPO%(eventProcessCD, secID, ticker, field))
return _ret_data(code, result)
def EquRef(self, secID='', ticker='', beginDate='', endDate='', eventProcessCD='', field=''):
"""
        Get basic information on the stock's split-share structure reform, including the reform's progress, the implementation plan, and changes in tradable shares.
"""
code, result = self.client.getData(vs.EQUREF%(secID, ticker, beginDate, endDate,
eventProcessCD, field))
return _ret_data(code, result)
def EquRetud(self, listStatusCD='', secID='', ticker='', beginDate='',
dailyReturnNoReinvLower='', dailyReturnNoReinvUpper='',
dailyReturnReinvLower='', dailyReturnReinvUpper='',
endDate='', isChgPctl='', field=''):
"""
        Get basic information on the stock's daily returns, including the listing status on the trading day, daily quotes, and basic ex-dividend/ex-rights data.
"""
code, result = self.client.getData(vs.EQURETUD%(listStatusCD, secID, ticker,
beginDate, dailyReturnNoReinvLower,
dailyReturnNoReinvUpper,
dailyReturnReinvLower,
dailyReturnReinvUpper,
endDate, isChgPctl, field))
return _ret_data(code, result)
def EquSplits(self, secID='', ticker='', beginDate='', endDate='', field=''):
"""
        Get basic information on the stock's share splits or reverse splits.
"""
code, result = self.client.getData(vs.EQUSPLITS%(secID, ticker, beginDate,
endDate, field))
return _ret_data(code, result)
def FstTotal(self, beginDate='', endDate='', exchangeCD='', field=''):
"""
        Get the daily margin trading and securities lending summary published by the Shanghai and Shenzhen exchanges, including trading volume and turnover. Data for the previous trading day is available on the current trading day.
"""
code, result = self.client.getData(vs.FSTTOTAL%(beginDate, endDate,
exchangeCD, field))
return _ret_data(code, result)
def FstDetail(self, secID='', ticker='', beginDate='', endDate='', field=''):
"""
        Get the detailed daily margin trading and securities lending data published by the Shanghai and Shenzhen exchanges, including the underlying securities, margin amounts, and volume figures. Data for the previous trading day is available on the current trading day.
"""
code, result = self.client.getData(vs.FSTDETAIL%(secID, ticker,
beginDate, endDate, field))
return _ret_data(code, result)
def EquShare(self, secID='', ticker='', beginDate='', endDate='',
partyID='', field=''):
"""
        Get the listed company's share capital structure and data on historical share capital changes.
"""
code, result = self.client.getData(vs.EQUSHARE%(secID, ticker,
beginDate, endDate,
partyID, field))
return _ret_data(code, result)
def SecST(self, secID='', ticker='', beginDate='', endDate='', field=''):
"""
        Given a stock ID (assigned by DataYes) or ticker code (multiple values supported, up to 50), plus a query start and end date, get the stock's ST flag information over that period.
"""
code, result = self.client.getData(vs.SECST%(secID, ticker,
beginDate, endDate, field))
return _ret_data(code, result)
def _ret_data(code, result):
if code==200:
result = result.decode('utf-8') if vs.PY3 else result
df = pd.read_csv(StringIO(result))
return df
else:
print(result)
return None
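# A brief usage sketch, assuming a valid DataYes token has already been stored
# where up.get_token() can find it; the ticker below is illustrative.
if __name__ == '__main__':
    eq = Equity()
    df = eq.Equ(ticker='000001')
    print(df)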
|
bsd-3-clause
|
whatbeg/Data-Analysis
|
widedeeprecommender/widedeeprecommender.py
|
1
|
5614
|
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Still in experimental stage!
from optparse import OptionParser
import sys
import pandas as pd
import numpy as np
# from ...dataset.transformer import *
# from ...nn.layer import *
# from ...nn.criterion import *
# from ...optim.optimizer import *
# from ...util.common import *
from dataset.transformer import *
from nn.layer import *
from nn.criterion import *
from optim.optimizer import *
from util.common import *
COLUMNS = ["age", "workclass", "fnlwgt", "education", "education_num",
"marital_status", "occupation", "relationship", "race", "gender",
"capital_gain", "capital_loss", "hours_per_week", "native_country",
"income_bracket"]
LABEL_COLUMN = "label"
CATEGORICAL_COLUMNS = ["workclass", "education", "marital_status", "occupation",
"relationship", "race", "gender", "native_country"]
CONTINUOUS_COLUMNS = ["age", "education_num", "capital_gain", "capital_loss",
"hours_per_week"]
def build_models(model_type='wide_n_deep', classNum=2):
model = Sequential()
wide_model = Concat(2)
for i in range(1, 8):
wide_model.add(Sequential().add(Select(2, i)).add(Reshape([1])))
deep_model = Sequential()
deep_column = Concat(2)
deep_column.add(Sequential().add(Select(2, 11)).add(LookupTable(9, 8, 0.0)))
deep_column.add(Sequential().add(Select(2, 12)).add(LookupTable(16, 8, 0.0)))
deep_column.add(Sequential().add(Select(2, 13)).add(LookupTable(2, 8, 0.0)))
deep_column.add(Sequential().add(Select(2, 14)).add(LookupTable(6, 8, 0.0)))
deep_column.add(Sequential().add(Select(2, 15)).add(LookupTable(42, 8, 0.0)))
deep_column.add(Sequential().add(Select(2, 16)).add(LookupTable(15, 8, 0.0)))
for i in range(17, 22):
deep_column.add(Sequential().add(Select(2, i)).add(Reshape([1])))
deep_model.add(deep_column).add(Linear(53, 100)).add(ReLU()).add(Linear(100, 50)).add(ReLU())
if model_type == 'wide_n_deep':
wide_model.add(deep_model)
model.add(wide_model).add(Linear(57, classNum)).add(LogSoftMax())
return model
elif model_type == 'wide':
model.add(wide_model).add(Linear(7, classNum)).add(LogSoftMax())
return model
elif model_type == 'deep':
model.add(deep_model).add(Linear(50, classNum)).add(LogSoftMax())
return model
else:
raise ValueError("Not valid model type. Only for wide, deep, wide_n_deep!")
def get_data_rdd(sc, data_type='train'):
if data_type == 'train':
data_tensor = './census/train_tensor.data'
data_label = './census/train_label.data'
elif data_type == 'test':
data_tensor = './census/test_tensor.data'
data_label = './census/test_label.data'
else:
raise ValueError("Not valid Data Type, only 'train' or 'test' !")
features = np.loadtxt(data_tensor, delimiter=',')
labels = np.loadtxt(data_label)
features = sc.parallelize(features)
labels = sc.parallelize(labels)
record = features.zip(labels).map(lambda features_label:
Sample.from_ndarray(features_label[0], features_label[1]+1))
return record
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-a", "--action", dest="action", default="train")
parser.add_option("-b", "--batchSize", dest="batchSize", default="128")
parser.add_option("-m", "--model", dest="model_type", default="wide_n_deep")
(options, args) = parser.parse_args(sys.argv)
sc = SparkContext(appName="wide_n_deep", conf=create_spark_conf())
init_engine()
if options.action == "train":
train_data = get_data_rdd(sc, 'train')
test_data = get_data_rdd(sc, 'test')
state = {"learningRate": 0.001,
"learningRateDecay": 0.0005}
optimizer = Optimizer(
model=build_models(options.model_type, 2),
training_rdd=train_data,
criterion=ClassNLLCriterion(),
optim_method="Adam",
state=state,
end_trigger=MaxEpoch(20),
batch_size=int(options.batchSize))
optimizer.set_validation(
batch_size=256,
val_rdd=test_data,
trigger=EveryEpoch(),
val_method=["Top1Accuracy", "Loss"]
)
optimizer.set_checkpoint(EveryEpoch(), "/tmp/{}/".format(options.model_type))
trained_model = optimizer.optimize()
parameters = trained_model.parameters()
results = trained_model.test(test_data, 256, ["Top1Accuracy"])
for result in results:
print(result)
elif options.action == "test":
# Load a pre-trained model and then validate it through top1 accuracy.
test_data = get_data_rdd(sc, 'test')
# TODO: Pass model path through external parameter
model = Model.load("/tmp/{}/model.5101".format(options.model_type))
results = model.test(test_data, 256, ["Top1Accuracy"])
for result in results:
print(result)
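# A hedged run sketch (illustrative only; the exact spark-submit flags and the names of the
# BigDL jar/zip artifacts depend on your installation):
#
#     spark-submit --jars bigdl.jar --py-files bigdl-python-api.zip \
#         widedeeprecommender.py -a train -b 128 -m wide_n_deep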
|
apache-2.0
|
ctenix/pytheway
|
MachineL/knn.py
|
1
|
2242
|
import pandas as pd
import numpy as np
from sklearn.preprocessing import Imputer
from sklearn.cross_validation import train_test_split
from sklearn.metrics import classification_report
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
def load_datasets(feature_paths, label_paths):
feature = np.ndarray(shape=(0,41))
label = np.ndarray(shape=(0,1))
for file in feature_paths:
df = pd.read_table(file, delimiter=',', na_values='?', header=None)
imp = Imputer(missing_values='NaN', strategy='mean', axis=0)
imp.fit(df)
df = imp.transform(df)
feature = np.concatenate((feature, df))
for file in label_paths:
df = pd.read_table(file, header=None)
label = np.concatenate((label, df))
label = np.ravel(label)
return feature, label
if __name__ == '__main__':
    ''' Data paths '''
featurePaths = ['A/A.feature','B/B.feature','C/C.feature','D/D.feature','E/E.feature']
labelPaths = ['A/A.label','B/B.label','C/C.label','D/D.label','E/E.label']
    ''' Load the data '''
x_train,y_train = load_datasets(featurePaths[:4],labelPaths[:4])
x_test,y_test = load_datasets(featurePaths[4:],labelPaths[4:])
x_train, x_, y_train, y_ = train_test_split(x_train, y_train, test_size = 0.0)
print('Start training knn')
knn = KNeighborsClassifier().fit(x_train, y_train)
print('Training done')
answer_knn = knn.predict(x_test)
print('Prediction done')
print('Start training DT')
dt = DecisionTreeClassifier().fit(x_train, y_train)
print('Training done')
answer_dt = dt.predict(x_test)
print('Prediction done')
print('Start training Bayes')
gnb = GaussianNB().fit(x_train, y_train)
print('Training done')
answer_gnb = gnb.predict(x_test)
print('Prediction done')
print('\n\nThe classification report for knn:')
print(classification_report(y_test, answer_knn))
print('\n\nThe classification report for DT:')
print(classification_report(y_test, answer_dt))
print('\n\nThe classification report for Bayes:')
print(classification_report(y_test, answer_gnb))
|
gpl-3.0
|
fspaolo/scikit-learn
|
sklearn/externals/joblib/__init__.py
|
6
|
4466
|
""" Joblib is a set of tools to provide **lightweight pipelining in
Python**. In particular, joblib offers:
1. transparent disk-caching of the output values and lazy re-evaluation
(memoize pattern)
  2. easy, simple parallel computing
3. logging and tracing of the execution
Joblib is optimized to be **fast** and **robust** in particular on large
data and has specific optimizations for `numpy` arrays. It is
**BSD-licensed**.
============================== ============================================
**User documentation**: http://packages.python.org/joblib
**Download packages**: http://pypi.python.org/pypi/joblib#downloads
**Source code**: http://github.com/joblib/joblib
**Report issues**: http://github.com/joblib/joblib/issues
============================== ============================================
Vision
--------
The vision is to provide tools to easily achieve better performance and
reproducibility when working with long running jobs. In addition, Joblib
can also be used to provide a light-weight make replacement or caching
solution.
* **Avoid computing the same thing twice**: code is often rerun over and
  over, for instance when prototyping computation-heavy jobs (as in
  scientific development), but a hand-crafted solution to alleviate this
  issue is error-prone and often leads to unreproducible results
* **Persist to disk transparently**: efficiently persisting
  arbitrary objects containing large data is hard. Using
joblib's caching mechanism avoids hand-written persistence and
implicitly links the file on disk to the execution context of
the original Python object. As a result, joblib's persistence is
  good for resuming an application's state or a computational job, e.g.
after a crash.
Joblib strives to address these problems while **leaving your code and
your flow control as unmodified as possible** (no framework, no new
paradigms).
Main features
------------------
1) **Transparent and fast disk-caching of output value:** a memoize or
make-like functionality for Python functions that works well for
arbitrary Python objects, including very large numpy arrays. Separate
persistence and flow-execution logic from domain logic or algorithmic
code by writing the operations as a set of steps with well-defined
inputs and outputs: Python functions. Joblib can save their
computation to disk and rerun it only if necessary::
>>> import numpy as np
>>> from sklearn.externals.joblib import Memory
>>> mem = Memory(cachedir='/tmp/joblib')
>>> import numpy as np
>>> a = np.vander(np.arange(3)).astype(np.float)
>>> square = mem.cache(np.square)
>>> b = square(a) # doctest: +ELLIPSIS
________________________________________________________________________________
[Memory] Calling square...
square(array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]]))
___________________________________________________________square - 0...s, 0.0min
>>> c = square(a)
>>> # The above call did not trigger an evaluation
2) **Embarrassingly parallel helper:** to make it easy to write readable
parallel code and debug it quickly::
>>> from sklearn.externals.joblib import Parallel, delayed
>>> from math import sqrt
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
3) **Logging/tracing:** The different functionalities will
   progressively acquire better logging mechanisms to help track what
   has been run, and to capture I/O easily. In addition, Joblib will
   provide a few I/O primitives to easily define logging and
   display streams, and a way of compiling a report.
   We want to be able to quickly inspect what has been run.
4) **Fast compressed persistence**: a replacement for pickle that works
   efficiently on Python objects containing large data
   (*joblib.dump* & *joblib.load*).
..
>>> import shutil ; shutil.rmtree('/tmp/joblib/')
"""
__version__ = '0.7.1'
from .memory import Memory
from .logger import PrintTime
from .logger import Logger
from .hashing import hash
from .numpy_pickle import dump
from .numpy_pickle import load
from .parallel import Parallel
from .parallel import delayed
from .parallel import cpu_count
|
bsd-3-clause
|
panmari/tensorflow
|
tensorflow/contrib/skflow/python/skflow/tests/test_estimators.py
|
1
|
2170
|
# Copyright 2015-present Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division, print_function, absolute_import
import tensorflow as tf
import random
from sklearn import datasets, metrics
from sklearn.cross_validation import train_test_split
from tensorflow.contrib.skflow.python import skflow
class CustomOptimizer(tf.test.TestCase):
def testIrisMomentum(self):
random.seed(42)
iris = datasets.load_iris()
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
test_size=0.2,
random_state=42)
# setup exponential decay function
def exp_decay(global_step):
return tf.train.exponential_decay(
learning_rate=0.1, global_step=global_step,
decay_steps=100, decay_rate=0.001)
custom_optimizer = lambda x: tf.train.MomentumOptimizer(x, 0.9)
classifier = skflow.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
n_classes=3, steps=800,
learning_rate=exp_decay,
optimizer=custom_optimizer)
classifier.fit(X_train, y_train)
score = metrics.accuracy_score(y_test, classifier.predict(X_test))
self.assertGreater(score, 0.7, "Failed with score = {0}".format(score))
if __name__ == "__main__":
tf.test.main()
|
apache-2.0
|
glouppe/scikit-learn
|
examples/decomposition/plot_sparse_coding.py
|
12
|
4007
|
"""
===========================================
Sparse coding with a precomputed dictionary
===========================================
Transform a signal into a sparse combination of Ricker wavelets. This example
visually compares different sparse coding methods using the
:class:`sklearn.decomposition.SparseCoder` estimator. The Ricker wavelet (also
known as the Mexican hat or the second derivative of a Gaussian) is not a
particularly good kernel for representing piecewise-constant signals like this
one. It therefore shows how much adding atoms of different widths matters,
which motivates learning the dictionary to best fit your type of signals.
The richer dictionary on the right is not larger in size; heavier subsampling
is performed in order to stay within the same order of magnitude.
"""
print(__doc__)
import numpy as np
import matplotlib.pylab as plt
from sklearn.decomposition import SparseCoder
def ricker_function(resolution, center, width):
"""Discrete sub-sampled Ricker (Mexican hat) wavelet"""
x = np.linspace(0, resolution - 1, resolution)
    x = ((2 / ((np.sqrt(3 * width) * np.pi ** 0.25)))
* (1 - ((x - center) ** 2 / width ** 2))
* np.exp((-(x - center) ** 2) / (2 * width ** 2)))
return x
def ricker_matrix(width, resolution, n_components):
"""Dictionary of Ricker (Mexican hat) wavelets"""
centers = np.linspace(0, resolution - 1, n_components)
D = np.empty((n_components, resolution))
for i, center in enumerate(centers):
D[i] = ricker_function(resolution, center, width)
D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]
return D
resolution = 1024
subsampling = 3 # subsampling factor
width = 100
n_components = resolution // subsampling
# Compute a wavelet dictionary
D_fixed = ricker_matrix(width=width, resolution=resolution,
n_components=n_components)
D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution,
                                    n_components=n_components // 5)
for w in (10, 50, 100, 500, 1000))]
# Generate a signal
y = np.linspace(0, resolution - 1, resolution)
first_quarter = y < resolution / 4
y[first_quarter] = 3.
y[np.logical_not(first_quarter)] = -1.
# List the different sparse coding methods in the following format:
# (title, transform_algorithm, transform_alpha, transform_n_nozero_coefs)
estimators = [('OMP', 'omp', None, 15, 'navy'),
('Lasso', 'lasso_cd', 2, None, 'turquoise'), ]
lw = 2
plt.figure(figsize=(13, 6))
for subplot, (D, title) in enumerate(zip((D_fixed, D_multi),
('fixed width', 'multiple widths'))):
plt.subplot(1, 2, subplot + 1)
plt.title('Sparse coding against %s dictionary' % title)
plt.plot(y, lw=lw, linestyle='--', label='Original signal')
# Do a wavelet approximation
for title, algo, alpha, n_nonzero, color in estimators:
coder = SparseCoder(dictionary=D, transform_n_nonzero_coefs=n_nonzero,
transform_alpha=alpha, transform_algorithm=algo)
x = coder.transform(y)
density = len(np.flatnonzero(x))
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
plt.plot(x, color=color, lw=lw,
label='%s: %s nonzero coefs,\n%.2f error'
% (title, density, squared_error))
# Soft thresholding debiasing
coder = SparseCoder(dictionary=D, transform_algorithm='threshold',
transform_alpha=20)
x = coder.transform(y)
_, idx = np.where(x != 0)
x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y)
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
plt.plot(x, color='darkorange', lw=lw,
label='Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error'
% (len(idx), squared_error))
plt.axis('tight')
plt.legend(shadow=False, loc='best')
plt.subplots_adjust(.04, .07, .97, .90, .09, .2)
plt.show()
|
bsd-3-clause
|
dr-leo/pandaSDMX
|
pandasdmx/writer/base.py
|
1
|
2221
|
from functools import singledispatch
class BaseWriter:
"""Base class for recursive writers.
Usage:
- Create an instance of this class.
- Use :meth:`register` in the same manner as Python's built-in
      :func:`functools.singledispatch` to decorate functions that write certain
      types of :mod:`pandasdmx.model` or :mod:`pandasdmx.message` objects.
- Call :meth:`recurse` to kick off recursive writing of objects, including
from inside other functions.
Example
-------
MyWriter = BaseWriter('my')
@MyWriter.register
def _(obj: sdmx.model.ItemScheme):
... code to write an ItemScheme ...
return result
@MyWriter.register
def _(obj: sdmx.model.Codelist):
... code to write a Codelist ...
return result
"""
def __init__(self, format_name):
# Create the single-dispatch function
@singledispatch
def func(obj, *args, **kwargs):
raise NotImplementedError(
f"write {obj.__class__.__name__} to " f"{format_name}"
)
self._dispatcher = func
def recurse(self, obj, *args, **kwargs):
"""Recursively write *obj*.
If there is no :meth:`register` 'ed function to write the class of
`obj`, then the parent class of `obj` is used to find a method.
"""
        # TODO use a cache to speed this up, so the MRO does not need to be traversed
# for every object instance
dispatcher = getattr(self, "_dispatcher")
try:
# Let the single dispatch function choose the overload
return dispatcher(obj, *args, **kwargs)
except NotImplementedError as exc:
try:
# Use the object's parent class to get a different overload
func = dispatcher.registry[obj.__class__.mro()[1]]
except KeyError:
# Overload for the parent class did not exist
raise exc
return func(obj, *args, **kwargs)
def __call__(self, func):
"""Register *func* as a writer for a particular object type."""
dispatcher = getattr(self, "_dispatcher")
dispatcher.register(func)
return func
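# A hedged usage sketch (hypothetical writer and types, not part of pandasdmx's public API;
# note that with the implementation above, the instance itself is used as the registration
# decorator via __call__):
#
#     DemoWriter = BaseWriter("demo")
#
#     @DemoWriter
#     def _(obj: dict, *args, **kwargs):
#         # chosen by single dispatch because obj is a dict; recurse into the values
#         return {key: DemoWriter.recurse(value) for key, value in obj.items()}
#
#     @DemoWriter
#     def _(obj: str, *args, **kwargs):
#         return obj.upper()
#
#     DemoWriter.recurse({"a": "x"})   # -> {"a": "X"}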
|
apache-2.0
|
mitschabaude/nanopores
|
scripts/wei/plot_wei.py
|
1
|
3655
|
# (c) 2017 Gregor Mitscha-Baude
from matplotlib import pyplot as plt
import numpy as np
import nanopores
import nanopores.models.randomwalk as rw
from nanopores.tools import fields
fields.set_dir_dropbox()
name = "rw_wei_3"
#data = rw.load_results(name)
#
#rw.histogram(data, a=-2, b=6)
#rw.hist_poisson(data, "attempts", (1, 10))
csvfile = "tau_off_wei.csv"
data = np.genfromtxt(csvfile, delimiter=',')
bins = data[:, 0]
counts = data[:, 1]
# inspection showed that there seems to be a good,
# evenly spaced approximation to all bins except the first and last with
# spacing 0.55, i.e. of the form (beta + 0.55*np.arange(0, N)) for some beta
x = bins[:-1]
N = len(x)
# minimize norm(x - (beta + 0.55*np.arange(0, N))) w.r.t. beta
beta = x.mean() - 0.55*(N-1)/2.
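# (why this is the minimizer: setting d/dbeta of sum_k (x[k] - beta - 0.55*k)**2 to zero
#  gives beta = mean(x) - 0.55*mean(k) = mean(x) - 0.55*(N-1)/2, since k runs over 0..N-1)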
# turns out beta is close to 0.25, which gives nice numbers,
# so we will just take that
bins = 0.25 + 0.55*np.arange(0, N)
bins = [0.] + list(bins) + [20.]
N = N+1
# the counts should be integer-values, so
counts = np.round(counts).astype(int)
# TODO: need better experimental data => webtool
# now let's reproduce the plot
# first create fake data samples that reproduce the histogram
fake = np.array([])
frac = 1.
while int(counts[0]*frac) > 1:
frac /= 2.
a, b = bins[1]*frac, bins[1]*2*frac
sample = a*(b/a)**(np.random.rand(int(counts[0]*frac)))
fake = np.append(fake, sample)
print "frac", frac
for i in range(1, N):
a, b = bins[i], bins[i+1]
sample = a*(b/a)**(np.random.rand(counts[i]))
fake = np.append(fake, sample)
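# (note: a*(b/a)**u with u ~ Uniform(0, 1) draws samples log-uniformly between a and b,
#  so the fake events are spread smoothly across each histogram bin)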
# now get the same number of samples with binding from our own data
data = rw.load_results(name)
bind = data.bindings > 0
times = 1e-9*data.times # data are in ns, we want s
n = sum(counts)
print "Found %d simulated binding events, have %d experimental binding events." % (sum(bind), n)
if sum(bind) > n:
# only consider same number of bind_times
i = np.flatnonzero(bind)[n-1] + 1
else:
i = len(times)
print "Number of simulated events we use:", i
bind_times = times[:i][bind[:i]]
fail_times = times[:i][data.fail[:i]]
success_times = times[:i][data.success[:i]]
# simple histogram
plt.figure("hist_simple")
plt.hist(fake, bins=bins, label="Wei et al. 2012")
plt.hist(bind_times, bins=bins, histtype="step", rwidth=1., label="Simulation")
plt.legend()
plt.xlabel(r"$\tau$ off [s]")
plt.ylabel("Count")
plt.xlim(0, 20)
plt.figure("hist_attempts")
rw.hist_poisson(data, "attempts", (0, 12), lines=False)
plt.figure("hist_bindings")
rw.hist_poisson(data, "bindings", (0, 4), lines=False)
#rw.hist_poisson(data, "attempts", (1, 10), modified=True)
# histogram plot with short and long events and experimental data
plt.figure("hist")
cutoff = 0.03e-3 # cutoff frequency in s
a, b = -6.5, 3 # log10 of plot interval
bins = np.logspace(a, b, 40)
# successful events
hist = plt.hist(success_times, bins=bins, color="green", rwidth=0.9, label="Translocated")
# failed attempts
hist = plt.hist(fail_times, bins=bins, color="red", rwidth=0.9, label="Did not translocate")
#total = rw.integrate_hist(hist, cutoff)
#tmean = times[times > cutoff].mean()
#T = np.logspace(a-3, b, 1000)
#fT = np.exp(-T/tmean)*T/tmean
#fT *= total/integrate_values(T, fT, cutoff)
#plt.plot(T, fT, label="exp. fit, mean = %.2f ms" % (tmean,),
# color="dark" + color, **params)
#plt.xlim(10**a, 10**b)
#fake0 = fake[fake > bins[1]]
plt.hist(fake, bins=bins, histtype="step", color="orange", label="Wei et al. 2012")
plt.xscale("log")
plt.yscale("log")
plt.ylabel("Count")
plt.xlabel(r"$\tau$ off [s]")
plt.ylim(ymin=1., ymax=1e5)
plt.legend()
#plt.show()
import folders
nanopores.savefigs("tau_off", folders.FIGDIR + "/wei", (4, 3))
|
mit
|
rsivapr/scikit-learn
|
sklearn/gaussian_process/tests/test_gaussian_process.py
|
4
|
3347
|
"""
Testing for Gaussian Process module (sklearn.gaussian_process)
"""
# Author: Vincent Dubourg <[email protected]>
# Licence: BSD 3 clause
from nose.tools import raises
from nose.tools import assert_true
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from sklearn.gaussian_process import regression_models as regression
from sklearn.gaussian_process import correlation_models as correlation
f = lambda x: x * np.sin(x)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
y = f(X).ravel()
def test_1d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
"""
MLE estimation of a one-dimensional Gaussian Process model.
Check random start optimization.
Test the interpolating property.
"""
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=random_start, verbose=False).fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
def test_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
"""
MLE estimation of a two-dimensional Gaussian Process model accounting for
anisotropy. Check random start optimization.
Test the interpolating property.
"""
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = g(X).ravel()
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=[1e-4] * 2,
thetaU=[1e-1] * 2,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
@raises(ValueError)
def test_wrong_number_of_outputs():
gp = GaussianProcess()
gp.fit([[1, 2, 3], [4, 5, 6]], [1, 2, 3])
def test_more_builtin_correlation_models(random_start=1):
"""
Repeat test_1d and test_2d for several built-in correlation
models specified as strings.
"""
all_corr = ['absolute_exponential', 'squared_exponential', 'cubic',
'linear']
for corr in all_corr:
test_1d(regr='constant', corr=corr, random_start=random_start)
test_2d(regr='constant', corr=corr, random_start=random_start)
def test_ordinary_kriging():
"""
Repeat test_1d and test_2d with given regression weights (beta0) for
different regression models (Ordinary Kriging).
"""
test_1d(regr='linear', beta0=[0., 0.5])
test_1d(regr='quadratic', beta0=[0., 0.5, 0.5])
test_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
def test_no_normalize():
gp = GaussianProcess(normalize=False).fit(X, y)
y_pred = gp.predict(X)
assert_true(np.allclose(y_pred, y))
|
bsd-3-clause
|
ccxt/ccxt
|
examples/py/poloniex-fetch-trades-with-pagination-to-csv.py
|
1
|
1723
|
# -*- coding: utf-8 -*-
import os
import sys
import pandas as pd
root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(root + '/python')
import ccxt # noqa: E402
exchange = ccxt.poloniex({'enableRateLimit': True})
exchange.load_markets()
# Poloniex will only serve one year of data into the past
symbol = 'BTC/USDT'
before = exchange.milliseconds()
previous_length = 0
all_trades = {}
while True:
try:
print('---------------------------------------------------------------')
print('Fetching trades before', exchange.iso8601(before))
params = {
'end': int(before / 1000) # end param must be in seconds
}
trades = exchange.fetch_trades(symbol, None, None, params)
trades_by_id = exchange.index_by(trades, 'id')
before = trades[0]['timestamp']
all_trades = exchange.extend(all_trades, trades_by_id)
total_length = len(all_trades.keys())
print('Fetched', total_length, 'trades in total')
if total_length == previous_length:
break
previous_length = total_length
except ccxt.NetworkError as e:
print(e) # retry on next iteration
except ccxt.ExchangeError as e:
print(e)
break
all_trades = exchange.sort_by(all_trades.values(), 'id')
print('Fetched', len(all_trades), 'trades since', all_trades[0]['datetime'], 'till', all_trades[-1]['datetime'])
# omitted_keys = ['fee', 'info']
# all_trades = [exchange.omit(trade, omitted_keys) for trade in all_trades]
# path_to_your_csv_file = 'trades.csv' # change for your path here
# df = pd.DataFrame(all_trades)
# df.to_csv(path_to_your_csv_file, index=None, header=True)
|
mit
|
huig-/Computational-Geometry
|
Curves/polynomial_curve_fitting.py
|
1
|
5546
|
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
def polynomial_curve_fitting(points, knots, method, L=0, libraries=False,
num_points=100, degree=None):
'''
Fits planar curve to points at given knots.
Arguments:
points -- coordinates of points to adjust (x_i, y_i) given by a numpy array of shape (N, 2)
knots -- strictly increasing sequence at which the curve will fit the points, tau_i
It is given by a np.array of shape N, unless knots='chebyshev', in this case
N Chebyshev's nodes between 0 and 1 will be used instead of tau.
method -- one of the following:
'newton' computes the interpolating polynomial curve using Newton's method.
'least_squares' computes the best adjusting curve in the least square sense,
i.e., min_a ||Ca - b||**2 + L/2 ||a||**2
L -- regularization parameter
libraries -- If False, only numpy linear algebra operations are allowed.
If True, any module can be used. In this case, a very short and fast code is expected
num_points -- number of points to plot between tau[0] and tau[-1]
degree -- degree of the polynomial. Needed only if method='least_squares'.
If degree=None, the function will return the interpolating polynomial.
Returns:
numpy array of shape (num_points, 2) given by the evaluation of the polynomial
at the evenly spaced num_points between tau[0] and tau[-1]
'''
if knots == 'chebyshev':
knots = chebyshev_knots(0, 1, points.shape[0])
if method == 'newton':
return newton_polynomial(points,knots,num_points,libraries)
elif method == 'least_squares':
return least_squares_fitting(points, knots, degree, num_points, L, libraries)
def polynomial_curve_fitting1d(points, knots, method, L=0, libraries=False,
num_points=100):
pass
def newton_polynomial(x, tau, num_points=100, libraries=False):
'''
    Computes the Newton polynomial interpolating values x at knots tau
x: numpy array of size n; points to interpolate
tau: numpy array of size n; knots tau[0] < tau[1] < ... < tau[n-1]
num_points: number of points at which the polynomial will be
evaluated
libraries: False means only linear algebra can be used
True means every module can be used.
returns:
numpy array of size num_points given by the polynomial
        evaluated at np.linspace(tau[0], tau[-1], num_points)
    Maximum cost allowed: 5.43 s on the lab III computers
degree = n - 1 = 9
num_points = 100
'''
if libraries == False:
n = x.shape[0]
m = x.ndim
## Compute divided differences ##
if m > 1 : tau = tau[:,np.newaxis]
aux = x
divided_differences = np.array([aux[0]])
for k in range(1,n):
aux = np.divide(aux[1:] - aux[0:-1],(tau[k:] - tau[0:-k])*1.0)
divided_differences = np.append(divided_differences,[aux[0]],axis=0)
## Compute polynomial ##
t = np.linspace(tau[0],tau[-1],num_points)
product = np.multiply
# A few tweaks to work with dimensions greater than 1.
# It allows using the same code for m=1 but with matrices.
if m>1:
t = t[:,np.newaxis]
divided_differences = divided_differences[:,np.newaxis,:]
product = np.dot
        # Compute the polynomial using Horner's fast evaluation method.
polynomial = divided_differences[-1]
# The first iteration is slightly different when working with matrices.
if n>1:
polynomial = divided_differences[-2] + product((t - tau[-2]),polynomial)
for k in range(n-3,-1,-1):
polynomial= divided_differences[k]+(t-tau[k])*polynomial
return polynomial
else:
t = np.linspace(tau[0],tau[-1],num_points)
coef = np.polyfit(tau,x,x.shape[0]-1)
if x.ndim > 1:
polynomial = np.empty((t.shape[0],x.shape[1]))
for k in range(0,x.shape[1]):
polynomial[:,k] = np.polyval(coef[:,k],t)
else:
polynomial = np.polyval(coef,t)
return polynomial
def eval_poly(t, coefs, tau=None):
pass
def least_squares_fitting(points, knots, degree, num_points, L=0, libraries=True):
if degree == None:
degree = points.shape[0]-1
if libraries:
coeffs = np.polyfit(knots, points, degree)
t = np.linspace(knots[0], knots[-1], num_points)
polynomial = np.empty((t.shape[0], points.shape[1]))
for k in xrange(points.shape[1]):
polynomial[:,k] = np.polyval(coeffs[:,k],t)
return polynomial
else:
n = knots.shape[0]
C = np.vander(knots, degree+1)
if n == (degree+1):
coeffs = np.linalg.solve(np.add(C, L*0.5*np.identity(degree+1)), points)
else:
coeffs = np.linalg.solve(np.add(np.dot(np.transpose(C), C), L*0.5*np.identity(degree+1)), np.dot(np.transpose(C), points))
t = np.linspace(knots[0], knots[-1], num_points)
polynomial = np.empty((t.shape[0], points.shape[1]))
for k in xrange(points.shape[1]):
polynomial[:,k] = np.polyval(coeffs[:,k],t)
return polynomial
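# (derivation note for the branch above: setting the gradient of ||C a - b||**2 + (L/2)*||a||**2
#  to zero gives the regularized normal equations (C^T C + (L/2) I) a = C^T b, which is the
#  system passed to np.linalg.solve in the over-determined case; the square case n == degree+1
#  instead solves (C + (L/2) I) a = points directly.)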
def chebyshev_knots(a, b, n):
tau = np.empty(n)
for i in xrange(1,n+1):
tau[i-1] = (a+b - (a-b)*np.cos((2*i-1)*np.pi/(2.*n)))*0.5
return tau
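# A minimal, hedged usage sketch (the points and knots below are illustrative only):
#
#     if __name__ == '__main__':
#         pts = np.array([[0., 0.], [1., 1.], [2., 0.], [3., 2.]])
#         tau = np.linspace(0., 1., pts.shape[0])
#         curve = polynomial_curve_fitting(pts, tau, 'newton', num_points=50)
#         plt.plot(curve[:, 0], curve[:, 1])
#         plt.plot(pts[:, 0], pts[:, 1], 'o')
#         plt.show()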
|
gpl-3.0
|
faner-father/tushare
|
tushare/datayes/idx.py
|
17
|
1509
|
# -*- coding:utf-8 -*-
"""
DataYes (通联数据) data interface
Created on 2015/08/24
@author: Jimmy Liu
@group : waditu
@contact: [email protected]
"""
from pandas.compat import StringIO
import pandas as pd
from tushare.util import vars as vs
from tushare.util.common import Client
from tushare.util import upass as up
class Idx():
def __init__(self, client=None):
if client is None:
self.client = Client(up.get_token())
else:
self.client = client
def Idx(self, secID='', ticker='', field=''):
"""
        Get basic information on domestic and overseas indices, including index name, index code, publisher, publication date, base date, and base point.
"""
code, result = self.client.getData(vs.IDX%(secID, ticker, field))
return _ret_data(code, result)
def IdxCons(self, secID='', ticker='', intoDate='', isNew='', field=''):
"""
        Get the constituent composition of domestic and overseas indices, including constituent stock names, constituent codes, inclusion dates, and removal dates.
"""
code, result = self.client.getData(vs.IDXCONS%(secID, ticker, intoDate,
intoDate, isNew, field))
return _ret_data(code, result)
def _ret_data(code, result):
if code==200:
result = result.decode('utf-8') if vs.PY3 else result
df = pd.read_csv(StringIO(result))
return df
else:
print(result)
return None
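# A minimal, hedged usage sketch (assumes a valid DataYes token has already been stored,
# e.g. via tushare.util.upass.set_token; the ticker is illustrative only):
#
#     idx_api = Idx()
#     basics = idx_api.Idx(ticker='000300', field='ticker,secShortName')
#     cons = idx_api.IdxCons(ticker='000300', isNew='1')
#     if cons is not None:
#         print(cons.head())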
|
bsd-3-clause
|
TomAugspurger/pandas
|
pandas/tests/window/test_expanding.py
|
1
|
6577
|
import numpy as np
import pytest
from pandas.errors import UnsupportedFunctionCall
import pandas as pd
from pandas import DataFrame, Series
import pandas._testing as tm
from pandas.core.window import Expanding
def test_doc_string():
df = DataFrame({"B": [0, 1, 2, np.nan, 4]})
df
df.expanding(2).sum()
def test_constructor(which):
# GH 12669
c = which.expanding
# valid
c(min_periods=1)
c(min_periods=1, center=True)
c(min_periods=1, center=False)
# not valid
for w in [2.0, "foo", np.array([2])]:
with pytest.raises(ValueError):
c(min_periods=w)
with pytest.raises(ValueError):
c(min_periods=1, center=w)
@pytest.mark.parametrize("method", ["std", "mean", "sum", "max", "min", "var"])
def test_numpy_compat(method):
# see gh-12811
e = Expanding(Series([2, 4, 6]), window=2)
msg = "numpy operations are not valid with window objects"
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(e, method)(1, 2, 3)
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(e, method)(dtype=np.float64)
@pytest.mark.parametrize(
"expander",
[
1,
pytest.param(
"ls",
marks=pytest.mark.xfail(
reason="GH#16425 expanding with offset not supported"
),
),
],
)
def test_empty_df_expanding(expander):
# GH 15819 Verifies that datetime and integer expanding windows can be
# applied to empty DataFrames
expected = DataFrame()
result = DataFrame().expanding(expander).sum()
tm.assert_frame_equal(result, expected)
# Verifies that datetime and integer expanding windows can be applied
# to empty DataFrames with datetime index
expected = DataFrame(index=pd.DatetimeIndex([]))
result = DataFrame(index=pd.DatetimeIndex([])).expanding(expander).sum()
tm.assert_frame_equal(result, expected)
def test_missing_minp_zero():
# https://github.com/pandas-dev/pandas/pull/18921
# minp=0
x = pd.Series([np.nan])
result = x.expanding(min_periods=0).sum()
expected = pd.Series([0.0])
tm.assert_series_equal(result, expected)
# minp=1
result = x.expanding(min_periods=1).sum()
expected = pd.Series([np.nan])
tm.assert_series_equal(result, expected)
def test_expanding_axis(axis_frame):
# see gh-23372.
df = DataFrame(np.ones((10, 20)))
axis = df._get_axis_number(axis_frame)
if axis == 0:
expected = DataFrame(
{i: [np.nan] * 2 + [float(j) for j in range(3, 11)] for i in range(20)}
)
else:
# axis == 1
expected = DataFrame([[np.nan] * 2 + [float(i) for i in range(3, 21)]] * 10)
result = df.expanding(3, axis=axis_frame).sum()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("constructor", [Series, DataFrame])
def test_expanding_count_with_min_periods(constructor):
# GH 26996
result = constructor(range(5)).expanding(min_periods=3).count()
expected = constructor([np.nan, np.nan, 3.0, 4.0, 5.0])
tm.assert_equal(result, expected)
@pytest.mark.parametrize("constructor", [Series, DataFrame])
def test_expanding_count_default_min_periods_with_null_values(constructor):
# GH 26996
values = [1, 2, 3, np.nan, 4, 5, 6]
expected_counts = [1.0, 2.0, 3.0, 3.0, 4.0, 5.0, 6.0]
result = constructor(values).expanding().count()
expected = constructor(expected_counts)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"df,expected,min_periods",
[
(
DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}),
[
({"A": [1], "B": [4]}, [0]),
({"A": [1, 2], "B": [4, 5]}, [0, 1]),
({"A": [1, 2, 3], "B": [4, 5, 6]}, [0, 1, 2]),
],
3,
),
(
DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}),
[
({"A": [1], "B": [4]}, [0]),
({"A": [1, 2], "B": [4, 5]}, [0, 1]),
({"A": [1, 2, 3], "B": [4, 5, 6]}, [0, 1, 2]),
],
2,
),
(
DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}),
[
({"A": [1], "B": [4]}, [0]),
({"A": [1, 2], "B": [4, 5]}, [0, 1]),
({"A": [1, 2, 3], "B": [4, 5, 6]}, [0, 1, 2]),
],
1,
),
(DataFrame({"A": [1], "B": [4]}), [], 2),
(DataFrame(), [({}, [])], 1),
(
DataFrame({"A": [1, np.nan, 3], "B": [np.nan, 5, 6]}),
[
({"A": [1.0], "B": [np.nan]}, [0]),
({"A": [1, np.nan], "B": [np.nan, 5]}, [0, 1]),
({"A": [1, np.nan, 3], "B": [np.nan, 5, 6]}, [0, 1, 2]),
],
3,
),
(
DataFrame({"A": [1, np.nan, 3], "B": [np.nan, 5, 6]}),
[
({"A": [1.0], "B": [np.nan]}, [0]),
({"A": [1, np.nan], "B": [np.nan, 5]}, [0, 1]),
({"A": [1, np.nan, 3], "B": [np.nan, 5, 6]}, [0, 1, 2]),
],
2,
),
(
DataFrame({"A": [1, np.nan, 3], "B": [np.nan, 5, 6]}),
[
({"A": [1.0], "B": [np.nan]}, [0]),
({"A": [1, np.nan], "B": [np.nan, 5]}, [0, 1]),
({"A": [1, np.nan, 3], "B": [np.nan, 5, 6]}, [0, 1, 2]),
],
1,
),
],
)
def test_iter_expanding_dataframe(df, expected, min_periods):
# GH 11704
expected = [DataFrame(values, index=index) for (values, index) in expected]
for (expected, actual) in zip(expected, df.expanding(min_periods)):
tm.assert_frame_equal(actual, expected)
@pytest.mark.parametrize(
"ser,expected,min_periods",
[
(Series([1, 2, 3]), [([1], [0]), ([1, 2], [0, 1]), ([1, 2, 3], [0, 1, 2])], 3),
(Series([1, 2, 3]), [([1], [0]), ([1, 2], [0, 1]), ([1, 2, 3], [0, 1, 2])], 2),
(Series([1, 2, 3]), [([1], [0]), ([1, 2], [0, 1]), ([1, 2, 3], [0, 1, 2])], 1),
(Series([1, 2]), [([1], [0]), ([1, 2], [0, 1])], 2),
(Series([np.nan, 2]), [([np.nan], [0]), ([np.nan, 2], [0, 1])], 2),
(Series([], dtype="int64"), [], 2),
],
)
def test_iter_expanding_series(ser, expected, min_periods):
# GH 11704
expected = [Series(values, index=index) for (values, index) in expected]
for (expected, actual) in zip(expected, ser.expanding(min_periods)):
tm.assert_series_equal(actual, expected)
|
bsd-3-clause
|
all-umass/metric-learn
|
test/test_base_metric.py
|
1
|
6495
|
import pytest
import unittest
import metric_learn
import numpy as np
from sklearn import clone
from sklearn.utils.testing import set_random_state
from test.test_utils import ids_metric_learners, metric_learners
class TestStringRepr(unittest.TestCase):
def test_covariance(self):
self.assertEqual(str(metric_learn.Covariance()),
"Covariance(preprocessor=None)")
def test_lmnn(self):
self.assertRegexpMatches(
str(metric_learn.LMNN()),
r"(python_)?LMNN\(convergence_tol=0.001, k=3, learn_rate=1e-07, "
r"max_iter=1000,\n min_iter=50, preprocessor=None, "
r"regularization=0.5, use_pca=True,\n verbose=False\)")
def test_nca(self):
self.assertEqual(str(metric_learn.NCA()),
"NCA(max_iter=100, num_dims=None, preprocessor=None, "
"tol=None, verbose=False)")
def test_lfda(self):
self.assertEqual(str(metric_learn.LFDA()),
"LFDA(embedding_type='weighted', k=None, num_dims=None, "
"preprocessor=None)")
def test_itml(self):
self.assertEqual(str(metric_learn.ITML()), """
ITML(A0=None, convergence_threshold=0.001, gamma=1.0, max_iter=1000,
preprocessor=None, verbose=False)
""".strip('\n'))
self.assertEqual(str(metric_learn.ITML_Supervised()), """
ITML_Supervised(A0=None, bounds='deprecated', convergence_threshold=0.001,
gamma=1.0, max_iter=1000, num_constraints=None,
num_labeled='deprecated', preprocessor=None, verbose=False)
""".strip('\n'))
def test_lsml(self):
self.assertEqual(
str(metric_learn.LSML()),
"LSML(max_iter=1000, preprocessor=None, prior=None, tol=0.001, "
"verbose=False)")
self.assertEqual(str(metric_learn.LSML_Supervised()), """
LSML_Supervised(max_iter=1000, num_constraints=None, num_labeled='deprecated',
preprocessor=None, prior=None, tol=0.001, verbose=False,
weights=None)
""".strip('\n'))
def test_sdml(self):
self.assertEqual(str(metric_learn.SDML()),
"SDML(balance_param=0.5, preprocessor=None, "
"sparsity_param=0.01, use_cov=True,\n verbose=False)")
self.assertEqual(str(metric_learn.SDML_Supervised()), """
SDML_Supervised(balance_param=0.5, num_constraints=None,
num_labeled='deprecated', preprocessor=None, sparsity_param=0.01,
use_cov=True, verbose=False)
""".strip('\n'))
def test_rca(self):
self.assertEqual(str(metric_learn.RCA()),
"RCA(num_dims=None, pca_comps=None, preprocessor=None)")
self.assertEqual(str(metric_learn.RCA_Supervised()),
"RCA_Supervised(chunk_size=2, num_chunks=100, "
"num_dims=None, pca_comps=None,\n "
"preprocessor=None)")
def test_mlkr(self):
self.assertEqual(str(metric_learn.MLKR()),
"MLKR(A0=None, max_iter=1000, num_dims=None, "
"preprocessor=None, tol=None,\n verbose=False)")
def test_mmc(self):
self.assertEqual(str(metric_learn.MMC()), """
MMC(A0=None, convergence_threshold=0.001, diagonal=False, diagonal_c=1.0,
max_iter=100, max_proj=10000, preprocessor=None, verbose=False)
""".strip('\n'))
self.assertEqual(str(metric_learn.MMC_Supervised()), """
MMC_Supervised(A0=None, convergence_threshold=1e-06, diagonal=False,
diagonal_c=1.0, max_iter=100, max_proj=10000, num_constraints=None,
num_labeled='deprecated', preprocessor=None, verbose=False)
""".strip('\n'))
@pytest.mark.parametrize('estimator, build_dataset', metric_learners,
ids=ids_metric_learners)
def test_get_metric_is_independent_from_metric_learner(estimator,
build_dataset):
"""Tests that the get_metric method returns a function that is independent
from the original metric learner"""
input_data, labels, _, X = build_dataset()
model = clone(estimator)
set_random_state(model)
# we fit the metric learner on it and then we compute the metric on some
# points
model.fit(input_data, labels)
metric = model.get_metric()
score = metric(X[0], X[1])
# then we refit the estimator on another dataset
model.fit(np.sin(input_data), labels)
# we recompute the distance between the two points: it should be the same
score_bis = metric(X[0], X[1])
assert score_bis == score
@pytest.mark.parametrize('estimator, build_dataset', metric_learners,
ids=ids_metric_learners)
def test_get_metric_raises_error(estimator, build_dataset):
"""Tests that the metric returned by get_metric raises errors similar to
the distance functions in scipy.spatial.distance"""
input_data, labels, _, X = build_dataset()
model = clone(estimator)
set_random_state(model)
model.fit(input_data, labels)
metric = model.get_metric()
list_test_get_metric_raises = [(X[0].tolist() + [5.2], X[1]), # vectors with
# different dimensions
(X[0:4], X[1:5]), # 2D vectors
(X[0].tolist() + [5.2], X[1] + [7.2])]
# vectors of same dimension but incompatible with what the metric learner
# was trained on
for u, v in list_test_get_metric_raises:
with pytest.raises(ValueError):
metric(u, v)
@pytest.mark.parametrize('estimator, build_dataset', metric_learners,
ids=ids_metric_learners)
def test_get_metric_works_does_not_raise(estimator, build_dataset):
"""Tests that the metric returned by get_metric does not raise errors (or
warnings) similarly to the distance functions in scipy.spatial.distance"""
input_data, labels, _, X = build_dataset()
model = clone(estimator)
set_random_state(model)
model.fit(input_data, labels)
metric = model.get_metric()
list_test_get_metric_doesnt_raise = [(X[0], X[1]),
(X[0].tolist(), X[1].tolist()),
(X[0][None], X[1][None])]
for u, v in list_test_get_metric_doesnt_raise:
with pytest.warns(None) as record:
metric(u, v)
assert len(record) == 0
# Test that the scalar case works
model.transformer_ = np.array([3.1])
metric = model.get_metric()
for u, v in [(5, 6.7), ([5], [6.7]), ([[5]], [[6.7]])]:
with pytest.warns(None) as record:
metric(u, v)
assert len(record) == 0
if __name__ == '__main__':
unittest.main()
|
mit
|
MadsJensen/CAA
|
sk_predict_condition_ali_grads.py
|
1
|
1408
|
import numpy as np
import pandas as pd
from my_settings import (tf_folder, subjects_select)
from sklearn.ensemble import AdaBoostClassifier
from sklearn.cross_validation import StratifiedShuffleSplit, cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
ctl_left = []
ctl_right = []
ent_left = []
ent_right = []
for subject in subjects_select:
data = np.load(tf_folder + "%s_ali.npy" % subject)
ctl_left.append(data[0, :])
ctl_right.append(data[1, :])
ent_left.append(data[2, :])
ent_right.append(data[3, :])
ctl_left = np.asarray(ctl_left)
ctl_right = np.asarray(ctl_right)
ent_left = np.asarray(ent_left)
ent_right = np.asarray(ent_right)
X = np.vstack((ctl_left, ctl_right, ent_left, ent_right))
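# assumed label coding (not stated in the original source): 0 = ctl_left, 1 = ctl_right,
# 2 = ent_left, 3 = ent_right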
y = np.concatenate((np.zeros(len(ctl_left)), np.ones(len(ctl_right)),
                    np.ones(len(ent_left)) * 2, np.ones(len(ent_right)) * 3))
cv = StratifiedShuffleSplit(y, n_iter=10)
ada_params = {
"adaboostclassifier__n_estimators": np.arange(1, 50, 1),
"adaboostclassifier__learning_rate": np.arange(0.01, 1, 0.1)
}
ada = AdaBoostClassifier
scaler_pipe = make_pipeline(StandardScaler(), AdaBoostClassifier())
grid = GridSearchCV(scaler_pipe, param_grid=ada_params, cv=cv)
grid.fit(X, y)
ada = grid.best_estimator_
scores = cross_val_score(ada, X, y, cv=cv, scoring="accuracy")
|
bsd-3-clause
|
norheim/pextant
|
pextant/analysis/slope_analysis.py
|
2
|
3808
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import savgol_filter
from scipy import interpolate
from pextant.lib.geoshapely import GeoPolygon, LAT_LONG, UTM, Cartesian, XY
from pextant.EnvironmentalModel import GDALMesh
from pextant.analysis.loadWaypoints import JSONloader
# Found manually, using the import gps notebook
traverses = {
'MD2': {
'name': '20161108A_EV1',
'json': 'HI_08Nov16_MD2_A',
'data': np.array([[4509,5088],[5937,6516],[10850,11030],[13530,14110]])},
'MD3': {
'name': '20161109A_EV1',
'json': 'HI_09Nov16_MD3_A',
'data':np.array([[3400,4660],[5010,5500],[18100,18393]]),
},
'MD4': {
'name': '20161110A_EV1',
'json': 'HI_10Nov16_MD4_A',
'data': np.array([[2840,4034],[13150,13829]]),
},
'MD5': {
'name': '20161111A_EV1',
'json': 'HI_11Nov16_MD5_A',
'data': np.array([[3500,5000]])
},
'MD6': {
'name': '20161112A_EV1',
'json': 'HI_12Nov16_MD6_A',
'data': np.array([[3990,5500]])},
'MD7': {
'name': '20161113A_EV1',
'json': 'HI_13Nov16_MD7_A',
'data': np.array([[4200,5700]])
},
'MD8': {
'name': '20161114A_EV1',
'json': 'HI_14Nov16_MD8_A',
'data': np.array([[4600,6500],[10400,11000]])
},
'MD9': {
'name': '20161115A_EV1',
'json': 'HI_15Nov16_MD9_A',
'data': np.array([[7900,9600]])
}
}
gm = GDALMesh('../../data/maps/HI_lowqual_DEM.tif')
pd.options.display.max_rows = 7
traversesallowed = ['MD9','MD8','MD6'] #M7 is bad
allslopes = []
allvelocities = []
for traverse in traversesallowed:
#traverse = 'MD9'
bounds = traverses[traverse]['data']
csv_filename = '../../data/ev_tracks/%s.csv'%(traverses[traverse]['name'])
json_filename = '../../data/waypoints/%s.json'%traverses[traverse]['json']
delimiter = ","
header_row = 0
df = pd.read_csv(csv_filename, sep=delimiter, header=header_row)
gp = GeoPolygon(LAT_LONG,*df[['latitude', 'longitude']].as_matrix().transpose())
wp = JSONloader.from_file(json_filename).get_waypoints()
em = gm.loadSubSection(gp.geoEnvelope())
rr = df['cumulative distance (m)'].as_matrix()
df['date_time'] = pd.to_datetime(df['epoch timestamp'],unit='s')
time = df['date_time']-df['date_time'][0]
times = time.astype('timedelta64[s]').as_matrix()
f = interpolate.interp2d(np.arange(em.numCols),np.arange(em.numRows),em.dataset)
XYC = XY(em.nw_geo_point, em.resolution)
elevation = np.array([f(xwp,ywp) for xwp,ywp in gp.to(XYC).transpose()])
elevation = elevation.transpose()[0]
start_time, end_time = bounds.transpose()
for idx in range(len(start_time)):
t = times[start_time[idx]:end_time[idx]]
r = rr[start_time[idx]:end_time[idx]]
rf = savgol_filter(r,59,2)
z = elevation[start_time[idx]:end_time[idx]]
zf = savgol_filter(z,59,2)
slopes = np.degrees(np.arctan(np.gradient(zf)/np.gradient(rf)))
velocity = np.sqrt(np.square(np.gradient(rf,0.5))+np.square(np.gradient(zf,0.5)))
allslopes= np.append(allslopes, slopes)
allvelocities = np.append(allvelocities, velocity)
#plt.scatter(slopes,velocity,marker=".",color='orange')
bins = np.linspace(-20,20,40)
v = np.zeros(len(bins))
for i in range(len(bins)-1):
idx1 = np.where(np.logical_and(bins[i] <= allslopes, allslopes < bins[i+1] ))
v[i] = np.average(allvelocities[idx1])
from pextant.explorers import Astronaut
slopes = np.linspace(-25, 25, 100)
a = Astronaut(80)
#plt.plot(slopes, a.velocity(slopes))
plt.hexbin(allslopes, allvelocities)
plt.grid()
plt.xlabel('slopes [degrees]')
plt.ylabel('velocity [m/s]')
plt.xlim([-25,25])
plt.show()
|
mit
|
bdweave/repetitive_element_parser
|
Repetitive_Element_Parser.py
|
1
|
7625
|
import numpy as np
import pandas as pd
from ipywidgets import *
from IPython.display import display
from sqlalchemy import create_engine
import os
import argparse
#dependencies are imported above
#the following code specifies command-line interface
def valid_file(param):
base, ext = os.path.splitext(param)
if ext.lower() not in (['.xlsx']):
raise argparse.ArgumentTypeError('File must have a .xlsx extension')
return param
parser = argparse.ArgumentParser(prog="Repetitive Element Parser",
description='input excel file and sheet number to parse and return rep-els from')
parser.add_argument("sheet", type=valid_file, help="input the filename \
of an excel spreadsheet containing the expression data")
parser.add_argument("-sn", "--sheetnumber", default=0, action='store', type=int, help="input \
the sheetnumber of interest \
indexed at 0")
parser.add_argument("-ma", "--maplot", default=False, action='store_true', help="create an \
ma_plot of the relevant elements")
parser.add_argument("--ERV", action='store_true', default=False, help="return a plot of ERV elements")
parser.add_argument("--LTR", action='store_true', default=False, help="return a plot of LTR elements")
parser.add_argument("--LINE", action='store_true', default=False, help="return a plot of LINE elements")
parser.add_argument("--SINE", action='store_true', default=False, help="return a plot of SINE elements")
results = parser.parse_args()
gene_list = pd.read_excel(results.sheet, sheetname=results.sheetnumber, header=0)
#defining the class of differential expression objects
class diff_expression_df:
"""This class defines the functionality associated with visualizing and parsing repetitive element expression
data from a differential read counts program, such as DRDS. The RECO class represents a 'tidy' data container
(i.e. one that has already been tidied up in various ways such that it fulfills the 'tidy data' requirements
outlined by Wikham in the Journal of Statistical Software),
in this case the data should be in a pandas dataframe, but series type object would also work."""
def __init__(self, my_dataframe):
#create an instance of the object
self.my_dataframe = my_dataframe
self.my_dataframe.fillna(0)
#converting baseMean values to log10 for better visualization of the data
self.my_dataframe["baseMean_log_10"] = \
self.my_dataframe.apply(lambda row: np.log10(np.abs(row["baseMean"])+1), axis=1)
#transfer dataframe to an sqlite database - this could be used for subsequent parsing to make it faster
#and more memory efficient
engine = create_engine('sqlite:///')
self.my_dataframe_sql = self.my_dataframe.to_sql("new_database", con=engine, if_exists='replace')
#families
self.ERVL_fam = self.my_dataframe[my_dataframe["Family"]=='ERVL']
#classes
self.LINE_class = self.my_dataframe[self.my_dataframe["Type"]=='LINE']
self.SINE_class = self.my_dataframe[self.my_dataframe["Type"]=='SINE']
self.LTR_class = self.my_dataframe[self.my_dataframe["Type"]=='LTR']
def __str__(self):
        return """This class object represents a dataframe that will be parsed and sent to an SQLite database
for further efficient parsing. The class object extends repetitive element parsing functionality to
a standard differential expression output file that is converted into a pandas Dataframe object.
The .__init__() method can be altered to parse the dataframe in any number of ways consistent with
a pandas dataframe index.
The '.ma_plot()' method returns a stereotypical MA-plot of the data, and is helpful for identifying
repetitive elements of interest and comparing multiple MA-plots of different repetitive element
    classes and families. It takes zero or one argument; valid arguments include 'all', 'LTR', 'ERVL', 'LINE',
'SINE'."""
def ma_plot(self, elements='all'): #how do I link up the elements argument to the user input from the widget?
from bokeh.plotting import figure, output_file, output_notebook, show
from bokeh.charts import Scatter
from bokeh.layouts import row
tooltips= [("Name", "@Name")]
LTRs = Scatter(self.LTR_class, x="baseMean_log_10", y="log2FoldChange", title="LTR Expression",
xlabel="log 10 mean expression", ylabel="log 2 fold change", tooltips=tooltips, color='red')
ERVLs = Scatter(self.ERVL_fam, x="baseMean_log_10", y="log2FoldChange", title="ERVL Expression",
xlabel="log 10 mean expression", ylabel="log 2 fold change", tooltips=tooltips, color='blue')
LINEs = Scatter(self.LINE_class, x="baseMean_log_10", y="log2FoldChange", title="LINE Expression",
xlabel="log 10 mean expression", ylabel="log 2 fold change", tooltips=tooltips, color='purple')
SINEs = Scatter(self.SINE_class, x="baseMean_log_10", y="log2FoldChange", title="SINE Expression",
xlabel="log 10 mean expression", ylabel="log 2 fold change", tooltips=tooltips, color='green')
        if elements==('all'): #need to figure out how to bump out of the conditionals below without having to rewrite all of the code
show(row(LTRs,ERVLs,LINEs,SINEs))
output_notebook()
elif elements==('LTR'):
show(row(LTRs))
output_notebook()
elif elements==('ERVL'):
show(row(ERVLs))
output_notebook()
elif elements==('LINE'):
show(row(LINEs))
output_notebook()
elif elements==('SINE'):
show(row(SINEs))
output_notebook()
repetitive_elements = diff_expression_df(gene_list)
def plotter():
if results.ERV==True and results.maplot==True:
return repetitive_elements.ma_plot('ERVL')
elif results.LTR==True and results.maplot==True:
return repetitive_elements.ma_plot('LTR')
elif results.LINE==True and results.maplot==True:
return repetitive_elements.ma_plot('LINE')
elif results.SINE==True and results.maplot==True:
return repetitive_elements.ma_plot('SINE')
else:
repetitive_elements.ma_plot()
plotter()
#here I will make a class of repetitive elements that inherits functionality from the diff_exp_df class
#but that also contains its own functionality
class ERVL(diff_expression_df):
def __init__(self, data):
super(ERVL, self).__init__(data)
self.ERVL_fam = data[data["Family"]=='ERVL']
def __str__(self):
"""This class is an inherited class based up on the diff_expression_df class. It implements the same
functionality and methods, but extends the parental functions by including an 'ervs_up' function."""
def ERVL_up(self, log2FC=4):
"""Return a list of upregulated ERVS of log2FC > 4"""
return self.ERVL_fam[self.ERVL_fam['log2FoldChange'] >= 3]
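# A hedged command-line usage sketch (the spreadsheet name is illustrative; the sheet is
# expected to provide 'baseMean', 'log2FoldChange', 'Family', 'Type' and 'Name' columns for
# the plots above to work):
#
#     python Repetitive_Element_Parser.py expression_data.xlsx -sn 0 -ma --LTR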
|
mit
|
holsety/tushare
|
tushare/stock/shibor.py
|
38
|
5010
|
# -*- coding:utf-8 -*-
"""
Shanghai Interbank Offered Rate (Shibor) data interface
Created on 2014/07/31
@author: Jimmy Liu
@group : waditu
@contact: [email protected]
"""
import pandas as pd
import numpy as np
from tushare.stock import cons as ct
from tushare.util import dateu as du
def shibor_data(year=None):
"""
    Get the Shanghai Interbank Offered Rate (Shibor)
    Parameters
    ------
    year: year (int)
    Return
    ------
    date: date
    ON: overnight rate
    1W: 1-week rate
    2W: 2-week rate
    1M: 1-month rate
    3M: 3-month rate
    6M: 6-month rate
    9M: 9-month rate
    1Y: 1-year rate
"""
year = du.get_year() if year is None else year
lab = ct.SHIBOR_TYPE['Shibor']
lab = lab.encode('utf-8') if ct.PY3 else lab
try:
df = pd.read_excel(ct.SHIBOR_DATA_URL%(ct.P_TYPE['http'], ct.DOMAINS['shibor'],
ct.PAGES['dw'], 'Shibor',
year, lab,
year))
df.columns = ct.SHIBOR_COLS
df['date'] = df['date'].map(lambda x: x.date())
df['date'] = df['date'].astype(np.datetime64)
return df
except:
return None
def shibor_quote_data(year=None):
"""
    Get Shibor bank quote data
    Parameters
    ------
    year: year (int)
    Return
    ------
    date: date
    bank: quoting bank name
    ON: overnight rate
    ON_B: overnight bid
    ON_A: overnight ask
    1W_B: 1-week bid
    1W_A: 1-week ask
    2W_B: 2-week bid
    2W_A: 2-week ask
    1M_B: 1-month bid
    1M_A: 1-month ask
    3M_B: 3-month bid
    3M_A: 3-month ask
    6M_B: 6-month bid
    6M_A: 6-month ask
    9M_B: 9-month bid
    9M_A: 9-month ask
    1Y_B: 1-year bid
    1Y_A: 1-year ask
"""
year = du.get_year() if year is None else year
lab = ct.SHIBOR_TYPE['Quote']
lab = lab.encode('utf-8') if ct.PY3 else lab
try:
df = pd.read_excel(ct.SHIBOR_DATA_URL%(ct.P_TYPE['http'], ct.DOMAINS['shibor'],
ct.PAGES['dw'], 'Quote',
year, lab,
year), skiprows=[0])
df.columns = ct.QUOTE_COLS
df['date'] = df['date'].map(lambda x: x.date())
df['date'] = df['date'].astype(np.datetime64)
return df
except:
return None
def shibor_ma_data(year=None):
"""
    Get Shibor moving-average data
    Parameters
    ------
    year: year (int)
    Return
    ------
    date: date
    the other columns are the 5-, 10- and 20-day averages for each tenor
"""
year = du.get_year() if year is None else year
lab = ct.SHIBOR_TYPE['Tendency']
lab = lab.encode('utf-8') if ct.PY3 else lab
try:
df = pd.read_excel(ct.SHIBOR_DATA_URL%(ct.P_TYPE['http'], ct.DOMAINS['shibor'],
ct.PAGES['dw'], 'Shibor_Tendency',
year, lab,
year), skiprows=[0])
df.columns = ct.SHIBOR_MA_COLS
df['date'] = df['date'].map(lambda x: x.date())
df['date'] = df['date'].astype(np.datetime64)
return df
except:
return None
def lpr_data(year=None):
"""
    Fetch the Loan Prime Rate (LPR).
    Parameters
    ------
      year: year (int)
    Return
    ------
    date: date
    1Y: 1-year loan prime rate
"""
year = du.get_year() if year is None else year
lab = ct.SHIBOR_TYPE['LPR']
lab = lab.encode('utf-8') if ct.PY3 else lab
try:
df = pd.read_excel(ct.SHIBOR_DATA_URL%(ct.P_TYPE['http'], ct.DOMAINS['shibor'],
ct.PAGES['dw'], 'LPR',
year, lab,
year))
df.columns = ct.LPR_COLS
df['date'] = df['date'].map(lambda x: x.date())
df['date'] = df['date'].astype(np.datetime64)
return df
except:
return None
def lpr_ma_data(year=None):
"""
    Fetch Loan Prime Rate (LPR) moving-average data.
    Parameters
    ------
      year: year (int)
    Return
    ------
    date: date
    1Y_5: 5-day moving average
    1Y_10: 10-day moving average
    1Y_20: 20-day moving average
"""
year = du.get_year() if year is None else year
lab = ct.SHIBOR_TYPE['LPR_Tendency']
lab = lab.encode('utf-8') if ct.PY3 else lab
try:
df = pd.read_excel(ct.SHIBOR_DATA_URL%(ct.P_TYPE['http'], ct.DOMAINS['shibor'],
ct.PAGES['dw'], 'LPR_Tendency',
year, lab,
year), skiprows=[0])
df.columns = ct.LPR_MA_COLS
df['date'] = df['date'].map(lambda x: x.date())
df['date'] = df['date'].astype(np.datetime64)
return df
except:
return None
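if __name__ == '__main__':
    # Added demo (illustrative): fetch the current year's Shibor and LPR data.
    # Requires network access; each function returns None if the download fails.
    print(shibor_data())
    print(lpr_data())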
|
bsd-3-clause
|
herilalaina/scikit-learn
|
examples/linear_model/plot_multi_task_lasso_support.py
|
77
|
2319
|
#!/usr/bin/env python
"""
=============================================
Joint feature selection with multi-task Lasso
=============================================
The multi-task lasso allows fitting multiple regression problems
jointly, enforcing the selected features to be the same across
tasks. This example simulates sequential measurements: each task
is a time instant, and the relevant features vary in amplitude
over time while remaining the same. The multi-task lasso imposes
that features selected at one time point are selected for all time
points. This makes feature selection by the Lasso more stable.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import MultiTaskLasso, Lasso
rng = np.random.RandomState(42)
# Generate some 2D coefficients with sine waves with random frequency and phase
n_samples, n_features, n_tasks = 100, 30, 40
n_relevant_features = 5
coef = np.zeros((n_tasks, n_features))
times = np.linspace(0, 2 * np.pi, n_tasks)
for k in range(n_relevant_features):
coef[:, k] = np.sin((1. + rng.randn(1)) * times + 3 * rng.randn(1))
X = rng.randn(n_samples, n_features)
Y = np.dot(X, coef.T) + rng.randn(n_samples, n_tasks)
coef_lasso_ = np.array([Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T])
coef_multi_task_lasso_ = MultiTaskLasso(alpha=1.).fit(X, Y).coef_
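# Added sanity check (illustrative, not part of the original example): count how
# many of the truly relevant features each estimator assigns a non-zero
# coefficient in at least one task.
lasso_hits = np.count_nonzero(np.any(coef_lasso_ != 0, axis=0)[:n_relevant_features])
mtl_hits = np.count_nonzero(np.any(coef_multi_task_lasso_ != 0, axis=0)[:n_relevant_features])
print("Relevant features recovered - Lasso: %d/%d, MultiTaskLasso: %d/%d"
      % (lasso_hits, n_relevant_features, mtl_hits, n_relevant_features))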
# #############################################################################
# Plot support and time series
fig = plt.figure(figsize=(8, 5))
plt.subplot(1, 2, 1)
plt.spy(coef_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'Lasso')
plt.subplot(1, 2, 2)
plt.spy(coef_multi_task_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'MultiTaskLasso')
fig.suptitle('Coefficient non-zero location')
feature_to_plot = 0
plt.figure()
lw = 2
plt.plot(coef[:, feature_to_plot], color='seagreen', linewidth=lw,
label='Ground truth')
plt.plot(coef_lasso_[:, feature_to_plot], color='cornflowerblue', linewidth=lw,
label='Lasso')
plt.plot(coef_multi_task_lasso_[:, feature_to_plot], color='gold', linewidth=lw,
label='MultiTaskLasso')
plt.legend(loc='upper center')
plt.axis('tight')
plt.ylim([-1.1, 1.1])
plt.show()
|
bsd-3-clause
|
krischer/jane
|
src/jane/stationxml/plugins.py
|
1
|
7215
|
# -*- coding: utf-8 -*-
import io
from django.contrib.auth.models import AnonymousUser
from django.contrib.gis.geos.point import Point
import matplotlib
# Use anti-grain geometry interface which does not require an open display.
matplotlib.use('agg')
import matplotlib.pylab as plt # noqa
from obspy.io.stationxml.core import validate_stationxml # noqa
import obspy # noqa
from jane.documents.plugins import (
ValidatorPluginPoint, IndexerPluginPoint, DocumentPluginPoint,
RetrievePermissionPluginPoint) # noqa
from jane.waveforms.models import Restriction # noqa
class StationXMLPlugin(DocumentPluginPoint):
name = 'stationxml'
title = "StationXML Plugin for Jane's Document Database"
default_content_type = 'text/xml'
class StationValidatorPlugin(ValidatorPluginPoint):
name = 'stationxml'
title = 'StationXML XMLSchema Validator'
def validate(self, document):
is_stationxml, error = validate_stationxml(document)
if not is_stationxml:
raise ValueError(error)
return True
class CanSeeAllStations(RetrievePermissionPluginPoint):
"""
If a user does not have this permission, the waveform restrictions will
also apply to the documents.
"""
name = 'stationxml'
title = 'Can See All Stations'
# Permission codename and name according to Django's nomenclature.
permission_codename = 'can_see_all_stations'
permission_name = 'Can See All Stations'
def filter_queryset_user_has_permission(self, queryset, model_type, user):
# If the user has the permission, everything is fine and the
# original queryset can be returned.
return queryset
def filter_queryset_user_does_not_have_permission(self, queryset,
model_type, user):
if not user or isinstance(user, AnonymousUser):
restrictions = Restriction.objects.all()
else:
restrictions = Restriction.objects.exclude(users=user)
# model_type can be document or document index.
if model_type == "document":
# XXX: Find a good way to do this.
pass
elif model_type == "index":
for restriction in restrictions:
queryset = queryset.exclude(json__network=restriction.network,
json__station=restriction.station)
else:
raise NotImplementedError()
return queryset
class StationIndexerPlugin(IndexerPluginPoint):
name = 'stationxml'
title = 'StationXML Indexer'
meta = {
"network": "str",
"station": "str",
"location": "str",
"channel": "str",
"latitude": "float",
"longitude": "float",
"elevation_in_m": "float",
"depth_in_m": "float",
"azimuth": "float",
"dip": "float",
"start_date": "UTCDateTime",
"end_date": "UTCDateTime",
"station_creation_date": "UTCDateTime",
"sample_rate": "float",
"sensor_type": "str",
"total_sensitivity": "float",
"sensitivity_frequency": "float",
"units_after_sensitivity": "str"
}
def index(self, document):
inv = obspy.read_inventory(document, format="stationxml")
indices = []
for network in inv:
for station in network:
for channel in station:
if channel.response:
if channel.response.instrument_sensitivity:
_i = channel.response.instrument_sensitivity
total_sensitivity = _i.value
sensitivity_frequency = _i.frequency
units_after_sensitivity = _i.input_units
else:
total_sensitivity = None
sensitivity_frequency = None
units_after_sensitivity = None
else:
total_sensitivity = None
sensitivity_frequency = None
units_after_sensitivity = None
index = {
# Information.
"network": network.code,
"network_name": network.description,
"station": station.code,
"station_name": station.description if
station.description else station.site.name,
"location": channel.location_code,
"channel": channel.code,
# Coordinates and orientation.
"latitude": channel.latitude,
"longitude": channel.longitude,
"elevation_in_m": channel.elevation,
"depth_in_m": channel.depth,
"dip": channel.dip,
"azimuth": channel.azimuth,
# Dates.
"start_date": str(channel.start_date),
"end_date": str(channel.end_date)
if channel.end_date is not None else None,
# This is strictly speaking not channel level
                        # information but needed for a fast generation of
# the station level fdsnws responses.
"station_creation_date": str(station.creation_date)
if station.creation_date is not None else None,
# Characteristics.
"sample_rate": float(channel.sample_rate),
"sensor_type": channel.sensor.type
if channel.sensor else None,
# Some things have to be extracted from the response.
"total_sensitivity": total_sensitivity,
"sensitivity_frequency": sensitivity_frequency,
"units_after_sensitivity": units_after_sensitivity,
# Geometry for PostGIS.
"geometry": [Point(channel.longitude,
channel.latitude)],
}
try:
plt.close()
except:
pass
# Sometimes fails. Wrap in try/except.
try:
# Plot response.
with io.BytesIO() as plot:
channel.plot(min_freq=1E-3, outfile=plot)
plot.seek(0)
index["attachments"] = {
"response": {"content-type": "image/png",
"data": plot.read()}}
except Exception:
pass
finally:
try:
plt.close()
except:
pass
indices.append(index)
return indices
|
gpl-3.0
|
fluxcapacitor/source.ml
|
jupyterhub.ml/notebooks/train_deploy/zz_under_construction/zz_old/TensorFlow/SkFlow_DEPRECATED/text_classification_save_restore.py
|
5
|
3732
|
# Copyright 2015-present Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
from sklearn import metrics
import pandas
import tensorflow as tf
from tensorflow.models.rnn import rnn, rnn_cell
import skflow
### Training data
# Download dbpedia_csv.tar.gz from
# https://drive.google.com/folderview?id=0Bz8a_Dbh9Qhbfll6bVpmNUtUcFdjYmF2SEpmZUZUcVNiMUw1TWN6RDV3a0JHT3kxLVhVR2M
# Unpack: tar -xvf dbpedia_csv.tar.gz
train = pandas.read_csv('dbpedia_csv/train.csv', header=None)
X_train, y_train = train[2], train[0]
test = pandas.read_csv('dbpedia_csv/test.csv', header=None)
X_test, y_test = test[2], test[0]
### Process vocabulary
MAX_DOCUMENT_LENGTH = 10
vocab_processor = skflow.preprocessing.VocabularyProcessor(MAX_DOCUMENT_LENGTH)
X_train = np.array(list(vocab_processor.fit_transform(X_train)))
X_test = np.array(list(vocab_processor.transform(X_test)))
n_words = len(vocab_processor.vocabulary_)
print('Total words: %d' % n_words)
### Models
EMBEDDING_SIZE = 50
def average_model(X, y):
word_vectors = skflow.ops.categorical_variable(X, n_classes=n_words,
embedding_size=EMBEDDING_SIZE, name='words')
features = tf.reduce_max(word_vectors, reduction_indices=1)
return skflow.models.logistic_regression(features, y)
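# Note (added for clarity): average_model above is an alternative model_fn (a
# bag-of-words style model that max-pools over the word embeddings); it can be
# passed to skflow.TensorFlowEstimator below in place of rnn_model.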
def rnn_model(X, y):
"""Recurrent neural network model to predict from sequence of words
to a class."""
# Convert indexes of words into embeddings.
# This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
# maps word indexes of the sequence into [batch_size, sequence_length,
# EMBEDDING_SIZE].
word_vectors = skflow.ops.categorical_variable(X, n_classes=n_words,
embedding_size=EMBEDDING_SIZE, name='words')
    # Split into a list of embeddings per word, while removing the doc length dim.
    # word_list will be a list of tensors of shape [batch_size, EMBEDDING_SIZE].
word_list = skflow.ops.split_squeeze(1, MAX_DOCUMENT_LENGTH, word_vectors)
# Create a Gated Recurrent Unit cell with hidden size of EMBEDDING_SIZE.
cell = rnn_cell.GRUCell(EMBEDDING_SIZE)
    # Create an unrolled Recurrent Neural Network of length
    # MAX_DOCUMENT_LENGTH and pass word_list as inputs for each unit.
_, encoding = rnn.rnn(cell, word_list, dtype=tf.float32)
    # Given the RNN encoding, take the encoding of the last step (i.e. the
    # hidden state of the last unit) and pass it as features for logistic
    # regression over the output classes.
return skflow.models.logistic_regression(encoding, y)
model_path = '/tmp/skflow_examples/text_classification'
if os.path.exists(model_path):
classifier = skflow.TensorFlowEstimator.restore(model_path)
else:
classifier = skflow.TensorFlowEstimator(model_fn=rnn_model, n_classes=15,
steps=100, optimizer='Adam', learning_rate=0.01, continue_training=True)
# Continuously train (100 steps per call) until interrupted; the model is saved on KeyboardInterrupt
while True:
try:
classifier.fit(X_train, y_train)
except KeyboardInterrupt:
classifier.save(model_path)
break
# Predict on test set
score = metrics.accuracy_score(y_test, classifier.predict(X_test))
print('Accuracy: {0:f}'.format(score))
|
apache-2.0
|
patrickbwarren/SunlightHNC
|
gw_p_compare.py
|
1
|
2321
|
#!/usr/bin/env python3
# This file is part of SunlightDPD - a home for open source software
# related to the dissipative particle dynamics (DPD) simulation
# method.
# Based on an original code copyright (c) 2007 Lucian Anton.
# Modifications copyright (c) 2008, 2009 Andrey Vlasov.
# Additional modifications copyright (c) 2009-2017 Unilever UK Central
# Resources Ltd (Registered in England & Wales, Company No 29140;
# Registered Office: Unilever House, Blackfriars, London, EC4P 4BQ,
# UK).
# SunlightDPD is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# SunlightDPD is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with SunlightDPD. If not, see <http://www.gnu.org/licenses/>.
# The results of this calculation can be directly compared with Fig 4
# of the Groot and Warren paper [J. Chem. Phys. v107, 4423 (1997)].
# The data from that figure is coded below.
# rho (p-rho)/(A*rho^2)
data = [[0.0, 0.0379935086163],
[1.5, 0.0751786298043],
[2.5, 0.0886823425022],
[3.0, 0.0924251622846],
[3.5, 0.0946639891655],
[4.0, 0.0965259421847],
[5.0, 0.0987451548125],
[6.0, 0.0998358473824],
[7.0, 0.1005510671090],
[8.0, 0.102017933031]]
xdata = list(data[i][0] for i in range(len(data)))
ydata = list(data[i][1] for i in range(len(data)))
from oz import wizard as w
w.initialise()
w.arep[0,0] = A = 25.0
w.dpd_potential()
npt = 41
rhomax = 10.0
x = []
y = []
for i in range(npt):
w.rho[0] = rho = rhomax * (i + 1.0) / npt
w.hnc_solve()
x.append(rho)
y.append((w.press-rho)/(A*rho*rho))
print("%f\t%g\t%g" % (rho, (w.press-rho)/(A*rho*rho), w.error))
import matplotlib.pyplot as plt
plt.plot(xdata, ydata, 'ro', label='Groot & Warren (1997)')
plt.plot(x, y, label='HNC')
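# Added reference line (illustrative, not part of the original script): Groot &
# Warren fit their DPD equation of state as p = rho + alpha*A*rho^2 with
# alpha ~ 0.101 at high density, so the plotted ratio should plateau near there.
plt.axhline(0.101, color='gray', linestyle='--', label='GW fit, alpha = 0.101')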
plt.xlabel('$\\rho$')
plt.ylabel('$(p-\\rho)/A\\rho^2$')
plt.legend(loc='lower right')
plt.show()
|
gpl-2.0
|
marktoakley/PyLatt
|
examples/1_montecarlo_2d.py
|
1
|
1076
|
'''
This example demonstrates the setting up and running of a Monte Carlo
search on a two-dimensional square lattice.
@author: Mark Oakley
'''
from pylatt.model import HP
from pylatt.lattice import SquareLattice
from pylatt.plotter import plot_2d
from pylatt.search import MonteCarlo
import matplotlib.pyplot as plt
''' First choose a lattice. Here, we will use the two-dimensional
square lattice.'''
lattice = SquareLattice()
'''Select the protein sequence to search. Using the classic hydrophobic
polar model, we need to define a sequence of H and P residues.'''
model = HP("PHPPHPHHHPHHPHHHHH")
'''Now set up and run a Monte Carlo search. The temperature is an
optional parameter and defaults to 1.0 if not defined.'''
search = MonteCarlo(lattice, model, temperature = 2.0)
'''Run the search for 100 steps. The search returns the lowest energy
structure found. The energy of this structure is in structure.energy.'''
structure = search.run(100)
print("Lowest energy found: ", structure.energy)
'''To visualise this structure, use plot_2d.'''
plot_2d(structure)
plt.show()
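# Added sketch (illustrative): the temperature parameter controls how readily
# uphill moves are accepted, so a colder search is greedier. This reuses only
# the API already shown above.
cold_search = MonteCarlo(lattice, model, temperature = 0.5)
cold_structure = cold_search.run(100)
print("Lowest energy found at T=0.5: ", cold_structure.energy)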
|
gpl-3.0
|
manashmndl/scikit-learn
|
examples/linear_model/plot_sparse_recovery.py
|
243
|
7461
|
"""
============================================================
Sparse recovery: feature selection for sparse linear models
============================================================
Given a small number of observations, we want to recover which features
of X are relevant to explain y. For this :ref:`sparse linear models
<l1_feature_selection>` can outperform standard statistical tests if the
true model is sparse, i.e. if a small fraction of the features are
relevant.
As detailed in :ref:`the compressive sensing notes
<compressive_sensing>`, the ability of the L1-based approach to identify the
relevant variables depends on the sparsity of the ground truth, the
number of samples, the number of features, the conditioning of the
design matrix on the signal subspace, the amount of noise, and the
absolute value of the smallest non-zero coefficient [Wainwright2006]
(http://statistics.berkeley.edu/tech-reports/709.pdf).
Here we keep all parameters constant and vary the conditioning of the
design matrix. For a well-conditioned design matrix (small mutual
incoherence) we are exactly in compressive sensing conditions (i.i.d
Gaussian sensing matrix), and L1-recovery with the Lasso performs very
well. For an ill-conditioned matrix (high mutual incoherence),
regressors are very correlated, and the Lasso randomly selects one.
However, randomized-Lasso can recover the ground truth well.
In each situation, we first vary the alpha parameter setting the sparsity
of the estimated model and look at the stability scores of the randomized
Lasso. This analysis, knowing the ground truth, shows an optimal regime
in which relevant features stand out from the irrelevant ones. If alpha
is chosen too small, non-relevant variables enter the model. Conversely,
if alpha is selected too large, the Lasso is equivalent to
stepwise regression, and thus brings no advantage over a univariate
F-test.
In a second step, we set alpha and compare the performance of different
feature selection methods, using the area under curve (AUC) of the
precision-recall.
"""
print(__doc__)
# Author: Alexandre Gramfort and Gael Varoquaux
# License: BSD 3 clause
import warnings
import matplotlib.pyplot as plt
import numpy as np
from scipy import linalg
from sklearn.linear_model import (RandomizedLasso, lasso_stability_path,
LassoLarsCV)
from sklearn.feature_selection import f_regression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import auc, precision_recall_curve
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.utils.extmath import pinvh
from sklearn.utils import ConvergenceWarning
def mutual_incoherence(X_relevant, X_irelevant):
"""Mutual incoherence, as defined by formula (26a) of [Wainwright2006].
"""
projector = np.dot(np.dot(X_irelevant.T, X_relevant),
pinvh(np.dot(X_relevant.T, X_relevant)))
return np.max(np.abs(projector).sum(axis=1))
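# Added sanity check (illustrative, not part of the original example): for an
# orthogonal design the relevant and irrelevant blocks are uncorrelated, so the
# mutual incoherence is exactly 0 -- the most favourable setting for L1 recovery.
_eye = np.eye(6)
assert mutual_incoherence(_eye[:, :3], _eye[:, 3:]) == 0.0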
for conditioning in (1, 1e-4):
###########################################################################
# Simulate regression data with a correlated design
n_features = 501
n_relevant_features = 3
noise_level = .2
coef_min = .2
# The Donoho-Tanner phase transition is around n_samples=25: below we
# will completely fail to recover in the well-conditioned case
n_samples = 25
block_size = n_relevant_features
rng = np.random.RandomState(42)
# The coefficients of our model
coef = np.zeros(n_features)
coef[:n_relevant_features] = coef_min + rng.rand(n_relevant_features)
    # The correlation of our design: variables correlated by blocks of 3
corr = np.zeros((n_features, n_features))
for i in range(0, n_features, block_size):
corr[i:i + block_size, i:i + block_size] = 1 - conditioning
corr.flat[::n_features + 1] = 1
corr = linalg.cholesky(corr)
# Our design
X = rng.normal(size=(n_samples, n_features))
X = np.dot(X, corr)
# Keep [Wainwright2006] (26c) constant
X[:n_relevant_features] /= np.abs(
linalg.svdvals(X[:n_relevant_features])).max()
X = StandardScaler().fit_transform(X.copy())
# The output variable
y = np.dot(X, coef)
y /= np.std(y)
# We scale the added noise as a function of the average correlation
# between the design and the output variable
y += noise_level * rng.normal(size=n_samples)
mi = mutual_incoherence(X[:, :n_relevant_features],
X[:, n_relevant_features:])
###########################################################################
# Plot stability selection path, using a high eps for early stopping
# of the path, to save computation time
alpha_grid, scores_path = lasso_stability_path(X, y, random_state=42,
eps=0.05)
plt.figure()
# We plot the path as a function of alpha/alpha_max to the power 1/3: the
    # power 1/3 scales the path less brutally than the log, and makes it
    # easier to see the progression along the path
hg = plt.plot(alpha_grid[1:] ** .333, scores_path[coef != 0].T[1:], 'r')
hb = plt.plot(alpha_grid[1:] ** .333, scores_path[coef == 0].T[1:], 'k')
ymin, ymax = plt.ylim()
plt.xlabel(r'$(\alpha / \alpha_{max})^{1/3}$')
plt.ylabel('Stability score: proportion of times selected')
plt.title('Stability Scores Path - Mutual incoherence: %.1f' % mi)
plt.axis('tight')
plt.legend((hg[0], hb[0]), ('relevant features', 'irrelevant features'),
loc='best')
###########################################################################
# Plot the estimated stability scores for a given alpha
# Use 6-fold cross-validation rather than the default 3-fold: it leads to
# a better choice of alpha:
    # Suppress the user warnings - they are not necessary for the example
    # as it is specifically set up to be challenging.
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
warnings.simplefilter('ignore', ConvergenceWarning)
lars_cv = LassoLarsCV(cv=6).fit(X, y)
    # Run the RandomizedLasso: we use a path going down to .1*alpha_max
# to avoid exploring the regime in which very noisy variables enter
# the model
alphas = np.linspace(lars_cv.alphas_[0], .1 * lars_cv.alphas_[0], 6)
clf = RandomizedLasso(alpha=alphas, random_state=42).fit(X, y)
trees = ExtraTreesRegressor(100).fit(X, y)
# Compare with F-score
F, _ = f_regression(X, y)
plt.figure()
for name, score in [('F-test', F),
('Stability selection', clf.scores_),
('Lasso coefs', np.abs(lars_cv.coef_)),
('Trees', trees.feature_importances_),
]:
precision, recall, thresholds = precision_recall_curve(coef != 0,
score)
plt.semilogy(np.maximum(score / np.max(score), 1e-4),
label="%s. AUC: %.3f" % (name, auc(recall, precision)))
plt.plot(np.where(coef != 0)[0], [2e-4] * n_relevant_features, 'mo',
label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Score")
    # Plot only the first 100 coefficients
plt.xlim(0, 100)
plt.legend(loc='best')
plt.title('Feature selection scores - Mutual incoherence: %.1f'
% mi)
plt.show()
|
bsd-3-clause
|
joernhees/scikit-learn
|
sklearn/linear_model/__init__.py
|
83
|
3139
|
"""
The :mod:`sklearn.linear_model` module implements generalized linear models. It
includes Ridge regression, Bayesian Regression, Lasso and Elastic Net
estimators computed with Least Angle Regression and coordinate descent. It also
implements Stochastic Gradient Descent related algorithms.
"""
# See http://scikit-learn.sourceforge.net/modules/sgd.html and
# http://scikit-learn.sourceforge.net/modules/linear_model.html for
# complete documentation.
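# Example usage (added, illustrative sketch; `X_train`, `y_train` and `X_test`
# are placeholder arrays, not defined in this module):
#
#     from sklearn.linear_model import Ridge
#     reg = Ridge(alpha=1.0).fit(X_train, y_train)
#     y_pred = reg.predict(X_test)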
from .base import LinearRegression
from .bayes import BayesianRidge, ARDRegression
from .least_angle import (Lars, LassoLars, lars_path, LarsCV, LassoLarsCV,
LassoLarsIC)
from .coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV,
lasso_path, enet_path, MultiTaskLasso,
MultiTaskElasticNet, MultiTaskElasticNetCV,
MultiTaskLassoCV)
from .huber import HuberRegressor
from .sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
from .stochastic_gradient import SGDClassifier, SGDRegressor
from .ridge import (Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV,
ridge_regression)
from .logistic import (LogisticRegression, LogisticRegressionCV,
logistic_regression_path)
from .omp import (orthogonal_mp, orthogonal_mp_gram, OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV)
from .passive_aggressive import PassiveAggressiveClassifier
from .passive_aggressive import PassiveAggressiveRegressor
from .perceptron import Perceptron
from .randomized_l1 import (RandomizedLasso, RandomizedLogisticRegression,
lasso_stability_path)
from .ransac import RANSACRegressor
from .theil_sen import TheilSenRegressor
__all__ = ['ARDRegression',
'BayesianRidge',
'ElasticNet',
'ElasticNetCV',
'Hinge',
'HuberRegressor',
'Lars',
'LarsCV',
'Lasso',
'LassoCV',
'LassoLars',
'LassoLarsCV',
'LassoLarsIC',
'LinearRegression',
'Log',
'LogisticRegression',
'LogisticRegressionCV',
'ModifiedHuber',
'MultiTaskElasticNet',
'MultiTaskElasticNetCV',
'MultiTaskLasso',
'MultiTaskLassoCV',
'OrthogonalMatchingPursuit',
'OrthogonalMatchingPursuitCV',
'PassiveAggressiveClassifier',
'PassiveAggressiveRegressor',
'Perceptron',
'RandomizedLasso',
'RandomizedLogisticRegression',
'Ridge',
'RidgeCV',
'RidgeClassifier',
'RidgeClassifierCV',
'SGDClassifier',
'SGDRegressor',
'SquaredLoss',
'TheilSenRegressor',
'enet_path',
'lars_path',
'lasso_path',
'lasso_stability_path',
'logistic_regression_path',
'orthogonal_mp',
'orthogonal_mp_gram',
'ridge_regression',
'RANSACRegressor']
|
bsd-3-clause
|
cmcantalupo/geopm
|
scripts/setup.py
|
2
|
5275
|
#!/usr/bin/env python3
# Copyright (c) 2015 - 2021, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os
import sys
import shutil
import subprocess
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if os.getcwd() != os.path.dirname(os.path.abspath(__file__)):
sys.stderr.write('ERROR: script must be run in the directory that contains it\n')
exit(1)
try:
    # use exec rather than import so that setup.py can be executed
# on a system missing dependencies required to import geopmpy.
version_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'geopmpy/version.py')
with open(version_file) as fid:
exec(compile(fid.read(), version_file, 'exec'))
except IOError:
__version__ = '0.0.0'
__beta__ = False
try: # Get the version from git if using standalone geopmpy
__version__ = subprocess.check_output(['git', 'describe']).strip().decode()[1:]
version_components = __version__.split('-')
if len(version_components) > 1: # We are NOT on a tagged release
tag = version_components[0]
patches_since = version_components[1]
git_sha = version_components[2]
__version__ = '{}+dev{}{}'.format(tag, patches_since, git_sha)
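            # For example (illustrative): a `git describe` output of
            # 'v1.1.0-27-g3f2a1b' yields tag='1.1.0', patches_since='27',
            # git_sha='g3f2a1b', so __version__ becomes '1.1.0+dev27g3f2a1b'.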
with open(version_file, 'w') as vf:
vf.write("__version__ = '{}'\n".format(__version__))
vf.write('__beta__ = {}\n'.format(__beta__))
except OSError as ee:
sys.stderr.write('WARNING: setting version to 0.0.0, error determining version from git - {}\n'.format(ee))
if not os.path.exists('COPYING'):
shutil.copyfile('../COPYING', 'COPYING')
if not os.path.exists('README'):
shutil.copyfile('../README', 'README')
if not os.path.exists('AUTHORS'):
shutil.copyfile('../AUTHORS', 'AUTHORS')
long_description = """\
The python front end to the GEOPM runtime. Includes scripts for
launching the runtime and postprocessing the output data."""
scripts = ["geopmlaunch"]
if __beta__:
scripts += ["geopmplotter", "geopmconvertreport"]
classifiers = ['Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: BSD License',
'Operating System :: POSIX :: Linux',
'Natural Language :: English',
'Topic :: Scientific/Engineering',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: System :: Distributed Computing',
'Topic :: System :: Hardware :: Symmetric Multi-processing',
'Topic :: System :: Power (UPS)',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
]
install_requires = ['pandas>=0.23.0',
'natsort>=5.3.2',
'matplotlib>=2.2.2',
'cycler>=0.10.0',
'tables>=3.4.3',
'psutil>=5.4.8',
'cffi>=1.6.0',
'numpy>=1.14.3',
'setuptools>=39.2.0',
'pyyaml>=5.1.0',
'future>=0.17.1']
setup(name='geopmpy',
version=__version__,
description='GEOPM - Global Extensible Open Power Manager',
long_description=long_description,
url='https://geopm.github.io',
download_url='http://download.opensuse.org/repositories/home:/cmcantalupo:/geopm/',
license='BSD-3-Clause',
author='Christopher Cantalupo <[email protected]>, Brad Geltz <[email protected]>',
packages=['geopmpy'],
scripts=scripts,
test_suite='test',
classifiers=classifiers,
install_requires=install_requires,
python_requires='>=3.6')
|
bsd-3-clause
|
robin-lai/scikit-learn
|
examples/linear_model/plot_logistic_l1_l2_sparsity.py
|
384
|
2601
|
"""
==============================================
L1 Penalty and Sparsity in Logistic Regression
==============================================
Comparison of the sparsity (percentage of zero coefficients) of solutions when
L1 and L2 penalty are used for different values of C. We can see that large
values of C give more freedom to the model. Conversely, smaller values of C
constrain the model more. In the L1 penalty case, this leads to sparser
solutions.
We classify 8x8 images of digits into two classes: 0-4 against 5-9.
The visualization shows coefficients of the models for varying C.
"""
print(__doc__)
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Andreas Mueller <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
digits = datasets.load_digits()
X, y = digits.data, digits.target
X = StandardScaler().fit_transform(X)
# classify small against large digits
y = (y > 4).astype(np.int)
# Set regularization parameter
for i, C in enumerate((100, 1, 0.01)):
# turn down tolerance for short training time
clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01)
clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01)
clf_l1_LR.fit(X, y)
clf_l2_LR.fit(X, y)
coef_l1_LR = clf_l1_LR.coef_.ravel()
coef_l2_LR = clf_l2_LR.coef_.ravel()
# coef_l1_LR contains zeros due to the
# L1 sparsity inducing norm
sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100
sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100
print("C=%.2f" % C)
print("Sparsity with L1 penalty: %.2f%%" % sparsity_l1_LR)
print("score with L1 penalty: %.4f" % clf_l1_LR.score(X, y))
print("Sparsity with L2 penalty: %.2f%%" % sparsity_l2_LR)
print("score with L2 penalty: %.4f" % clf_l2_LR.score(X, y))
l1_plot = plt.subplot(3, 2, 2 * i + 1)
l2_plot = plt.subplot(3, 2, 2 * (i + 1))
if i == 0:
l1_plot.set_title("L1 penalty")
l2_plot.set_title("L2 penalty")
l1_plot.imshow(np.abs(coef_l1_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
l2_plot.imshow(np.abs(coef_l2_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
plt.text(-8, 3, "C = %.2f" % C)
l1_plot.set_xticks(())
l1_plot.set_yticks(())
l2_plot.set_xticks(())
l2_plot.set_yticks(())
plt.show()
|
bsd-3-clause
|
xubenben/scikit-learn
|
sklearn/linear_model/tests/test_logistic.py
|
23
|
27579
|
import numpy as np
import scipy.sparse as sp
from scipy import linalg, optimize, sparse
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_raise_message
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model.logistic import (
LogisticRegression,
logistic_regression_path, LogisticRegressionCV,
_logistic_loss_and_grad, _logistic_grad_hess,
_multinomial_grad_hess, _logistic_loss,
)
from sklearn.cross_validation import StratifiedKFold
from sklearn.datasets import load_iris, make_classification
from sklearn.metrics import log_loss
X = [[-1, 0], [0, 1], [1, 1]]
X_sp = sp.csr_matrix(X)
Y1 = [0, 1, 1]
Y2 = [2, 1, 0]
iris = load_iris()
def check_predictions(clf, X, y):
"""Check that the model is able to fit the classification data"""
n_samples = len(y)
classes = np.unique(y)
n_classes = classes.shape[0]
predicted = clf.fit(X, y).predict(X)
assert_array_equal(clf.classes_, classes)
assert_equal(predicted.shape, (n_samples,))
assert_array_equal(predicted, y)
probabilities = clf.predict_proba(X)
assert_equal(probabilities.shape, (n_samples, n_classes))
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), y)
def test_predict_2_classes():
# Simple sanity check on a 2 classes dataset
# Make sure it predicts the correct result on simple datasets.
check_predictions(LogisticRegression(random_state=0), X, Y1)
check_predictions(LogisticRegression(random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X_sp, Y1)
def test_error():
# Test for appropriate exception on errors
msg = "Penalty term must be positive"
assert_raise_message(ValueError, msg,
LogisticRegression(C=-1).fit, X, Y1)
assert_raise_message(ValueError, msg,
LogisticRegression(C="test").fit, X, Y1)
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = "Tolerance for stopping criteria must be positive"
assert_raise_message(ValueError, msg, LR(tol=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(tol="test").fit, X, Y1)
msg = "Maximum number of iteration must be positive"
assert_raise_message(ValueError, msg, LR(max_iter=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(max_iter="test").fit, X, Y1)
def test_predict_3_classes():
check_predictions(LogisticRegression(C=10), X, Y2)
check_predictions(LogisticRegression(C=10), X_sp, Y2)
def test_predict_iris():
# Test logistic regression with the iris dataset
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
# Test that both multinomial and OvR solvers handle
# multiclass data correctly and give good accuracy
# score (>0.95) for the training data.
for clf in [LogisticRegression(C=len(iris.data)),
LogisticRegression(C=len(iris.data), solver='lbfgs',
multi_class='multinomial'),
LogisticRegression(C=len(iris.data), solver='newton-cg',
multi_class='multinomial')]:
clf.fit(iris.data, target)
assert_array_equal(np.unique(target), clf.classes_)
pred = clf.predict(iris.data)
assert_greater(np.mean(pred == target), .95)
probabilities = clf.predict_proba(iris.data)
assert_array_almost_equal(probabilities.sum(axis=1),
np.ones(n_samples))
pred = iris.target_names[probabilities.argmax(axis=1)]
assert_greater(np.mean(pred == target), .95)
def test_multinomial_validation():
for solver in ['lbfgs', 'newton-cg']:
lr = LogisticRegression(C=-1, solver=solver, multi_class='multinomial')
assert_raises(ValueError, lr.fit, [[0, 1], [1, 0]], [0, 1])
def test_check_solver_option():
X, y = iris.data, iris.target
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = ("Logistic Regression supports only liblinear, newton-cg and"
" lbfgs solvers, got wrong_name")
lr = LR(solver="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = "multi_class should be either multinomial or ovr, got wrong_name"
lr = LR(solver='newton-cg', multi_class="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
        # all solvers except 'newton-cg' and 'lbfgs'
for solver in ['liblinear']:
msg = ("Solver %s does not support a multinomial backend." %
solver)
lr = LR(solver=solver, multi_class='multinomial')
assert_raise_message(ValueError, msg, lr.fit, X, y)
# all solvers except 'liblinear'
for solver in ['newton-cg', 'lbfgs']:
msg = ("Solver %s supports only l2 penalties, got l1 penalty." %
solver)
lr = LR(solver=solver, penalty='l1')
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = ("Solver %s supports only dual=False, got dual=True" %
solver)
lr = LR(solver=solver, dual=True)
assert_raise_message(ValueError, msg, lr.fit, X, y)
def test_multinomial_binary():
# Test multinomial LR on a binary problem.
target = (iris.target > 0).astype(np.intp)
target = np.array(["setosa", "not-setosa"])[target]
for solver in ['lbfgs', 'newton-cg']:
clf = LogisticRegression(solver=solver, multi_class='multinomial')
clf.fit(iris.data, target)
assert_equal(clf.coef_.shape, (1, iris.data.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_array_equal(clf.predict(iris.data), target)
mlr = LogisticRegression(solver=solver, multi_class='multinomial',
fit_intercept=False)
mlr.fit(iris.data, target)
pred = clf.classes_[np.argmax(clf.predict_log_proba(iris.data),
axis=1)]
assert_greater(np.mean(pred == target), .9)
def test_sparsify():
# Test sparsify and densify members.
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
clf = LogisticRegression(random_state=0).fit(iris.data, target)
pred_d_d = clf.decision_function(iris.data)
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred_s_d = clf.decision_function(iris.data)
sp_data = sp.coo_matrix(iris.data)
pred_s_s = clf.decision_function(sp_data)
clf.densify()
pred_d_s = clf.decision_function(sp_data)
assert_array_almost_equal(pred_d_d, pred_s_d)
assert_array_almost_equal(pred_d_d, pred_s_s)
assert_array_almost_equal(pred_d_d, pred_d_s)
def test_inconsistent_input():
# Test that an exception is raised on inconsistent input
rng = np.random.RandomState(0)
X_ = rng.random_sample((5, 10))
y_ = np.ones(X_.shape[0])
y_[0] = 0
clf = LogisticRegression(random_state=0)
# Wrong dimensions for training data
y_wrong = y_[:-1]
assert_raises(ValueError, clf.fit, X, y_wrong)
# Wrong dimensions for test data
assert_raises(ValueError, clf.fit(X_, y_).predict,
rng.random_sample((3, 12)))
def test_write_parameters():
# Test that we can write to coef_ and intercept_
clf = LogisticRegression(random_state=0)
clf.fit(X, Y1)
clf.coef_[:] = 0
clf.intercept_[:] = 0
assert_array_almost_equal(clf.decision_function(X), 0)
@raises(ValueError)
def test_nan():
# Test proper NaN handling.
# Regression test for Issue #252: fit used to go into an infinite loop.
Xnan = np.array(X, dtype=np.float64)
Xnan[0, 1] = np.nan
LogisticRegression(random_state=0).fit(Xnan, Y1)
def test_consistency_path():
# Test that the path algorithm is consistent
rng = np.random.RandomState(0)
X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
y = [1] * 100 + [-1] * 100
Cs = np.logspace(0, 4, 10)
f = ignore_warnings
# can't test with fit_intercept=True since LIBLINEAR
# penalizes the intercept
for method in ('lbfgs', 'newton-cg', 'liblinear'):
coefs, Cs = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=False, tol=1e-16, solver=method)
for i, C in enumerate(Cs):
lr = LogisticRegression(C=C, fit_intercept=False, tol=1e-16)
lr.fit(X, y)
lr_coef = lr.coef_.ravel()
assert_array_almost_equal(lr_coef, coefs[i], decimal=4)
# test for fit_intercept=True
for method in ('lbfgs', 'newton-cg', 'liblinear'):
Cs = [1e3]
coefs, Cs = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=True, tol=1e-4, solver=method)
lr = LogisticRegression(C=Cs[0], fit_intercept=True, tol=1e-4,
intercept_scaling=10000)
lr.fit(X, y)
lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_])
assert_array_almost_equal(lr_coef, coefs[0], decimal=4)
def test_liblinear_dual_random_state():
# random_state is relevant for liblinear solver only if dual=True
X, y = make_classification(n_samples=20)
lr1 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr1.fit(X, y)
lr2 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr2.fit(X, y)
lr3 = LogisticRegression(random_state=8, dual=True, max_iter=1, tol=1e-15)
lr3.fit(X, y)
# same result for same random state
assert_array_almost_equal(lr1.coef_, lr2.coef_)
# different results for different random states
msg = "Arrays are not almost equal to 6 decimals"
assert_raise_message(AssertionError, msg,
assert_array_almost_equal, lr1.coef_, lr3.coef_)
def test_logistic_loss_and_grad():
X_ref, y = make_classification(n_samples=20)
n_features = X_ref.shape[1]
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = np.zeros(n_features)
# First check that our derivation of the grad is correct
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad, approx_grad, decimal=2)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(
w, X, y, alpha=1.
)
assert_array_almost_equal(loss, loss_interp)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad_interp, approx_grad, decimal=2)
def test_logistic_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features = 50, 5
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = .1 * np.ones(n_features)
# First check that _logistic_grad_hess is consistent
# with _logistic_loss_and_grad
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
grad_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(grad, grad_2)
# Now check our hessian along the second direction of the grad
vector = np.zeros_like(grad)
vector[1] = 1
hess_col = hess(vector)
# Computation of the Hessian is particularly fragile to numerical
# errors when doing simple finite differences. Here we compute the
# grad along a path in the direction of the vector and then use a
# least-square regression to estimate the slope
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_logistic_loss_and_grad(w + t * vector, X, y, alpha=1.)[1]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(approx_hess_col, hess_col, decimal=3)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(w, X, y, alpha=1.)
loss_interp_2 = _logistic_loss(w, X, y, alpha=1.)
grad_interp_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(loss_interp, loss_interp_2)
assert_array_almost_equal(grad_interp, grad_interp_2)
def test_logistic_cv():
# test for LogisticRegressionCV object
n_samples, n_features = 50, 5
rng = np.random.RandomState(0)
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
lr_cv = LogisticRegressionCV(Cs=[1.], fit_intercept=False,
solver='liblinear')
lr_cv.fit(X_ref, y)
lr = LogisticRegression(C=1., fit_intercept=False)
lr.fit(X_ref, y)
assert_array_almost_equal(lr.coef_, lr_cv.coef_)
assert_array_equal(lr_cv.coef_.shape, (1, n_features))
assert_array_equal(lr_cv.classes_, [-1, 1])
assert_equal(len(lr_cv.classes_), 2)
coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values()))
assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features))
assert_array_equal(lr_cv.Cs_.shape, (1, ))
scores = np.asarray(list(lr_cv.scores_.values()))
assert_array_equal(scores.shape, (1, 3, 1))
def test_logistic_cv_sparse():
X, y = make_classification(n_samples=50, n_features=5,
random_state=0)
X[X < 1.0] = 0.0
csr = sp.csr_matrix(X)
clf = LogisticRegressionCV(fit_intercept=True)
clf.fit(X, y)
clfs = LogisticRegressionCV(fit_intercept=True)
clfs.fit(csr, y)
assert_array_almost_equal(clfs.coef_, clf.coef_)
assert_array_almost_equal(clfs.intercept_, clf.intercept_)
assert_equal(clfs.C_, clf.C_)
def test_intercept_logistic_helper():
n_samples, n_features = 10, 5
X, y = make_classification(n_samples=n_samples, n_features=n_features,
random_state=0)
# Fit intercept case.
alpha = 1.
w = np.ones(n_features + 1)
grad_interp, hess_interp = _logistic_grad_hess(w, X, y, alpha)
loss_interp = _logistic_loss(w, X, y, alpha)
# Do not fit intercept. This can be considered equivalent to adding
    # a feature vector of ones, i.e. a column of ones.
X_ = np.hstack((X, np.ones(10)[:, np.newaxis]))
grad, hess = _logistic_grad_hess(w, X_, y, alpha)
loss = _logistic_loss(w, X_, y, alpha)
# In the fit_intercept=False case, the feature vector of ones is
# penalized. This should be taken care of.
assert_almost_equal(loss_interp + 0.5 * (w[-1] ** 2), loss)
# Check gradient.
assert_array_almost_equal(grad_interp[:n_features], grad[:n_features])
assert_almost_equal(grad_interp[-1] + alpha * w[-1], grad[-1])
rng = np.random.RandomState(0)
grad = rng.rand(n_features + 1)
hess_interp = hess_interp(grad)
hess = hess(grad)
assert_array_almost_equal(hess_interp[:n_features], hess[:n_features])
assert_almost_equal(hess_interp[-1] + alpha * grad[-1], hess[-1])
def test_ovr_multinomial_iris():
# Test that OvR and multinomial are correct using the iris dataset.
train, target = iris.data, iris.target
n_samples, n_features = train.shape
    # Use a pre-defined fold, since the generated folds would differ for different y
cv = StratifiedKFold(target, 3)
clf = LogisticRegressionCV(cv=cv)
clf.fit(train, target)
clf1 = LogisticRegressionCV(cv=cv)
target_copy = target.copy()
target_copy[target_copy == 0] = 1
clf1.fit(train, target_copy)
assert_array_almost_equal(clf.scores_[2], clf1.scores_[2])
assert_array_almost_equal(clf.intercept_[2:], clf1.intercept_)
assert_array_almost_equal(clf.coef_[2][np.newaxis, :], clf1.coef_)
# Test the shape of various attributes.
assert_equal(clf.coef_.shape, (3, n_features))
assert_array_equal(clf.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10, n_features + 1))
assert_equal(clf.Cs_.shape, (10, ))
scores = np.asarray(list(clf.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
# Test that for the iris data multinomial gives a better accuracy than OvR
for solver in ['lbfgs', 'newton-cg']:
clf_multi = LogisticRegressionCV(
solver=solver, multi_class='multinomial', max_iter=15
)
clf_multi.fit(train, target)
multi_score = clf_multi.score(train, target)
ovr_score = clf.score(train, target)
assert_greater(multi_score, ovr_score)
# Test attributes of LogisticRegressionCV
assert_equal(clf.coef_.shape, clf_multi.coef_.shape)
assert_array_equal(clf_multi.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10,
n_features + 1))
assert_equal(clf_multi.Cs_.shape, (10, ))
scores = np.asarray(list(clf_multi.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
def test_logistic_regression_solvers():
X, y = make_classification(n_features=10, n_informative=5, random_state=0)
clf_n = LogisticRegression(solver='newton-cg', fit_intercept=False)
clf_n.fit(X, y)
clf_lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
clf_lbf.fit(X, y)
clf_lib = LogisticRegression(fit_intercept=False)
clf_lib.fit(X, y)
assert_array_almost_equal(clf_n.coef_, clf_lib.coef_, decimal=3)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=3)
assert_array_almost_equal(clf_n.coef_, clf_lbf.coef_, decimal=3)
def test_logistic_regression_solvers_multiclass():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
clf_n = LogisticRegression(solver='newton-cg', fit_intercept=False)
clf_n.fit(X, y)
clf_lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
clf_lbf.fit(X, y)
clf_lib = LogisticRegression(fit_intercept=False)
clf_lib.fit(X, y)
assert_array_almost_equal(clf_n.coef_, clf_lib.coef_, decimal=4)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_n.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regressioncv_class_weights():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
    # Test that liblinear fails when a class_weight of type dict is
    # provided for a multiclass problem. However, it can handle
    # binary problems.
clf_lib = LogisticRegressionCV(class_weight={0: 0.1, 1: 0.2},
solver='liblinear')
assert_raises(ValueError, clf_lib.fit, X, y)
y_ = y.copy()
y_[y == 2] = 1
clf_lib.fit(X, y_)
assert_array_equal(clf_lib.classes_, [0, 1])
# Test for class_weight=balanced
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
random_state=0)
clf_lbf = LogisticRegressionCV(solver='lbfgs', fit_intercept=False,
class_weight='balanced')
clf_lbf.fit(X, y)
clf_lib = LogisticRegressionCV(solver='liblinear', fit_intercept=False,
class_weight='balanced')
clf_lib.fit(X, y)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regression_convergence_warnings():
# Test that warnings are raised if model does not converge
X, y = make_classification(n_samples=20, n_features=20)
clf_lib = LogisticRegression(solver='liblinear', max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, clf_lib.fit, X, y)
assert_equal(clf_lib.n_iter_, 2)
def test_logistic_regression_multinomial():
# Tests for the multinomial option in logistic regression
# Some basic attributes of Logistic Regression
n_samples, n_features, n_classes = 50, 20, 3
X, y = make_classification(n_samples=n_samples,
n_features=n_features,
n_informative=10,
n_classes=n_classes, random_state=0)
clf_int = LogisticRegression(solver='lbfgs', multi_class='multinomial')
clf_int.fit(X, y)
assert_array_equal(clf_int.coef_.shape, (n_classes, n_features))
clf_wint = LogisticRegression(solver='lbfgs', multi_class='multinomial',
fit_intercept=False)
clf_wint.fit(X, y)
assert_array_equal(clf_wint.coef_.shape, (n_classes, n_features))
# Similar tests for newton-cg solver option
clf_ncg_int = LogisticRegression(solver='newton-cg',
multi_class='multinomial')
clf_ncg_int.fit(X, y)
assert_array_equal(clf_ncg_int.coef_.shape, (n_classes, n_features))
clf_ncg_wint = LogisticRegression(solver='newton-cg', fit_intercept=False,
multi_class='multinomial')
clf_ncg_wint.fit(X, y)
assert_array_equal(clf_ncg_wint.coef_.shape, (n_classes, n_features))
# Compare solutions between lbfgs and newton-cg
assert_almost_equal(clf_int.coef_, clf_ncg_int.coef_, decimal=3)
assert_almost_equal(clf_wint.coef_, clf_ncg_wint.coef_, decimal=3)
assert_almost_equal(clf_int.intercept_, clf_ncg_int.intercept_, decimal=3)
    # Test that the path gives almost the same results. However, since in this
    # case we take the average of the coefs after fitting across all the
    # folds, they need not be exactly the same.
for solver in ['lbfgs', 'newton-cg']:
clf_path = LogisticRegressionCV(solver=solver,
multi_class='multinomial', Cs=[1.])
clf_path.fit(X, y)
assert_array_almost_equal(clf_path.coef_, clf_int.coef_, decimal=3)
assert_almost_equal(clf_path.intercept_, clf_int.intercept_, decimal=3)
def test_multinomial_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features, n_classes = 100, 5, 3
X = rng.randn(n_samples, n_features)
w = rng.rand(n_classes, n_features)
Y = np.zeros((n_samples, n_classes))
ind = np.argmax(np.dot(X, w.T), axis=1)
Y[range(0, n_samples), ind] = 1
w = w.ravel()
sample_weights = np.ones(X.shape[0])
grad, hessp = _multinomial_grad_hess(w, X, Y, alpha=1.,
sample_weight=sample_weights)
# extract first column of hessian matrix
vec = np.zeros(n_features * n_classes)
vec[0] = 1
hess_col = hessp(vec)
# Estimate hessian using least squares as done in
# test_logistic_grad_hess
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_multinomial_grad_hess(w + t * vec, X, Y, alpha=1.,
sample_weight=sample_weights)[0]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(hess_col, approx_hess_col)
def test_liblinear_decision_function_zero():
# Test negative prediction when decision_function values are zero.
# Liblinear predicts the positive class when decision_function values
# are zero. This is a test to verify that we do not do the same.
# See Issue: https://github.com/scikit-learn/scikit-learn/issues/3600
# and the PR https://github.com/scikit-learn/scikit-learn/pull/3623
X, y = make_classification(n_samples=5, n_features=5)
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, y)
# Dummy data such that the decision function becomes zero.
X = np.zeros((5, 5))
assert_array_equal(clf.predict(X), np.zeros(5))
def test_liblinear_logregcv_sparse():
# Test LogRegCV with solver='liblinear' works for sparse matrices
X, y = make_classification(n_samples=10, n_features=5)
clf = LogisticRegressionCV(solver='liblinear')
clf.fit(sparse.csr_matrix(X), y)
def test_logreg_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
clf = LogisticRegression(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % clf.intercept_scaling)
assert_raise_message(ValueError, msg, clf.fit, X, Y1)
def test_logreg_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, Y1)
assert_equal(clf.intercept_, 0.)
def test_logreg_cv_penalty():
# Test that the correct penalty is passed to the final fit.
X, y = make_classification(n_samples=50, n_features=20, random_state=0)
lr_cv = LogisticRegressionCV(penalty="l1", Cs=[1.0], solver='liblinear')
lr_cv.fit(X, y)
lr = LogisticRegression(penalty="l1", C=1.0, solver='liblinear')
lr.fit(X, y)
assert_equal(np.count_nonzero(lr_cv.coef_), np.count_nonzero(lr.coef_))
def test_logreg_predict_proba_multinomial():
X, y = make_classification(
n_samples=10, n_features=20, random_state=0, n_classes=3, n_informative=10)
    # Predicted probabilities using the true-entropy loss should give a smaller loss
# than those using the ovr method.
clf_multi = LogisticRegression(multi_class="multinomial", solver="lbfgs")
clf_multi.fit(X, y)
clf_multi_loss = log_loss(y, clf_multi.predict_proba(X))
clf_ovr = LogisticRegression(multi_class="ovr", solver="lbfgs")
clf_ovr.fit(X, y)
clf_ovr_loss = log_loss(y, clf_ovr.predict_proba(X))
assert_greater(clf_ovr_loss, clf_multi_loss)
    # Predicted probabilities using the soft-max function should give a smaller loss
# than those using the logistic function.
clf_multi_loss = log_loss(y, clf_multi.predict_proba(X))
clf_wrong_loss = log_loss(y, clf_multi._predict_proba_lr(X))
assert_greater(clf_wrong_loss, clf_multi_loss)
|
bsd-3-clause
|
calhewitt/lucid_utils
|
lucid_utils/frameplot.py
|
2
|
1871
|
import matplotlib as mpl
import os
if not "DISPLAY" in os.environ: # Make MPL Work if no display is available
mpl.use('Agg')
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.backends.backend_agg import FigureCanvasAgg
def get_image(frame, colourmode = "BW", normalise=False):
fig = plt.figure(frameon=False, figsize=(256,256), dpi=1)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
cmap = cm.hot
cmap.set_under("#82bcff")
vm = np.max(frame) if (np.count_nonzero(frame) > 0) else 2
ax.imshow(frame, vmin = 1, vmax=vm, cmap = cmap, interpolation='none')
canvas = plt.get_current_fig_manager().canvas
agg = canvas.switch_backends(FigureCanvasAgg)
agg.draw()
s = agg.tostring_rgb()
l, b, w, h = agg.figure.bbox.bounds
w, h = int(w), int(h)
X = np.fromstring(s, np.uint8)
X.shape = h, w, 3
plt.close()
try:
im = Image.fromstring("RGB", (w, h), s)
except Exception:
im = Image.frombytes("RGB", (w, h), s)
return im
def show_frame(frame):
if not "DISPLAY" in os.environ:
raise Exception("No display available")
fig, ax = plt.subplots()
cmap = cm.hot
cmap.set_under("#82bcff")
vm = np.max(frame) if (np.count_nonzero(frame) > 0) else 2
cax = ax.imshow(frame, vmin = 1, vmax=vm, cmap = cmap, interpolation='none')
fig.colorbar(cax)
plt.show()
def fig_frame(frame):
fig, ax = plt.subplots()
cmap = cm.hot
cmap.set_under("#82bcff")
vm = np.max(frame) if (np.count_nonzero(frame) > 0) else 2
cax = ax.imshow(frame, vmin = 1, vmax=vm, cmap = cmap, interpolation='none')
fig.colorbar(cax)
return fig
def show_blob(blob):
xs, ys = zip(*blob)
sizex, sizey = max(xs) - min(xs), max(ys) - min(ys)
toshow = np.zeros((sizex+3,sizey+3))
for i in range(len(xs)):
toshow[1 + xs[i] - min(xs)][1 + ys[i] - min(ys)] = 1
show_frame(toshow)
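# Illustrative usage sketch, not part of the original module; the frame is a
# made-up 256x256 numpy array standing in for a real detector frame.
def _example_usage():
    frame = np.zeros((256, 256))
    frame[100:105, 120:125] = 10             # a small fake cluster of hit pixels
    im = get_image(frame)                    # render the frame to a PIL image (no display needed)
    im.save("frame_example.png")             # hypothetical output filename
    show_blob([(3, 4), (3, 5), (4, 5)])      # plot a tiny blob (requires a display)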
|
mit
|
mromanello/CitationExtractor
|
citation_extractor/eval.py
|
1
|
35748
|
# -*- coding: utf-8 -*-
# author: Matteo Romanello, [email protected]
"""
Module containing classes and functions to perform the evaluation of the various steps of the pipeline (NER, RelEx, NED).
%load_ext autoreload
%autoreload 2
import logging
import tabulate
from citation_extractor.Utils.IO import init_logger
init_logger(loglevel=logging.DEBUG)
import pickle
import codecs
import pkg_resources
import pandas as pd
from citation_extractor.eval import evaluate_ned
with codecs.open(pkg_resources.resource_filename("citation_extractor", "data/pickles/test_gold_dataframe.pkl"),"rb") as pickle_file:
testset_gold_df = pd.read_pickle(pickle_file)
with codecs.open(pkg_resources.resource_filename("citation_extractor", "data/pickles/test_target_dataframe_cm1.pkl"),"rb") as pickle_file:
testset_target_df = pd.read_pickle(pickle_file)
ann_dir = "/Users/rromanello/Documents/crex/citation_extractor/citation_extractor/data/aph_corpus/testset/ann/"
scores, accuracy_by_type, per_doc_counts, errors = evaluate_ned(testset_gold_df, ann_dir, testset_target_df, strict=True)
"""
from __future__ import division
import pdb # TODO: remove from production code
import sys,logging,re
import os
import glob
import math
from pyCTS import CTS_URN, BadCtsUrnSyntax
from citation_extractor.core import *
from citation_extractor.crfpp_wrap import CRF_classifier
from citation_extractor.Utils import IO
from citation_extractor.io.iob import file_to_instances
from citation_extractor.ned import NIL_URN as NIL_ENTITY
from citation_extractor.io.brat import read_ann_file
from citation_extractor.Utils.IO import init_logger
# TODO: in the long run, remove `SimpleEvaluator` and `CrossEvaluator`
# and generally the `miguno` library as a dependency (use `sklearn` instead)
#from miguno.partitioner import *
#from miguno.crossvalidationdataconstructor import *
import pprint
global logger
logger = logging.getLogger(__name__)
class SimpleEvaluator(object):
"""
>>> import settings #doctest: +SKIP
>>> extractor_1 = citation_extractor(settings) #doctest: +SKIP
>>> se = SimpleEvaluator([extractor_1,],iob_file="data/75-02637.iob") #doctest: +SKIP
>>> se = SimpleEvaluator([extractor_1,],["/Users/56k/phd/code/APh/corpus/by_collection/C2/",]) #doctest: +SKIP
>>> print se.eval() #doctest: +SKIP
TODO: there should be a test also for IOB files w/ POS tag column
"""
def __init__(self,extractors,iob_directories=[],iob_file=None,label_index=-1):
"""
Args:
extractors:
the list of canonical citation extractors to evaluate
iob_directories:
a list of directories containing the IOB files to be used for testing
iob_file:
a single IOB file to be used for testing and evaluating the extractors
"""
# read the test instances from a list of directories containing the test data
import logging
self.logger = logging.getLogger("CREX.SIMPLEVAL")
if(iob_file is None):
self.logger.debug(iob_directories)
data = []
for directory in iob_directories:
data += IO.read_iob_files(directory,".txt")
self.test_instances = data
else:
self.test_instances = file_to_instances(iob_file)
self.logger.debug("Found %i instances for test"%len(self.test_instances))
self.extractors = extractors
self.output = {}
self.error_matrix = None
self.label_index = label_index
return
def eval(self):
"""
Run the evaluator.
Returns:
TODO
"""
extractor_results = {}
for extractor in self.extractors:
eng = extractor[1]
extractor_name = extractor[0]
input = [[token[0] for token in instance] for instance in self.test_instances if len(instance)>0]
POS = False
if(len(self.test_instances[0][0]) > 2):
self.label_index = 2 # the last one is the label
legacy_features = [[("z_POS",token[1]) for token in instance] for instance in self.test_instances if len(instance)>0]
output = eng.extract(input,legacy_features)
POS = True
else:
output = eng.extract(input)
if(POS):
to_evaluate = [[tuple([token["token"].decode("utf-8"),legacy_features[i][n][1],token["label"].decode("utf-8")]) for n,token in enumerate(instance)] for i,instance in enumerate(output)]
else:
to_evaluate = [[tuple([token["token"].decode("utf-8"),token["label"].decode("utf-8")]) for n,token in enumerate(instance)] for i,instance in enumerate(output)]
results = self.evaluate(to_evaluate,self.test_instances,label_index = self.label_index)
self.output[str(eng)] = self.write_result(to_evaluate,self.test_instances,self.label_index)
eval_results = results[0]
by_tag_results = results[1]
eval_results["f-score"] = self.calc_fscore(eval_results)
eval_results["precision"] = self.calc_precision(eval_results)
eval_results["recall"] = self.calc_recall(eval_results)
by_tag_results = self.calc_stats_by_tag(by_tag_results)
extractor_results[extractor_name] = results
return extractor_results
@staticmethod
def write_result(l_tagged_instances,l_test_instances,label_index=1):
"""
"""
temp = [[(l_test_instances[n][i][0],l_test_instances[n][i][label_index],l_tagged_instances[n][i][label_index]) for i,token in enumerate(instance)] for n,instance in enumerate(l_test_instances)]
return temp
@staticmethod
def print_stats(d_results):
"""
Pretty print of the evaluation stats.
"""
for item in d_results:
print "%s\n%s\n%s"%("="*len(item),item,"="*len(item))
print "%10s\t%10s\t%10s\t%5s\t%5s\t%5s\t%5s"%("f-score","precision","recall","tp","fp","tn","fn")
print "%10f\t%10f\t%10f\t%5i\t%5i\t%5i\t%5i\n"%(d_results[item]["f-sc"],d_results[item]["prec"],d_results[item]["rec"],d_results[item]["true_pos"],d_results[item]["false_pos"],d_results[item]["true_neg"],d_results[item]["false_neg"],)
return
@staticmethod
def read_instances(directories):
result = []
for d in directories:
result += IO.read_iob_files(d)
return result
@staticmethod
def evaluate(l_tagged_instances,l_test_instances,negative_BIO_tag = u'O',label_index=-1):
"""
Evaluates a list of tagged instances against a list of test instances (gold standard):
>>> tagged = [[('cf.','O'),('Hom','O'),('Il.','B-REFAUWORK'),('1.1','I-REFAUWORK'),(';','I-REFAUWORK')]]
>>> test = [[('cf.','O'),('Hom','B-REFAUWORK'),('Il.','I-REFAUWORK'),('1.1','B-REFSCOPE'),(';','O')]]
>>> res = SimpleEvaluator.evaluate(tagged,test)
>>> print res[0]
{'false_pos': 3, 'true_pos': 1, 'true_neg': 1, 'false_neg': 1}
And with tokens having POS information
>>> tagged = [[('cf.','N/A','O'),('Hom','N/A','O'),('Il.','N/A','B-REFAUWORK'),('1.1','N/A','I-REFAUWORK'),(';','N/A','I-REFAUWORK')]]
>>> test = [[('cf.','N/A','O'),('Hom','N/A','B-REFAUWORK'),('Il.','N/A','I-REFAUWORK'),('1.1','N/A','B-REFSCOPE'),(';','N/A','O')]]
>>> print SimpleEvaluator.evaluate(tagged,test,label_index=2)[0]
{'false_pos': 3, 'true_pos': 1, 'true_neg': 1, 'false_neg': 1}
Args:
l_tagged_instances:
A list of instances. Each instance is a list of tokens, the tokens being tuples.
Each tuple has the token (i=0) and the assigned label (i=1).
l_test_instances:
pos_index:
An integer: when set to -1 indicates that there is no POS "column" in the data. Otherwise provides the tuple index
of the POS tag.
Returns:
A dictionary:
{
"true_pos": <int>
,"false_pos": <int>
,"true_neg": <int>
,"false_neg": <int>
}
"""
# TODO: check same length and identity of tokens
import logging
l_logger = logging.getLogger('CREX.EVAL')
fp = tp = fn = tn = token_counter = 0
errors_by_tag = {}
labels = ['O','B-AAUTHOR','I-AAUTHOR','B-AWORK','I-AWORK','B-REFAUWORK','I-REFAUWORK','B-REFSCOPE','I-REFSCOPE']
import numpy
error_matrix = numpy.zeros((len(labels),len(labels)),dtype=numpy.int)
error_details = {}
for n,inst in enumerate(l_tagged_instances):
tag_inst = l_tagged_instances[n]
gold_inst = l_test_instances[n]
token_counter += len(tag_inst)
for n,tok in enumerate(tag_inst):
p_fp = p_tp = p_fn = p_tn = 0
gold_token = gold_inst[n][0]
tagged_token = tok[0]
l_logger.debug("Gold token: %s"%gold_token)
l_logger.debug("Tagged token: %s"%tagged_token)
if(label_index != -1):
gold_label = gold_inst[n][label_index]
tagged_label = tok[label_index]
else:
gold_label = gold_inst[n][1]
tagged_label = tok[1]
l_logger.debug("Gold label: %s"%gold_label)
l_logger.debug("Tagged label: %s"%tagged_label)
if(not errors_by_tag.has_key(gold_label)):
errors_by_tag[gold_label] = {"true_pos": 0
,"false_pos": 0
,"true_neg": 0
,"false_neg": 0
}
error_matrix[labels.index(gold_label)][labels.index(tagged_label)] += 1
error = "%s => %s"%(gold_label, tagged_label)
if(gold_label != tagged_label):
if(error_details.has_key(error)):
error_details[error].append(gold_token)
else:
error_details[error] = []
error_details[error].append(gold_token)
if(gold_label != negative_BIO_tag):
l_logger.debug("Label \"%s\" for token \"%s\" is not negative"%(gold_label,gold_token))
if(tagged_label == gold_label):
p_tp += 1
errors_by_tag[gold_label]["true_pos"] += 1
l_logger.info("[%s] \"%s\"=> tagged: %s / gold: %s"%("TP",tagged_token, tagged_label, gold_label))
elif(tagged_label != gold_label):
if(tagged_label == negative_BIO_tag):
p_fn += 1
errors_by_tag[gold_label]["false_neg"] += 1
l_logger.info("[%s] \"%s\"=> tagged: %s / gold: %s"%("FN",tagged_token, tagged_label, gold_label))
else:
p_fp += 1
errors_by_tag[gold_label]["false_pos"] += p_fp
l_logger.info("[%s] \"%s\"=> tagged: %s / gold: %s"%("FP",tagged_token, tagged_label, gold_label))
elif(gold_label == negative_BIO_tag):
l_logger.debug("Label \"%s\" for token \"%s\" is negative"%(gold_label,gold_token))
if(tagged_label == gold_label):
p_tn += 1
errors_by_tag[gold_label]["true_pos"] += 1
l_logger.info("[%s] \"%s\"=> tagged: %s / gold: %s"%("TN",tagged_token, tagged_label, gold_label))
elif(tagged_label != gold_label):
if(tagged_label != negative_BIO_tag):
p_fp += 1
errors_by_tag[gold_label]["false_pos"] += 1
l_logger.info("[%s] \"%s\"=> tagged: %s / gold: %s"%("FP",tagged_token, tagged_label, gold_label))
fp += p_fp
tp += p_tp
fn += p_fn
tn += p_tn
assert (tp+fp+tn+fn) == token_counter
l_logger.debug("asserted %i (tp +fp + tn + fn) == %i (token counter)"%(tp+fp+tn+fn,token_counter))
result = {"true_pos": tp
,"false_pos": fp
,"true_neg": tn
,"false_neg": fn
},errors_by_tag
global_sum = {"true_pos": 0
,"false_pos": 0
,"true_neg": 0
,"false_neg": 0}
for tag in result[1].keys():
for value in result[1][tag]:
global_sum[value]+= result[1][tag][value]
assert (global_sum["true_pos"] + global_sum["false_pos"] + global_sum["false_neg"]) == token_counter
l_logger.debug("asserted %i (tp +fp + fn) == %i (token counter)"%(tp+fp+tn+fn,token_counter))
#SimpleEvaluator.render_error_matrix(error_matrix,labels)
#print pprint.pprint(error_details)
return result
@staticmethod
def render_error_matrix(matrix, labels):
"""
TODO:
Prints the error matrix
"""
print ' %11s'%" ".join(labels)
for row_label, row in zip(labels, matrix):
print '%11s [%s]' % (row_label, ' '.join('%09s' % i for i in row))
return
@staticmethod
def calc_stats_by_tag(d_by_tag_errors):
for tag in d_by_tag_errors:
d_by_tag_errors[tag]["prec"] = SimpleEvaluator.calc_precision(d_by_tag_errors[tag])
d_by_tag_errors[tag]["rec"] = SimpleEvaluator.calc_recall(d_by_tag_errors[tag])
d_by_tag_errors[tag]["f-sc"] = SimpleEvaluator.calc_fscore(d_by_tag_errors[tag])
return d_by_tag_errors
@staticmethod
def calc_stats_by_entity(d_by_tag_errors):
"""
Aggregates results by entity (B-X and I-X are aggregated together.)
Args:
d_by_tag_errors:
a dictionary containing error details by tag
Example:
>>> import core #doctest: +SKIP
>>> from core import citation_extractor #doctest: +SKIP
>>> from eval import SimpleEvaluator #doctest: +SKIP
>>> import base_settings, settings #doctest: +SKIP
>>> extractor_1 = citation_extractor(base_settings) #doctest: +SKIP
>>> se = SimpleEvaluator([extractor_1,],["/Users/56k/phd/code/APh/experiments/C2/",]) #doctest: +SKIP
>>> res = se.eval() #doctest: +SKIP
>>> by_entity = se.calc_stats_by_entity(res[str(extractor_1)][1]) #doctest: +SKIP
"""
overall_errors = d_by_tag_errors
stats_by_entity = {}
for tag in d_by_tag_errors:
"""
logger.debug("(%s) True Positives (tp): %i"%(tag,overall_errors[tag]['true_pos']))
logger.debug("(%s) False Positives (fp): %i"%(tag,overall_errors[tag]['false_pos']))
logger.debug("(%s) False Negatives (fn): %i"%(tag,overall_errors[tag]['false_neg']))
logger.debug("(%s) Total labels in test set: %i"%(tag,test_label_counts[tag]))
logger.debug("(%s) precision: %f"%(tag,details[tag]["prec"]))
logger.debug("(%s) recall: %f"%(tag,details[tag]["rec"]))
logger.debug("(%s) F-score: %f"%(tag,details[tag]["f-sc"]))
logger.debug("************")
"""
if(tag != "O"):
aggreg_tag = tag.replace("B-","").replace("I-","")
if(not stats_by_entity.has_key(aggreg_tag)):
stats_by_entity[aggreg_tag] = {
"true_pos":0,
"true_neg":0,
"false_pos":0,
"false_neg":0,
}
stats_by_entity[aggreg_tag]['false_pos'] += overall_errors[tag]['false_pos']
stats_by_entity[aggreg_tag]['true_pos'] += overall_errors[tag]['true_pos']
stats_by_entity[aggreg_tag]['true_neg'] += overall_errors[tag]['true_neg']
stats_by_entity[aggreg_tag]['false_neg'] += overall_errors[tag]['false_neg']
for aggreg_tag in stats_by_entity:
stats_by_entity[aggreg_tag]['prec'] = SimpleEvaluator.calc_precision(stats_by_entity[aggreg_tag])
stats_by_entity[aggreg_tag]['rec'] = SimpleEvaluator.calc_recall(stats_by_entity[aggreg_tag])
stats_by_entity[aggreg_tag]['f-sc'] = SimpleEvaluator.calc_fscore(stats_by_entity[aggreg_tag])
return stats_by_entity
@staticmethod
def calc_precision(d_errors):
"""
Calculates the precision given the input error dictionary.
"""
if(d_errors["true_pos"] + d_errors["false_pos"] == 0):
return 0
else:
return d_errors["true_pos"] / float(d_errors["true_pos"] + d_errors["false_pos"])
@staticmethod
def calc_recall(d_errors):
"""
Calculates the recall given the input error dictionary.
"""
if(d_errors["true_pos"] + d_errors["false_neg"] == 0):
return 0
else:
return d_errors["true_pos"] / float(d_errors["true_pos"] + d_errors["false_neg"])
@staticmethod
def calc_accuracy(d_errors):
"""
Calculates the accuracy given the input error dictionary.
"""
acc = (d_errors["true_pos"] + d_errors["true_neg"]) / float(d_errors["true_pos"] + d_errors["false_pos"] + d_errors["true_neg"] + d_errors["false_neg"])
return acc
@staticmethod
def calc_fscore(d_errors):
"""
Calculates the F1 score given the input error dictionary.
"""
prec = SimpleEvaluator.calc_precision(d_errors)
rec = SimpleEvaluator.calc_recall(d_errors)
if(prec == 0 and rec == 0):
return 0
else:
return 2*(float(prec * rec) / float(prec + rec))
class CrossEvaluator(SimpleEvaluator): # TODO: remove
"""
>>> import settings #doctest: +SKIP
>>> import pprint #doctest: +SKIP
>>> base_settings.DEBUG = False #doctest: +SKIP
>>> extractor_1 = settings #doctest: +SKIP
>>> test_files = ["/Users/56k/phd/code/APh/experiments/eff_cand_1_a/","/Users/56k/phd/code/APh/experiments/C1/","/Users/56k/phd/code/APh/experiments/C2/",] #doctest: +SKIP
>>> ce = CrossEvaluator([extractor_1,],test_files,culling_size=100,fold_number=10,evaluation_dir="/Users/56k/Downloads/eval_temp/") #doctest: +SKIP
>>> result = ce.run() #doctest: +SKIP
>>> pprint.pprint(result) #doctest: +SKIP
"""
def __init__(self,extractors,iob_test_file,culling_size=None,fold_number=10,evaluation_dir="./",label_index=-1):
super(CrossEvaluator, self).__init__(extractors,iob_test_file,label_index=label_index)
self.culling_size = culling_size
self.fold_number = fold_number
self.evaluation_dir = evaluation_dir
import logging
self.logger = init_logger(verbose=True,log_name='CREX.CROSSEVAL')
if(self.culling_size is not None):
self.logger.info("Culling set at %i"%self.culling_size)
import random
random.shuffle(self.test_instances)
self.culled_instances = self.test_instances[:self.culling_size]
else:
self.logger.info("Culling not set.")
self.logger.info("Evaluation type: %i-fold cross evaluations"%self.fold_number)
self.logger.info("Training/Test set contains %i instances."%len(self.test_instances))
self.create_datasets()
def create_datasets(self):
"""
TODO
"""
positive_labels = ["B-REFSCOPE","I-REFSCOPE","B-AAUTHOR","I-AAUTHOR","B-REFAUWORK","I-REFAUWORK","B-AWORK","I-AWORK"]
if(self.culling_size is not None):
positives_negatives = [(n,IO.instance_contains_label(inst,positive_labels)) for n,inst in enumerate(self.culled_instances)]
positives = [self.culled_instances[i[0]] for i in positives_negatives if i[1] is True]
negatives = [self.culled_instances[i[0]] for i in positives_negatives if i[1] is False]
else:
positives_negatives = [(n,IO.instance_contains_label(inst,positive_labels)) for n,inst in enumerate(self.test_instances)]
positives = [self.test_instances[i[0]] for i in positives_negatives if i[1] is True]
negatives = [self.test_instances[i[0]] for i in positives_negatives if i[1] is False]
self.logger.info("%i Positive instances"%len(positives))
self.logger.info("%i Negative instances"%len(negatives))
self.logger.info("%i Total instances"%(len(positives)+len(negatives)))
self.dataSets_iterator = CrossValidationDataConstructor(positives, negatives, numPartitions=self.fold_number, randomize=False).getDataSets()
pass
def run(self):
"""
TODO
"""
iterations = []
results = {}
results_by_entity = {}
# first lets' create test and train set for each iteration
for x,iter in enumerate(self.dataSets_iterator):
self.logger.info("Iteration %i"%(x+1))
train_set=[]
test_set=[]
for y,set in enumerate(iter):
for n,group in enumerate(set):
if(y==0):
train_set+=group
else:
test_set+=group
iterations.append((train_set,test_set))
# let's go through all the iterations
for i,iter in enumerate(iterations):
results["iter-%i"%(i+1)] = {}
results_by_entity["iter-%i"%(i+1)] = {}
train_file="%sfold_%i.train"%(self.evaluation_dir,i+1)
test_file="%sfold_%i.test"%(self.evaluation_dir,i+1)
IO.write_iob_file(iter[0],train_file)
IO.write_iob_file(iter[1],test_file)
# the following line is a bit of a workaround
# to avoid recomputing the features when training
# each new classifier, I take them from the file created
# to train the CRF model (which should always be the first extractor
# to be evaluated).
filename = "%sfold_%i.train.train"%(self.extractors[0][1].TEMP_DIR,(i+1))
f=codecs.open(filename,'r','utf-8')
data = f.read()
f.close()
feature_sets=[[[token.split('\t')[:len(token.split('\t'))-1],token.split('\t')[len(token.split('\t'))-1:]] for token in instance.split('\n')] for instance in data.split('\n\n')]
order = FeatureExtractor().get_feature_order()
labelled_feature_sets=[]
for instance in feature_sets:
for token in instance:
temp = [{order[n]:feature for n,feature in enumerate(token[0])},token[1][0]]
labelled_feature_sets.append(temp)
self.logger.info("read %i labelled instances"%len(feature_sets))
for n,extractor in enumerate(self.extractors):
extractor_settings = extractor[1]
extractor_name = extractor[0]
results["iter-%i"%(i+1)][extractor_name] = {}
self.logger.info("Running iteration #%i with extractor %s"%(i+1,extractor_name))
self.logger.info(train_file)
self.logger.info(test_file)
self.logger.info(extractor_settings)
extractor_settings.DATA_FILE = train_file
if(extractor_settings.CLASSIFIER is not None):
extractor = citation_extractor(extractor_settings, extractor_settings.CLASSIFIER,labelled_feature_sets)
else:
extractor = citation_extractor(extractor_settings)
self.logger.info(extractor.classifier)
se = SimpleEvaluator([(extractor_name, extractor),],iob_file=test_file)
results["iter-%i"%(i+1)][extractor_name] = se.eval()[extractor_name][0]
results_by_entity["iter-%i"%(i+1)][extractor_name] = SimpleEvaluator.calc_stats_by_entity(se.eval()[extractor_name][1])
#self.logger.info(results_by_entity["iter-%i"%(i+1)][extractor_name])
return results,results_by_entity
def evaluate_ned(goldset_data, gold_directory, target_data, strict=False):
"""
Evaluate the Named Entity Disambiguation, taking as input the goldset data, the
goldset directory and the target data, derived from files in the brat stand-off annotation format.
The F1 score is computed over the macro-averaged precision and recall.
:param goldset_data: a `pandas.DataFrame` with the goldset data read via `citation_extractor.Utils.IO.load_brat_data`
:param gold_directory: the path to the gold set
:param target_data: a `pandas.DataFrame` with the target data read via `citation_extractor.Utils.IO.load_brat_data`
:param strict: whether to consider consecutive references to the same ancient work only once (i.e. `scope`
relations with identical arg1).
:return: a tuple where [0] is a dictionary with keys "precision", "recall", "fscore" and "accuracy";
[1] is a dictionary mapping each entity type to its accuracy;
[2] is a list of dictionaries (keys "true_pos", "true_neg", "false_pos" and "false_neg"), one for each document;
[3] is a dictionary containing the actual URNs (gold and predicted) grouped by error type.
A tuple of None values is returned if the evaluation is aborted.
"""
# variables to store aggregated results and errors
disambig_results = []
disambig_errors = {"true_pos":[], "true_neg":[], "false_pos":[], "false_neg":[]}
aggregated_results = {"true_pos":0, "true_neg":0, "false_pos":0, "false_neg":0}
results_by_entity_type = {}
scores = {}
# check that number/names of .ann files is the same
doc_ids_gold = list(set(goldset_data["doc_id"]))
docs_ids_target = list(set(target_data["doc_id"]))
try:
assert sorted(doc_ids_gold)==sorted(docs_ids_target)
except AssertionError as e:
logger.error("Evaluation aborted: the script expects identical filenames in gold and target directory.")
return (None, None, None, None)
logger.info("Evaluating NED: there are %i documents" % len(doc_ids_gold))
for doc_id in doc_ids_gold:
# create a dictionary like {"T1":"urn:cts:greekLit:tlg0012", }
gold_disambiguations = {id.split('-')[2]: row["urn_clean"]
for id, row in goldset_data[goldset_data["doc_id"]==doc_id].iterrows()}
# pass on all relations data
gold_entities, gold_relations = read_ann_file("%s.txt" % doc_id, os.path.join(gold_directory, ""))[:2]
# create a dictionary like {"T1":"urn:cts:greekLit:tlg0012", }
target_disambiguations = {id.split('-')[2]: row["urn_clean"]
for id, row in target_data[target_data["doc_id"]==doc_id].iterrows()}
# process each individual file
file_result, file_errors, result_by_entity_type = _evaluate_ned_file(doc_id
, gold_disambiguations
, gold_entities
, gold_relations
, target_disambiguations
, strict)
# add error details
for error_type in file_errors:
disambig_errors[error_type]+=file_errors[error_type]
for entity_type in result_by_entity_type:
if not entity_type in results_by_entity_type:
results_by_entity_type[entity_type] = {}
for error_type in result_by_entity_type[entity_type]:
if not error_type in results_by_entity_type[entity_type]:
results_by_entity_type[entity_type] = {"true":0, "false":0}
results_by_entity_type[entity_type][error_type] += result_by_entity_type[entity_type][error_type]
# NB: if the file contains only NIL entities we exclude it from the counts
# used to compute the macro-averaged precision and recall
NIL_entities = [urn for urn in gold_disambiguations.values() if urn == NIL_ENTITY]
non_NIL_entities = [urn for urn in gold_disambiguations.values() if urn != NIL_ENTITY]
if len(non_NIL_entities)>0:
disambig_results.append(file_result)
elif len(non_NIL_entities)==0:
logger.debug("%s contains only NIL entities (or is empty): not considered when computing macro-averaged measures" % doc_id)
# still, we include it in the counts used to compute the global accuracy
for key in file_result:
aggregated_results[key]+=file_result[key]
precisions = [SimpleEvaluator.calc_precision(r) for r in disambig_results]
recalls = [SimpleEvaluator.calc_recall(r) for r in disambig_results]
assert len(precisions)==len(recalls)
scores = {
"precision" : sum(precisions)/len(precisions)
, "recall" : sum(recalls)/len(recalls)
}
prec, rec = scores["precision"], scores["recall"]
scores["fscore"] = 0.0 if prec == 0.0 and rec == 0.0 else 2*(float(prec * rec) / float(prec + rec))
scores["accuracy"] = (aggregated_results["true_pos"] + aggregated_results["true_neg"]) \
/ (aggregated_results["true_pos"] + aggregated_results["true_neg"] \
+ aggregated_results["false_neg"] + aggregated_results["false_pos"])
logger.info("Computing accuracy: %i (tp) + %i (tn) / %i (tp) + %i (tn) + %i (fn) + %i (fp" % (
aggregated_results["true_pos"]
, aggregated_results["true_neg"]
, aggregated_results["true_pos"]
, aggregated_results["true_neg"]
, aggregated_results["false_neg"]
, aggregated_results["false_pos"]
))
assert sum([results_by_entity_type[ent_type][err_type]
for ent_type in results_by_entity_type
for err_type in results_by_entity_type[ent_type]]) == sum(aggregated_results.values())
logger.info("Precision and recall averaged over %i documents (documents with NIL-entities only are excluded)" % len(precisions))
print("Precision %.2f%%" % (scores["precision"]*100))
print("Recall %.2f%%" % (scores["recall"]*100))
print("Fscore %.2f%%" % (scores["fscore"]*100))
print("Accuracy %.2f%%" % (scores["accuracy"]*100))
accuracy_by_type = {}
print("\nAccuracy by type:")
for entity_type in sorted(results_by_entity_type.keys()):
true_matches = results_by_entity_type[entity_type]["true"]
false_matches = results_by_entity_type[entity_type]["false"]
total_matches = true_matches + false_matches
accuracy_by_type[entity_type] = true_matches / total_matches
print("%s: %.2f%%" % (entity_type, (accuracy_by_type[entity_type] * 100)))
return (scores, accuracy_by_type, disambig_results, disambig_errors)
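def _macro_ned_scores_sketch(per_document_counts):
    """Illustrative sketch, not part of the original module: how the
    macro-averaged scores reported by `evaluate_ned` are obtained from the
    per-document count dictionaries (keys "true_pos", "false_pos", "true_neg",
    "false_neg"). Precision and recall are first averaged over documents, and
    the F1 score is then computed on those averages."""
    precisions = [SimpleEvaluator.calc_precision(c) for c in per_document_counts]
    recalls = [SimpleEvaluator.calc_recall(c) for c in per_document_counts]
    prec = sum(precisions) / len(precisions)
    rec = sum(recalls) / len(recalls)
    fscore = 0.0 if prec == 0.0 and rec == 0.0 else 2 * (prec * rec) / (prec + rec)
    return {"precision": prec, "recall": rec, "fscore": fscore}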
def _evaluate_ned_file(docid, gold_disambiguations, gold_entities, gold_relations, target_disambiguations, strict=False):
"""
Evaluates NED of a single file.
"""
# TODO expect data in this format
unique_reference_urns = set() # for multiple relations having as arg1 entity X, count X only once
result = {"true_pos":0, "false_pos":0 ,"false_neg":0 ,"true_neg":0}
result_by_entity_type = {}
errors = {"true_pos":[], "false_pos":[], "false_neg":[], "true_neg":[]}
try:
assert len(gold_disambiguations)>0 and len(target_disambiguations)>0
except AssertionError as e:
logger.info("No disambiguations to evaluate in file %s" % docid)
# return the (empty) counts as well, so that the caller can unpack three values
return result, errors, result_by_entity_type
for disambiguation_id in gold_disambiguations:
is_relation_disambiguation = True if disambiguation_id.startswith('R') else False
try:
gold_disambiguation = gold_disambiguations[disambiguation_id]
gold_urn = CTS_URN(gold_disambiguation.strip()).get_urn_without_passage()
except BadCtsUrnSyntax as e:
logger.error("Skipping disambiguation %s-%s: gold URN malformed (\"%s\")" % (docid, disambiguation_id, gold_disambiguation))
return result, errors
try:
target_disambiguation = target_disambiguations[disambiguation_id]
target_urn = CTS_URN(target_disambiguation.strip()).get_urn_without_passage()
except BadCtsUrnSyntax as e:
logger.error("Skipping disambiguation %s-%s: target URN malformed (\"%s\")" % (docid, disambiguation_id, target_disambiguation))
continue
except AttributeError as e:
logger.error("Disambiguation %s-%s: target URN is None (\"%s\")" % (docid, disambiguation_id, target_disambiguation))
target_urn = None
except KeyError as e:
logger.debug("[%s] %s not contained in target: assuming a NIL entity" % (docid, disambiguation_id))
target_urn = NIL_ENTITY
continue
arg1_entity_id = None
if is_relation_disambiguation:
logger.debug("[%s] unique_reference_urns=%s" % (docid, unique_reference_urns))
arg1_entity_id = gold_relations[disambiguation_id]['arguments'][0]
if strict:
if "%s-R" % arg1_entity_id in unique_reference_urns:
logger.debug("%s was already considered; skipping this one" % "%s-R" % arg1_entity_id)
continue
else:
unique_reference_urns.add("%s-R" % arg1_entity_id)
# classify the error by type
if gold_urn == NIL_ENTITY:
error_type = "true_neg" if gold_urn == target_urn else "false_pos"
else:
# gold_urn is not a NIL entity
if target_urn == NIL_ENTITY:
error_type = "false_neg"
# neither gold_urn nor target_urn are NIL entities
else:
if gold_urn == target_urn:
error_type = "true_pos"
else:
error_type = "false_pos"
if gold_urn != NIL_ENTITY:
entity_type = gold_entities[disambiguation_id]["entity_type"] if not is_relation_disambiguation else "scope-%s" % gold_entities[arg1_entity_id]["entity_type"]
else:
entity_type = "NIL"
error_by_entity_type = "true" if error_type == "true_pos" or error_type == "true_neg" else "false"
if not entity_type in result_by_entity_type:
result_by_entity_type[entity_type] = {"true":0, "false":0}
result_by_entity_type[entity_type][error_by_entity_type]+=1
result[error_type]+=1
errors[error_type].append((docid, disambiguation_id, gold_urn, target_urn))
logger.debug("[%s-%s] Comparing %s with %s => %s" % (docid, disambiguation_id, gold_urn, target_urn, error_type))
logger.debug("Evaluated file %s: %s" % (docid, result))
return result, errors, result_by_entity_type
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
|
gpl-3.0
|
TNT-Samuel/Coding-Projects
|
DNS Server/Source/Lib/site-packages/winpython/disthelpers.py
|
2
|
32095
|
# -*- coding: utf-8 -*-
#
# Copyright © 2009-2011 CEA
# Pierre Raybaut
# Licensed under the terms of the CECILL License
# (see guidata/__init__.py for details)
# pylint: disable=W0613
"""
disthelpers
-----------
The ``guidata.disthelpers`` module provides helper functions for Python
package distribution on Microsoft Windows platforms with ``py2exe`` or on
all platforms thanks to ``cx_Freeze``.
"""
from __future__ import print_function
import sys
import os
import os.path as osp
import shutil
import traceback
import atexit
import imp
from subprocess import Popen, PIPE
import warnings
#==============================================================================
# Module, scripts, programs
#==============================================================================
def get_module_path(modname):
"""Return module *modname* base path"""
module = sys.modules.get(modname, __import__(modname))
return osp.abspath(osp.dirname(module.__file__))
#==============================================================================
# Dependency management
#==============================================================================
def get_changeset(path, rev=None):
"""Return Mercurial repository *path* revision number"""
args = ['hg', 'parent']
if rev is not None:
args += ['--rev', str(rev)]
process = Popen(args, stdout=PIPE, stderr=PIPE, cwd=path, shell=True)
try:
return process.stdout.read().splitlines()[0].split()[1]
except IndexError:
raise RuntimeError(process.stderr.read())
def prepend_module_to_path(module_path):
"""
Prepend to sys.path module located in *module_path*
Return string with module infos: name, revision, changeset
Use this function:
1) In your application to import local frozen copies of internal libraries
2) In your py2exe distributed package to add a text file containing the returned string
"""
if not osp.isdir(module_path):
# Assuming py2exe distribution
return
sys.path.insert(0, osp.abspath(module_path))
changeset = get_changeset(module_path)
name = osp.basename(module_path)
prefix = "Prepending module to sys.path"
message = prefix + ("%s [revision %s]" % (name, changeset)
).rjust(80 - len(prefix), ".")
print(message, file=sys.stderr)
if name in sys.modules:
sys.modules.pop(name)
nbsp = 0
for modname in sys.modules.keys():
if modname.startswith(name + '.'):
sys.modules.pop(modname)
nbsp += 1
warning = '(removed %s from sys.modules' % name
if nbsp:
warning += ' and %d subpackages' % nbsp
warning += ')'
print(warning.rjust(80), file=sys.stderr)
return message
def prepend_modules_to_path(module_base_path):
"""Prepend to sys.path all modules located in *module_base_path*"""
if not osp.isdir(module_base_path):
# Assuming py2exe distribution
return
fnames = [osp.join(module_base_path, name)
for name in os.listdir(module_base_path)]
messages = [prepend_module_to_path(dirname)
for dirname in fnames if osp.isdir(dirname)]
return os.linesep.join(messages)
#==============================================================================
# Distribution helpers
#==============================================================================
def _remove_later(fname):
"""Try to remove file later (at exit)"""
def try_to_remove(fname):
if osp.exists(fname):
os.remove(fname)
atexit.register(try_to_remove, osp.abspath(fname))
def get_msvc_version(python_version):
"""Return Microsoft Visual C++ version used to build this Python version"""
if python_version is None:
python_version = '2.7'
warnings.warn("assuming Python 2.7 target")
if python_version in ('2.6', '2.7', '3.0', '3.1', '3.2'):
# Python 2.6-2.7, 3.0-3.2 were built with Visual Studio 9.0.21022.8
# (i.e. Visual C++ 2008, not Visual C++ 2008 SP1!)
return "9.0.21022.8"
elif python_version in ('3.3', '3.4'):
# Python 3.3+ were built with Visual Studio 10.0.30319.1
# (i.e. Visual C++ 2010)
return '10.0'
elif python_version in ('3.5', '3.6'):
return '15.0'
elif python_version in ('3.7', '3.8'):
return '15.0'
else:
raise RuntimeError("Unsupported Python version %s" % python_version)
def get_msvc_dlls(msvc_version, architecture=None):
"""Get the list of Microsoft Visual C++ DLLs associated to
architecture and Python version, create the manifest file.
architecture: integer (32 or 64) -- if None, take the Python build arch
python_version: X.Y"""
current_architecture = 64 if sys.maxsize > 2**32 else 32
if architecture is None:
architecture = current_architecture
filelist = []
# simple vs2015 situation: nothing (system dll)
if msvc_version == '14.0':
return filelist
msvc_major = msvc_version.split('.')[0]
msvc_minor = msvc_version.split('.')[1]
if msvc_major == '9':
key = "1fc8b3b9a1e18e3b"
atype = "" if architecture == 64 else "win32"
arch = "amd64" if architecture == 64 else "x86"
groups = {
'CRT': ('msvcr90.dll', 'msvcp90.dll', 'msvcm90.dll'),
# 'OPENMP': ('vcomp90.dll',)
}
for group, dll_list in groups.items():
dlls = ''
for dll in dll_list:
dlls += ' <file name="%s" />%s' % (dll, os.linesep)
manifest =\
"""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<!-- Copyright (c) Microsoft Corporation. All rights reserved. -->
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<noInheritable/>
<assemblyIdentity
type="%(atype)s"
name="Microsoft.VC90.%(group)s"
version="%(version)s"
processorArchitecture="%(arch)s"
publicKeyToken="%(key)s"
/>
%(dlls)s</assembly>
""" % dict(version=msvc_version, key=key, atype=atype, arch=arch,
group=group, dlls=dlls)
vc90man = "Microsoft.VC90.%s.manifest" % group
open(vc90man, 'w').write(manifest)
_remove_later(vc90man)
filelist += [vc90man]
winsxs = osp.join(os.environ['windir'], 'WinSxS')
vcstr = '%s_Microsoft.VC90.%s_%s_%s' % (arch, group,
key, msvc_version)
for fname in os.listdir(winsxs):
path = osp.join(winsxs, fname)
if osp.isdir(path) and fname.lower().startswith(vcstr.lower()):
for dllname in os.listdir(path):
filelist.append(osp.join(path, dllname))
break
else:
raise RuntimeError("Microsoft Visual C++ %s DLLs version %s "\
"were not found" % (group, msvc_version))
elif msvc_major == '10' or msvc_major == '15': # 15 for vs 2015
namelist = [name % (msvc_major + msvc_minor) for name in
(
'msvcp%s.dll', 'msvcr%s.dll',
'vcomp%s.dll',
)]
if msvc_major == '15':
namelist = [name % ('14' + msvc_minor) for name in
(
'vcruntime%s.dll', 'msvcp%s.dll', 'vccorlib%s.dll',
'concrt%s.dll','vcomp%s.dll',
)]
windir = os.environ['windir']
is_64bit_windows = osp.isdir(osp.join(windir, "SysWOW64"))
# Reminder: WoW64 (*W*indows 32-bit *o*n *W*indows *64*-bit) is a
# subsystem of the Windows operating system capable of running 32-bit
# applications and is included on all 64-bit versions of Windows
# (source: http://en.wikipedia.org/wiki/WoW64)
#
# In other words, on a 64-bit system "System32" contains the 64-bit DLLs
# and applications, whereas "SysWOW64" contains the 32-bit ones.
sysdir = "System32"
if not is_64bit_windows and architecture == 64:
raise RuntimeError("Can't find 64-bit MSVC DLLs on a 32-bit OS")
if is_64bit_windows and architecture == 32:
sysdir = "SysWOW64"
for dllname in namelist:
fname = osp.join(windir, sysdir, dllname)
print('searching', fname )
if osp.exists(fname):
filelist.append(fname)
else:
raise RuntimeError("Microsoft Visual C++ DLLs version %s "\
"were not found" % msvc_version)
else:
raise RuntimeError("Unsupported MSVC version %s" % msvc_version)
return filelist
def create_msvc_data_files(architecture=None, python_version=None,
verbose=False):
"""Including Microsoft Visual C++ DLLs"""
msvc_version = get_msvc_version(python_version)
filelist = get_msvc_dlls(msvc_version, architecture=architecture)
print(create_msvc_data_files.__doc__)
if verbose:
for name in filelist:
print(" ", name)
msvc_major = msvc_version.split('.')[0]
if msvc_major == '9':
return [("Microsoft.VC90.CRT", filelist),]
else:
return [("", filelist),]
def to_include_files(data_files):
"""Convert data_files list to include_files list
data_files:
* this is the ``py2exe`` data files format
* list of tuples (dest_dirname, (src_fname1, src_fname2, ...))
include_files:
* this is the ``cx_Freeze`` data files format
* list of tuples ((src_fname1, dst_fname1),
(src_fname2, dst_fname2), ...))
"""
include_files = []
for dest_dir, fnames in data_files:
for source_fname in fnames:
dest_fname = osp.join(dest_dir, osp.basename(source_fname))
include_files.append((source_fname, dest_fname))
return include_files
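# Illustrative example, not part of the original module, using made-up
# filenames: the py2exe entry ("images", ("logo.png", "icon.svg")) becomes the
# cx_Freeze entries ("logo.png", osp.join("images", "logo.png")) and
# ("icon.svg", osp.join("images", "icon.svg")), i.e. one (source, dest) pair
# per file instead of one (dest_dir, sources) pair per directory.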
def strip_version(version):
"""Return version number with digits only
(Windows does not support strings in version numbers)"""
return version.split('beta')[0].split('alpha'
)[0].split('rc')[0].split('dev')[0]
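# For instance (illustrative only), strip_version("2.3.0beta1") and
# strip_version("2.3.0rc2") both return "2.3.0", which is the digits-only form
# that the Windows version resource accepts.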
def remove_dir(dirname):
"""Remove directory *dirname* and all its contents
Print details about the operation (progress, success/failure)"""
print("Removing directory '%s'..." % dirname, end=' ')
try:
shutil.rmtree(dirname, ignore_errors=True)
print("OK")
except Exception:
print("Failed!")
traceback.print_exc()
class Distribution(object):
"""Distribution object
Help creating an executable using ``py2exe`` or ``cx_Freeze``
"""
DEFAULT_EXCLUDES = ['Tkconstants', 'Tkinter', 'tcl', 'tk', 'wx',
'_imagingtk', 'curses', 'PIL._imagingtk', 'ImageTk',
'PIL.ImageTk', 'FixTk', 'bsddb', 'email',
'pywin.debugger', 'pywin.debugger.dbgcon',
'matplotlib']
DEFAULT_INCLUDES = []
DEFAULT_BIN_EXCLUDES = ['MSVCP100.dll', 'MSVCP90.dll', 'w9xpopen.exe',
'MSVCP80.dll', 'MSVCR80.dll']
DEFAULT_BIN_INCLUDES = []
DEFAULT_BIN_PATH_INCLUDES = []
DEFAULT_BIN_PATH_EXCLUDES = []
def __init__(self):
self.name = None
self.version = None
self.description = None
self.target_name = None
self._target_dir = None
self.icon = None
self.data_files = []
self.includes = self.DEFAULT_INCLUDES
self.excludes = self.DEFAULT_EXCLUDES
self.bin_includes = self.DEFAULT_BIN_INCLUDES
self.bin_excludes = self.DEFAULT_BIN_EXCLUDES
self.bin_path_includes = self.DEFAULT_BIN_PATH_INCLUDES
self.bin_path_excludes = self.DEFAULT_BIN_PATH_EXCLUDES
self.msvc = os.name == 'nt'
self._py2exe_is_loaded = False
self._pyqt4_added = False
self._pyside_added = False
# Attributes relative to cx_Freeze:
self.executables = []
@property
def target_dir(self):
"""Return target directory (default: 'dist')"""
dirname = self._target_dir
if dirname is None:
return 'dist'
else:
return dirname
@target_dir.setter # analysis:ignore
def target_dir(self, value):
self._target_dir = value
def setup(self, name, version, description, script,
target_name=None, target_dir=None, icon=None,
data_files=None, includes=None, excludes=None,
bin_includes=None, bin_excludes=None,
bin_path_includes=None, bin_path_excludes=None, msvc=None):
"""Setup distribution object
Notes:
* bin_path_excludes is specific to cx_Freeze (ignored if it's None)
* if msvc is None, it's set to True by default on Windows
platforms, False on non-Windows platforms
"""
self.name = name
self.version = strip_version(version) if os.name == 'nt' else version
self.description = description
assert osp.isfile(script)
self.script = script
self.target_name = target_name
self.target_dir = target_dir
self.icon = icon
if data_files is not None:
self.data_files += data_files
if includes is not None:
self.includes += includes
if excludes is not None:
self.excludes += excludes
if bin_includes is not None:
self.bin_includes += bin_includes
if bin_excludes is not None:
self.bin_excludes += bin_excludes
if bin_path_includes is not None:
self.bin_path_includes += bin_path_includes
if bin_path_excludes is not None:
self.bin_path_excludes += bin_path_excludes
if msvc is not None:
self.msvc = msvc
if self.msvc:
try:
self.data_files += create_msvc_data_files()
except IOError:
print("Setting the msvc option to False "\
"will avoid this error", file=sys.stderr)
raise
# cx_Freeze:
self.add_executable(self.script, self.target_name, icon=self.icon)
def add_text_data_file(self, filename, contents):
"""Create temporary data file *filename* with *contents*
and add it to *data_files*"""
open(filename, 'wb').write(contents)
self.data_files += [("", (filename, ))]
_remove_later(filename)
def add_data_file(self, filename, destdir=''):
self.data_files += [(destdir, (filename, ))]
#------ Adding packages
def add_pyqt4(self):
"""Include module PyQt4 to the distribution"""
if self._pyqt4_added:
return
self._pyqt4_added = True
self.includes += ['sip', 'PyQt4.Qt', 'PyQt4.QtSvg', 'PyQt4.QtNetwork']
import PyQt4
pyqt_path = osp.dirname(PyQt4.__file__)
# Configuring PyQt4
conf = os.linesep.join(["[Paths]", "Prefix = .", "Binaries = ."])
self.add_text_data_file('qt.conf', conf)
# Including plugins (.svg icons support, QtDesigner support, ...)
if self.msvc:
vc90man = "Microsoft.VC90.CRT.manifest"
pyqt_tmp = 'pyqt_tmp'
if osp.isdir(pyqt_tmp):
shutil.rmtree(pyqt_tmp)
os.mkdir(pyqt_tmp)
vc90man_pyqt = osp.join(pyqt_tmp, vc90man)
man = open(vc90man, "r").read().replace('<file name="',
'<file name="Microsoft.VC90.CRT\\')
open(vc90man_pyqt, 'w').write(man)
for dirpath, _, filenames in os.walk(osp.join(pyqt_path,
"plugins")):
filelist = [osp.join(dirpath, f) for f in filenames
if osp.splitext(f)[1] in ('.dll', '.py')]
if self.msvc and [f for f in filelist
if osp.splitext(f)[1] == '.dll']:
# Where there is a DLL built with Microsoft Visual C++ 2008,
# there must be a manifest file as well...
# ...congrats to Microsoft for this great simplification!
filelist.append(vc90man_pyqt)
self.data_files.append( (dirpath[len(pyqt_path)+len(os.pathsep):],
filelist) )
if self.msvc:
atexit.register(remove_dir, pyqt_tmp)
# Including french translation
fr_trans = osp.join(pyqt_path, "translations", "qt_fr.qm")
if osp.exists(fr_trans):
self.data_files.append(('translations', (fr_trans, )))
def add_pyside(self):
"""Include module PySide to the distribution"""
if self._pyside_added:
return
self._pyside_added = True
self.includes += ['PySide.QtDeclarative', 'PySide.QtHelp',
'PySide.QtMultimedia', 'PySide.QtNetwork',
'PySide.QtOpenGL', 'PySide.QtScript',
'PySide.QtScriptTools', 'PySide.QtSql',
'PySide.QtSvg', 'PySide.QtTest',
'PySide.QtUiTools', 'PySide.QtWebKit',
'PySide.QtXml', 'PySide.QtXmlPatterns']
import PySide
pyside_path = osp.dirname(PySide.__file__)
# Configuring PySide
conf = os.linesep.join(["[Paths]", "Prefix = .", "Binaries = ."])
self.add_text_data_file('qt.conf', conf)
# Including plugins (.svg icons support, QtDesigner support, ...)
if self.msvc:
vc90man = "Microsoft.VC90.CRT.manifest"
os.mkdir('pyside_tmp')
vc90man_pyside = osp.join('pyside_tmp', vc90man)
man = open(vc90man, "r").read().replace('<file name="',
'<file name="Microsoft.VC90.CRT\\')
open(vc90man_pyside, 'w').write(man)
for dirpath, _, filenames in os.walk(osp.join(pyside_path, "plugins")):
filelist = [osp.join(dirpath, f) for f in filenames
if osp.splitext(f)[1] in ('.dll', '.py')]
if self.msvc and [f for f in filelist
if osp.splitext(f)[1] == '.dll']:
# Where there is a DLL built with Microsoft Visual C++ 2008,
# there must be a manifest file as well...
# ...congrats to Microsoft for this great simplification!
filelist.append(vc90man_pyside)
self.data_files.append(
(dirpath[len(pyside_path)+len(os.pathsep):], filelist) )
# Replacing dlls found by cx_Freeze by the real PySide Qt dlls:
# (http://qt-project.org/wiki/Packaging_PySide_applications_on_Windows)
dlls = [osp.join(pyside_path, fname)
for fname in os.listdir(pyside_path)
if osp.splitext(fname)[1] == '.dll']
self.data_files.append( ('', dlls) )
if self.msvc:
atexit.register(remove_dir, 'pyside_tmp')
# Including french translation
fr_trans = osp.join(pyside_path, "translations", "qt_fr.qm")
if osp.exists(fr_trans):
self.data_files.append(('translations', (fr_trans, )))
def add_qt_bindings(self):
"""Include Qt bindings, i.e. PyQt4 or PySide"""
try:
imp.find_module('PyQt4')
self.add_modules('PyQt4')
except ImportError:
self.add_modules('PySide')
def add_matplotlib(self):
"""Include module Matplotlib to the distribution"""
if 'matplotlib' in self.excludes:
self.excludes.pop(self.excludes.index('matplotlib'))
try:
import matplotlib.numerix # analysis:ignore
self.includes += ['matplotlib.numerix.ma',
'matplotlib.numerix.fft',
'matplotlib.numerix.linear_algebra',
'matplotlib.numerix.mlab',
'matplotlib.numerix.random_array']
except ImportError:
pass
self.add_module_data_files('matplotlib', ('mpl-data', ),
('.conf', '.glade', '', '.png', '.svg',
'.xpm', '.ppm', '.npy', '.afm', '.ttf'))
def add_modules(self, *module_names):
"""Include module *module_name*"""
for module_name in module_names:
print("Configuring module '%s'" % module_name)
if module_name == 'PyQt4':
self.add_pyqt4()
elif module_name == 'PySide':
self.add_pyside()
elif module_name == 'scipy.io':
self.includes += ['scipy.io.matlab.streams']
elif module_name == 'matplotlib':
self.add_matplotlib()
elif module_name == 'h5py':
import h5py
for attr in ['_stub', '_sync', 'utils', '_conv', '_proxy',
'defs']:
if hasattr(h5py, attr):
self.includes.append('h5py.%s' % attr)
if self.bin_path_excludes is not None and os.name == 'nt':
# Specific to cx_Freeze on Windows: avoid including a zlib dll
# built with another version of Microsoft Visual Studio
self.bin_path_excludes += [r'C:\Program Files',
r'C:\Program Files (x86)']
self.data_files.append( # necessary for cx_Freeze only
('', (osp.join(get_module_path('h5py'), 'zlib1.dll'), ))
)
elif module_name in ('docutils', 'rst2pdf', 'sphinx'):
self.includes += ['docutils.writers.null',
'docutils.languages.en',
'docutils.languages.fr']
if module_name == 'rst2pdf':
self.add_module_data_files("rst2pdf", ("styles", ),
('.json', '.style'),
copy_to_root=True)
if module_name == 'sphinx':
import sphinx.ext
for fname in os.listdir(osp.dirname(sphinx.ext.__file__)):
if osp.splitext(fname)[1] == '.py':
modname = 'sphinx.ext.%s' % osp.splitext(fname)[0]
self.includes.append(modname)
elif module_name == 'pygments':
self.includes += ['pygments', 'pygments.formatters',
'pygments.lexers', 'pygments.lexers.agile']
elif module_name == 'zmq':
# FIXME: this is not working, yet... (missing DLL)
self.includes += ['zmq', 'zmq.core._poll', 'zmq.core._version', 'zmq.core.constants', 'zmq.core.context', 'zmq.core.device', 'zmq.core.error', 'zmq.core.message', 'zmq.core.socket', 'zmq.core.stopwatch']
if os.name == 'nt':
self.bin_includes += ['libzmq.dll']
elif module_name == 'guidata':
self.add_module_data_files('guidata', ("images", ),
('.png', '.svg'), copy_to_root=False)
try:
imp.find_module('PyQt4')
self.add_pyqt4()
except ImportError:
self.add_pyside()
elif module_name == 'guiqwt':
self.add_module_data_files('guiqwt', ("images", ),
('.png', '.svg'), copy_to_root=False)
if os.name == 'nt':
# Specific to cx_Freeze: including manually MinGW DLLs
self.bin_includes += ['libgcc_s_dw2-1.dll',
'libstdc++-6.dll']
else:
try:
# Modules based on the same scheme as guidata and guiqwt
self.add_module_data_files(module_name, ("images", ),
('.png', '.svg'), copy_to_root=False)
except IOError:
raise RuntimeError("Module not supported: %s" % module_name)
def add_module_data_dir(self, module_name, data_dir_name, extensions,
copy_to_root=True, verbose=False,
exclude_dirs=[]):
"""
Collect data files in *data_dir_name* for module *module_name*
and add them to *data_files*
*extensions*: list of file extensions, e.g. ('.png', '.svg')
"""
module_dir = get_module_path(module_name)
nstrip = len(module_dir) + len(osp.sep)
data_dir = osp.join(module_dir, data_dir_name)
if not osp.isdir(data_dir):
raise IOError("Directory not found: %s" % data_dir)
for dirpath, _dirnames, filenames in os.walk(data_dir):
dirname = dirpath[nstrip:]
if osp.basename(dirpath) in exclude_dirs:
continue
if not copy_to_root:
dirname = osp.join(module_name, dirname)
pathlist = [osp.join(dirpath, f) for f in filenames
if osp.splitext(f)[1].lower() in extensions]
self.data_files.append( (dirname, pathlist) )
if verbose:
for name in pathlist:
print(" ", name)
def add_module_data_files(self, module_name, data_dir_names, extensions,
copy_to_root=True, verbose=False,
exclude_dirs=[]):
"""
Collect data files for module *module_name* and add them to *data_files*
*data_dir_names*: list of dirnames, e.g. ('images', )
*extensions*: list of file extensions, e.g. ('.png', '.svg')
"""
print("Adding module '%s' data files in %s (%s)"\
% (module_name, ", ".join(data_dir_names), ", ".join(extensions)))
module_dir = get_module_path(module_name)
for data_dir_name in data_dir_names:
self.add_module_data_dir(module_name, data_dir_name, extensions,
copy_to_root, verbose, exclude_dirs)
translation_file = osp.join(module_dir, "locale", "fr", "LC_MESSAGES",
"%s.mo" % module_name)
if osp.isfile(translation_file):
self.data_files.append((osp.join(module_name, "locale", "fr",
"LC_MESSAGES"), (translation_file, )))
print("Adding module '%s' translation file: %s" % (module_name,
osp.basename(translation_file)))
def build(self, library, cleanup=True, create_archive=None):
"""Build executable with given library.
library:
* 'py2exe': deploy using the `py2exe` library
* 'cx_Freeze': deploy using the `cx_Freeze` library
cleanup: remove 'build/dist' directories before building distribution
create_archive (requires the executable `zip`):
* None or False: do nothing
* 'add': add target directory to a ZIP archive
* 'move': move target directory to a ZIP archive
"""
if library == 'py2exe':
self.build_py2exe(cleanup=cleanup,
create_archive=create_archive)
elif library == 'cx_Freeze':
self.build_cx_freeze(cleanup=cleanup,
create_archive=create_archive)
else:
raise RuntimeError("Unsupported library %s" % library)
def __cleanup(self):
"""Remove old build and dist directories"""
remove_dir("build")
if osp.isdir("dist"):
remove_dir("dist")
remove_dir(self.target_dir)
def __create_archive(self, option):
"""Create a ZIP archive
option:
* 'add': add target directory to a ZIP archive
* 'move': move target directory to a ZIP archive
"""
name = self.target_dir
os.system('zip "%s.zip" -r "%s"' % (name, name))
if option == 'move':
shutil.rmtree(name)
def build_py2exe(self, cleanup=True, compressed=2, optimize=2,
company_name=None, copyright=None, create_archive=None):
"""Build executable with py2exe
cleanup: remove 'build/dist' directories before building distribution
create_archive (requires the executable `zip`):
* None or False: do nothing
* 'add': add target directory to a ZIP archive
* 'move': move target directory to a ZIP archive
"""
from distutils.core import setup
import py2exe # Patching distutils -- analysis:ignore
self._py2exe_is_loaded = True
if cleanup:
self.__cleanup()
sys.argv += ["py2exe"]
options = dict(compressed=compressed, optimize=optimize,
includes=self.includes, excludes=self.excludes,
dll_excludes=self.bin_excludes,
dist_dir=self.target_dir)
windows = dict(name=self.name, description=self.description,
script=self.script, icon_resources=[(0, self.icon)],
bitmap_resources=[], other_resources=[],
dest_base=osp.splitext(self.target_name)[0],
version=self.version,
company_name=company_name, copyright=copyright)
setup(data_files=self.data_files, windows=[windows,],
options=dict(py2exe=options))
if create_archive:
self.__create_archive(create_archive)
def add_executable(self, script, target_name, icon=None):
"""Add executable to the cx_Freeze distribution
Not supported for py2exe"""
from cx_Freeze import Executable
base = None
if script.endswith('.pyw') and os.name == 'nt':
base = 'win32gui'
self.executables += [Executable(script, base=base, icon=icon,
targetName=target_name)]
def build_cx_freeze(self, cleanup=True, create_archive=None):
"""Build executable with cx_Freeze
cleanup: remove 'build/dist' directories before building distribution
create_archive (requires the executable `zip`):
* None or False: do nothing
* 'add': add target directory to a ZIP archive
* 'move': move target directory to a ZIP archive
"""
assert not self._py2exe_is_loaded, \
"cx_Freeze can't be executed after py2exe"
from cx_Freeze import setup
if cleanup:
self.__cleanup()
sys.argv += ["build"]
build_exe = dict(include_files=to_include_files(self.data_files),
includes=self.includes, excludes=self.excludes,
bin_excludes=self.bin_excludes,
bin_includes=self.bin_includes,
bin_path_includes=self.bin_path_includes,
bin_path_excludes=self.bin_path_excludes,
build_exe=self.target_dir)
setup(name=self.name, version=self.version,
description=self.description, executables=self.executables,
options=dict(build_exe=build_exe))
if create_archive:
self.__create_archive(create_archive)
|
gpl-3.0
|
DavidMcDonald1993/ghsom
|
ghsom_parallel-Copy1.py
|
1
|
17499
|
# coding: utf-8
# In[4]:
from __future__ import division
from sys import stdout
import numpy as np
import networkx as nx
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics import pairwise_distances_argmin_min
import matplotlib.pyplot as plt
from itertools import repeat
from Queue import Queue
from threading import Thread
from threading import current_thread
MIN_EXPANSION_SIZE = 10
MAX_DELETED_NEURONS = 3
#########################################################################################################################
##function to visualise graph
def visualise_graph(G, colours, layer):
## create new figure for graph plot
fig, ax = plt.subplots()
# graph layout
pos = nx.spring_layout(G)
#attributes in this graph
attributes = np.unique([v for k, v in nx.get_node_attributes(G, "assigned_community_layer_{}".format(layer)).items()])
# draw nodes -- colouring by cluster
for i in range(min(len(colours), len(attributes))):
node_list = [n for n in G.nodes() if G.node[n]["assigned_community_layer_{}".format(layer)] == attributes[i]]
colour = [colours[i] for n in range(len(node_list))]
nx.draw_networkx_nodes(G, pos, nodelist=node_list, node_color=colour)
#draw edges
nx.draw_networkx_edges(G, pos)
# draw labels
nx.draw_networkx_labels(G, pos)
#title of plot
plt.title('Nodes coloured by cluster, layer: {}'.format(layer))
#show plot
plt.show()
## visualise graph based on network clusters
def visualise_network(network, colours, layer):
#num neurons in lattice
num_neurons = len(network)
##create new figure for lattice plot
fig, ax = plt.subplots()
# graph layout
pos = nx.spring_layout(network)
# draw nodes -- colouring by cluster
for i in range(len(colours)):
nx.draw_networkx_nodes(network, pos, nodelist = [network.nodes()[i]], node_color = colours[i])
#draw edges
nx.draw_networkx_edges(network, pos)
# draw labels
nx.draw_networkx_labels(network, pos)
#label axes
plt.title('Neurons in lattice, layer: '+str(layer))
#show lattice plot
plt.show()
##########################################################################################################################
#function to generate real valued som for graph input
#the number of initial neurons is configurable (default: a single neuron)
def initialise_network(ID, X, starting_nodes=1):
#the network of neurons is represented as a networkx graph
network = nx.Graph(ID = ID)
#initialise the network with the starting neurons
network.add_nodes_from(range(1, starting_nodes + 1))
#id of nodes
for n in network.nodes():
network.node[n]["ID"] = "{}-{}".format(ID, str(n).zfill(2))
#connect nodes
for i in range(1, starting_nodes + 1):
for j in range(i + 1, starting_nodes + 1):
network.add_edge(i, j)
#assign a random vector in X to be the weight
V = X[np.random.randint(len(X), size=starting_nodes)]
return network, V
#########################################################################################################################
def precompute_sigmas(sigma, num_epochs):
return np.array([sigma * np.exp(-2 * sigma * e / num_epochs)
for e in range(num_epochs)])
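# Illustrative note, not part of the original script: the neighbourhood width
# decays exponentially, sigma_e = sigma * exp(-2 * sigma * e / num_epochs).
# For example, sigma=1.0 and num_epochs=4 gives roughly
# [1.0, 0.61, 0.37, 0.22] for epochs 0..3.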
##########################################################################################################################
##TODO
# function to train SOM on given graph
def train_network(X, network, V, num_epochs, eta_0, precomputed_sigmas):
#initial learning rate
eta = eta_0
    #list of all training pattern indices to visit
training_patterns = range(len(X))
#shortest path matrix
shortest_path = np.array(nx.floyd_warshall_numpy(network))
# net_change = np.zeros((V.shape))
for e in range(num_epochs):
#shuffle nodes
np.random.shuffle(training_patterns)
sigma = precomputed_sigmas[e]
# iterate through N nodes of graph
for i in training_patterns:
#data point to consider
x = X[i]
#determine winning neuron
closest_neuron = winning_neuron(x, V)
# update weights
deltaV = update_weights(x, V, closest_neuron, shortest_path[closest_neuron], eta, sigma)
#weight update (vectorised)
V += deltaV
# net_change += deltaV
# print "TRAINING COMPLETED"
# print net_change
# print np.linalg.norm(net_change, axis=1)
return V
##########################################################################################################################
# winning neuron
def winning_neuron(x, V):
distances = np.linalg.norm(x - V, axis=1)
return distances.argmin()
##########################################################################################################################
# function to update weights
def update_weights(x, V, winning_neuron, shortest_path_length, eta, sigma):
#weight update (vectorised)
return np.dot(np.diag(eta * np.exp(- shortest_path_length ** 2 / (2 * sigma ** 2))),
(x - V))
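#A minimal sketch (not called by the pipeline above): the diag-matrix product
#in update_weights is simply a per-neuron Gaussian coefficient applied to
#(x - V), i.e. it equals broadcasting h[:, None] * (x - V).
def _check_update_weights_equivalence():
    x = np.array([1.0, 2.0])
    V = np.array([[0.0, 0.0], [1.0, 1.0], [3.0, 0.0]])
    d = np.array([0.0, 1.0, 2.0]) #shortest-path distances from the winner
    eta, sigma = 0.1, 1.0
    h = eta * np.exp(-d ** 2 / (2 * sigma ** 2))
    assert np.allclose(update_weights(x, V, 0, d, eta, sigma),
                       h[:, None] * (x - V))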
########################################################################################################################
# assign nodes into clusters
def assign_nodes(names, X, network, V):
#distance from each datapoint (row) to each weight vector (column)
# distances = euclidean_distances(X, V)
#
arg_min_distances, min_distances = pairwise_distances_argmin_min(X, V)
#nodes corresponding to minimum index (of length len(X))
minimum_nodes = np.array([network.nodes()[n] for n in arg_min_distances])
#list of neurons with no assignments
empty_neurons = np.array([n for n in network.nodes() if n not in minimum_nodes])
if empty_neurons.size > 0:
################################################DELETION####################################################
#neighbours of deleted neurons
neighbour_lists = np.array([network.neighbors(n) for n in empty_neurons])
print "DELETING NODES: {}".format(empty_neurons)
#remove the nodes
network.remove_nodes_from(empty_neurons)
##remove from V
V = np.array([V[i] for i in range(len(V)) if i in arg_min_distances])
#compute distances between all neurons in input space
computed_neuron_distances = compute_euclidean_distances(network, V)
##connect separated components
for neighbour_list in neighbour_lists:
connect_components(network, neighbour_list, computed_neuron_distances)
############################################################################################################
#array of errors
errors = np.array([np.mean(min_distances[minimum_nodes == n]) for n in network.nodes()])
#compute MQE
MQE = np.mean(errors)
print "MQE={}, size of map={}".format(MQE, len(network))
##array of assignments
assignments_array = np.array([np.array([names[i] for i in np.where(minimum_nodes == n)[0]]) for n in network.nodes()])
    #zip with nodes
errors = {n: e for n, e in zip(network.nodes(), errors)}
assignments = {n: a for n, a in zip(network.nodes(), assignments_array)}
# print "ERRORS"
# print errors
# print "number of nodes assigned to neurons"
# print {n: len(a) for n, a in zip(network.nodes(), assignments_array)}
nx.set_node_attributes(network, "e", errors)
nx.set_node_attributes(network, "ls", assignments)
return MQE, empty_neurons.size, V
##########################################################################################################################
def compute_euclidean_distances(network, V):
distances = euclidean_distances(V)
return {network.nodes()[i] : {network.nodes()[j] : distances[i, j] for j in range(len(distances[i]))}
for i in range(len(distances))}
#########################################################################################################################
def connect_components(network, neighbour_list, computed_neuron_distances):
sub_network = network.subgraph(neighbour_list)
connected_components = [sub_network.subgraph(c) for c in nx.connected_components(sub_network)]
number_of_connected_components = len(connected_components)
for i in range(number_of_connected_components):
connected_component_1 = connected_components[i].nodes()
for j in range(i + 1, number_of_connected_components):
connected_component_2 = connected_components[j].nodes()
distances = np.array([[computed_neuron_distances[n1][n2] for n2 in connected_component_2]
for n1 in connected_component_1])
min_n1, min_n2 = np.unravel_index(distances.argmin(), distances.shape)
network.add_edge(connected_component_1[min_n1],
connected_component_2[min_n2])
##########################################################################################################################
##function to identify neuron with greatest error
def identify_error_unit(network):
errors = nx.get_node_attributes(network, "e")
# print "ERROR UNIT"
# print max(errors, key=errors.get)
return max(errors, key=errors.get)
##########################################################################################################################
def expand_network(ID, named_X, network, V, error_unit):
    #v is set to a random data vector from the error unit's assigned list
ls = network.node[error_unit]["ls"]
r = np.random.randint(len(ls))
v = named_X[ls[r]]
#zip nodes and distances
distances = zip(network.nodes(), np.linalg.norm(V - v, axis=1))
    #identify the neighbour of the error unit closest to the new weight vector
error_unit_neighbours = network.neighbors(error_unit)
#id of new node
new_node = max(network) + 1
#add new node to map
network.add_node(new_node)
##id
network.node[new_node]["ID"] = "{}-{}".format(ID, str(new_node).zfill(2))
#add edges to map
#connect error unit and new node
network.add_edge(error_unit, new_node)
if len(error_unit_neighbours) > 0:
##find closest neighbour
distances = {n: v for n, v in distances if n in error_unit_neighbours}
closest_neighbour = min(distances, key=distances.get)
#connect to error unit and closest neighbour
network.add_edge(closest_neighbour, new_node)
#add v to V
V = np.vstack([V, v])
return V
##########################################################################################################################
##########################################################################################################################
##GHSOM algorithm
def ghsom(ID, named_X, num_iter, eta, sigma, e_0, e_sg, e_en, q):
print "MQE_0={}, growth target={}".format(e_0, e_0 * e_sg)
#separate names and matrix of node embedding
names, X = zip(*named_X.items())
names = np.array(names)
X = np.array(X)
#create som for this neuron
network, V = initialise_network(ID, X)
#precompute sigmas
precomputed_sigmas = precompute_sigmas(sigma, num_iter)
    #train for num_iter epochs
V = train_network(X, network, V, num_iter, eta, precomputed_sigmas)
#classify nodes and compute error
MQE, num_deleted_neurons, V = assign_nodes(names, X, network, V)
##som growth phase
#repeat until error is low enough
while MQE > e_sg * e_0 and num_deleted_neurons < MAX_DELETED_NEURONS:
#find neuron with greatest error
error_unit = identify_error_unit(network)
#expand network
V = expand_network(ID, named_X, network, V, error_unit)
        #train for num_iter epochs
V = train_network(X, network, V, num_iter, eta, precomputed_sigmas)
#calculate mean network error
MQE, deleted_neurons, V = assign_nodes(names, X, network, V)
num_deleted_neurons += deleted_neurons
print "growth terminated, MQE: {}, target: {}, number of deleted neurons: {}".format(MQE,
e_0 * e_sg, num_deleted_neurons)
##neuron expansion phase
    #iterate through all neurons and find those whose error is large enough to expand
for _, d in network.nodes(data=True):
#unpack
node_id = d["ID"]
ls = d["ls"]
e = d["e"]
#check error
if (e > e_en * e_0 and len(ls) > MIN_EXPANSION_SIZE and num_deleted_neurons < MAX_DELETED_NEURONS):
# id = "{}-{}".format(ID, node_id)
sub_X = {k: named_X[k] for k in ls}
print "submitted job: ID={}, e={}, number of nodes={}".format(node_id, e, len(ls))
#add these parameters to the queue
q.put((node_id, sub_X, num_iter, eta, sigma, e, e_sg, e_en))
#return network
return network, MQE
##########################################################################################################################
##########################################################################################################################
def label_nodes(G, networks):
for _, network, _ in networks:
for _, d in network.nodes(data=True):
community = d["ID"]
layer = community.count("-")
assignment_string = "assigned_community_layer_{}".format(layer)
for node in d["ls"]:
G.node[node][assignment_string] = community
##########################################################################################################################
def NMI_one_layer(G, label, layer):
#actual community for this layer
actual_community_labels = np.array([v for k, v in nx.get_node_attributes(G, label).items()])
    #predicted community for this layer
predicted_community_labels = np.array([v for k, v in nx.get_node_attributes(G,
"assigned_community_layer_{}".format(layer)).items()])
print actual_community_labels
print predicted_community_labels
return met.normalized_mutual_info_score(actual_community_labels, predicted_community_labels)
def NMI_all_layers(G, labels):
return np.array([NMI_one_layer(G, labels[i], i + 1) for i in range(len(labels))])
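#A minimal sketch (assumes scikit-learn is installed, not called above):
#normalized mutual information is 1.0 for an identical partition under any
#relabelling and 0.0 for a partition that is independent of the ground truth.
def _demo_nmi():
    from sklearn import metrics as met
    truth = np.array([0, 0, 1, 1, 2, 2])
    relabelled = np.array([5, 5, 7, 7, 9, 9]) #same partition, new labels
    independent = np.array([0, 1, 0, 1, 0, 1]) #each cluster mixes all classes
    print met.normalized_mutual_info_score(truth, relabelled) #should print 1.0
    print met.normalized_mutual_info_score(truth, independent) #should print 0.0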
##########################################################################################################################
## get embedding
def get_embedding(G):
return np.array([v for k, v in nx.get_node_attributes(G, "embedding").items()])
##########################################################################################################################
def process_job(q, networks):
#unpack first element of queue
    #contains all the parameters for GHSOM
ID, X, num_iter, eta, sigma, e_0, e_sg, e_en = q.get()
#run GHSOM and return a network and MQE
n, e = ghsom(ID, X, num_iter, eta, sigma, e_0, e_sg, e_en, q)
#append result to networks list
networks.append((ID, n, e))
#mark task as done
q.task_done()
def worker(q, networks):
#continually poll queue for jobs
while True:
process_job(q, networks)
def main(params, filename, num_iter=10000, num_threads=1):
#network
G = nx.read_gpickle(filename)
#embedding matrix
X = get_embedding(G)
#zip with names
named_X = {k: v for k, v in zip(G.nodes(), X)}
##list of returned networks
networks = []
    #initialise worker queue
q = Queue()
    ##initial MQE is the mean distance of the data from its centroid
m = np.mean(X, axis=0)
MQE_0 = np.mean(np.linalg.norm(X - m, axis=1))
#add initial layer of ghsom to queue
q.put(("01", named_X, num_iter, params["eta"], params["sigma"], MQE_0, params["e_sg"], params["e_en"]))
if num_threads > 1:
#initialise threads
for i in range(num_threads):
t = Thread(target=worker, args=(q, networks))
t.setDaemon(True)
t.start()
#finally wait until queue is empty and all tasks are done
q.join()
else :
#single thread
while not q.empty():
process_job(q, networks)
print "DONE"
return G, networks
# In[2]:
# params = {'eta': 0.0001,
# 'sigma': 1,
# 'e_sg': 0.7,
# 'e_en': 1.0}
# In[3]:
# %prun G, networks = main(params=params, filename="embedded_benchmark.gpickle", num_threads=1, num_iter=1000)
# In[70]:
# label_nodes(G, networks)
# In[73]:
# NMI_all_layers(G, labels=["firstlevelcommunity"])
# In[74]:
# _, network, _ = networks[0]
# colours = np.random.rand(len(network), 3)
# In[75]:
# visualise_graph(G=G, colours=colours, layer=1)
|
gpl-2.0
|
jorgealmerio/QEsg
|
core/QEsg_05Perfil.py
|
1
|
7227
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
QEsg_05Perfil
A QGIS plugin
Plugin para Calculo de redes de esgotamento sanitario
-------------------
begin : 2016-03-15
git sha : $Format:%H$
copyright : (C) 2016 by Jorge Almerio
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from qgis.core import *
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from qgis.utils import *
import os.path
from QEsg_00Model import *
from QEsg_03Dimensionamento import *
from QEsg_05ProfileDialog import ProfileDialog
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse, Rectangle
from matplotlib.legend_handler import HandlerPatch
import matplotlib.patches as mpatches
import qgis
class HandlerEllipse(HandlerPatch):
def create_artists(self, legend, orig_handle,
xdescent, ydescent, width, height, fontsize, trans):
center = 0.5 * width - 0.5 * xdescent, 0.5 * height - 0.5 * ydescent
p = mpatches.Ellipse(xy=center, width=width + xdescent,
height=height + ydescent)
self.update_prop(p, orig_handle, legend)
p.set_transform(trans)
return [p]
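# A minimal sketch (not part of the plugin workflow): HandlerEllipse lets an
# Ellipse patch be drawn as an ellipse inside a legend via handler_map, which
# is how the interference marker is rendered in Desenha_Perfil below.
def _demo_handler_ellipse():
    fig, ax = plt.subplots()
    ell = Ellipse(xy=(0.5, 0.5), width=0.3, height=0.2,
                  edgecolor='r', fc='None', lw=2)
    ax.add_patch(ell)
    ax.legend([ell], ['interference'], handler_map={ell: HandlerEllipse()})
    plt.show()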
class QEsg_05Perfil:
def __init__(self):
self.dlg=ProfileDialog()
self.DimensClasse=QEsg_03Dimens()
def run(self):
# self.DimensClasse=QEsg_03Dimens()
vLayer=self.DimensClasse.PegaQEsgLayer('PIPES')
if vLayer==False:
aviso=QCoreApplication.translate('QEsg',u'Layer Tipo \'PIPES\' indefinido ou não encontrado!')
iface.messageBar().pushMessage("QEsg:", aviso, level=QgsMessageBar.WARNING, duration=4)
return False
valores=[]
idx = vLayer.fieldNameIndex('Coletor')
valInts = vLayer.uniqueValues( idx )
valores=[str(i) for i in valInts]
self.dlg.cmbColetores.clear()
self.dlg.cmbColetores.addItems(valores)
# show the dialog
self.dlg.show()
# Run the dialog event loop
result = self.dlg.exec_()
# See if OK was pressed
if result:
Coletor=self.dlg.cmbColetores.itemText(self.dlg.cmbColetores.currentIndex())
self.Desenha_Perfil(vLayer,Coletor)
def Desenha_Perfil(self,vLayer,Coletor=1):
campo='Coletor'
coletor=Coletor
request = QgsFeatureRequest()
expres='\"'+campo+'\"='+coletor
request.setFilterExpression(expres)
lstCTx=[]
lstCGx=[]
lstCTy=[]
cc=[]
cgs=[]
na=[]
ext=0
titulo=QCoreApplication.translate('QEsg','Coletor ')+Coletor
plt.figure(num=titulo)
ax = plt.gca()
        #List the interferences of all reaches only once
lstIds,lstTr_inter=self.DimensClasse.Lista_Interferencias(vLayer)
HasInter=False
for feat in vLayer.getFeatures(request):
ident=feat['DC_ID']
lstCTx.append(ext)
lstCGx.append(ext)
ctm=feat['CTM']
ccm=feat['CCM']
namon=feat['NA_MON']
diam=feat['DIAMETER']/1000.
lstCTy.append(ctm)
cc.append(ccm)
na.append(namon)
cgs.append(ccm+diam)
self.Desenha_PV(ax, ext, ctm, ccm, .08, .01)
tr_id=feat.id()
            #list only the interferences of this reach
interfs=[[distMont,cs,ci,tipoInt] for id,distMont,cs,ci,tipoInt in lstTr_inter if id == tr_id]
for distMont,cs,ci,tipoInt in interfs:
locX=ext+distMont
if tipoInt=='TN':
lstCTx.append(locX)
lstCTy.append(cs)
else:
ellipse = Ellipse(xy=(locX, (cs+ci)/2.), width=cs-ci, height=cs-ci,
edgecolor='r', fc='None', lw=2)
intLine=ax.add_patch(ellipse)
HasInter=True
ctj=feat['CTJ']
ccj=feat['CCJ']
najus=feat['NA_JUS']
plt.annotate(ident,(ext,ctm))
ext+=feat['LENGTH']
lstCTx.append(ext)
lstCGx.append(ext)
lstCTy.append(ctj)
cc.append(ccj)
na.append(najus)
cgs.append(ccj+diam)
        #end of the reaches (Trechos) loop
#Draw last PV
self.Desenha_PV(ax, ext, ctj, ccj, .08, .01)
ctLine,=plt.plot(lstCTx,lstCTy,color='magenta')
cgsLine,=plt.plot(lstCGx,cgs,color='green')
naLine,=plt.plot(lstCGx,na,color='cyan')
cgiLine,=plt.plot(lstCGx,cc,color='blue')
plt.xlabel(QCoreApplication.translate('QEsg',u'Distância (m)'))
plt.ylabel(QCoreApplication.translate('QEsg','Cota (m)'))
plt.grid(True)
LegLines=[ctLine,cgsLine,naLine,cgiLine]
subs=[QCoreApplication.translate('QEsg','Cota do Terreno'),
QCoreApplication.translate('QEsg','Cota da Geratriz Superior'),
QCoreApplication.translate('QEsg','Cota do NA'),
QCoreApplication.translate('QEsg','Cota da Geratriz Inferior')
]
#QCoreApplication.translate('QEsg','PV\'s')
if HasInter:
LegLines.append(intLine)
subs.append(QCoreApplication.translate('QEsg',u'Interferências'))
hndMap={intLine: HandlerEllipse()}
else:
hndMap={}
plt.legend(LegLines,subs,handler_map=hndMap,loc='best')
plt.title(titulo)
plt.show()
plt.draw()
def Desenha_PV(self,ax,ext,ctm,ccm,pvDiam,thick):
#Add PV wall
        #thick=.1 #wall thickness
#pvDiam=.8+2.*thick #PV diam
pvBLx=(ext-pvDiam/2.) #PV Bottom Left X
pvBLy=ccm-thick #PV Bottom Left Y
pvH=ctm-ccm+thick #PV Height
rect = plt.Rectangle((pvBLx, pvBLy), pvDiam, pvH, facecolor="#aaaaaa",alpha=.70)
ax.add_patch(rect)
#Add PV
#pvDiam=.8 #PV diam
pvBLx=ext-pvDiam/2. #PV Bottom Left X
pvBLy=ccm #PV Bottom Left Y
pvH=ctm-ccm-thick/2. #PV Height
rect = plt.Rectangle((pvBLx, pvBLy), pvDiam, pvH, facecolor="white")
ax.add_patch(rect)
        #Vertical line along the PV (manhole) axis
plt.plot([ext,ext],[ctm,ccm],color='black',linestyle='--')
|
gpl-3.0
|
YasukeXXX/Heppoco
|
real-time-recog.py
|
1
|
2903
|
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import chainer
from chainer import computational_graph
from chainer import cuda
import chainer.functions as F
import chainer.links as L
from chainer import optimizers , Variable
from chainer import serializers
from chainer import link
import sys
import cPickle as pickle
import os
import threading
import PIL.Image as Image
import cv2
func = pickle.load(open('googlenet.pkl', 'rb'))
synset_words = np.array([line[10:-1] for line in open(os.path.join('ilsvrc', 'synset_words.txt'), 'r')])
in_size = 224
cropwidth = 256 - in_size
start = cropwidth // 2
stop = start + in_size
# load the mean image
meanfile = os.path.join('ilsvrc', 'ilsvrc_2012_mean.npy')
mean_image = np.load(meanfile)
mean_image = mean_image[:, start:stop, start:stop].copy()
def predict(image):
"""画像を判別"""
global mean_image, in_size, cropwidth, start, stop
def swap(x):
x = np.array(x)[:, :, ::-1]
x = np.swapaxes(x, 0, 2)
x = np.swapaxes(x, 1, 2)
return x
x_data = np.ndarray((1, 3, in_size, in_size), dtype=np.float32)
image = swap(image)
image = image[:, start:stop, start:stop].copy().astype(np.float32)
x_data[0] = image-mean_image
x = chainer.Variable(x_data, volatile=True)
y, = func(inputs={'data': x}, outputs=['loss3/classifier'], train=False)
synset_i = y.data.argmax(axis=1)
return synset_words[synset_i]
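# A minimal sketch (not called anywhere above): the nested swap() helper in
# predict() (channel reversal followed by two swapaxes calls) is equivalent
# to a single transpose into channel-first (C, H, W) layout.
def _check_swap_equivalence():
    img = np.arange(2 * 3 * 3).reshape(2, 3, 3) # dummy H x W x C image
    swapped = np.swapaxes(np.swapaxes(img[:, :, ::-1], 0, 2), 1, 2)
    assert np.array_equal(swapped, np.transpose(img[:, :, ::-1], (2, 0, 1)))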
def cap():
"""リアルタイムで画像をキャプチャする"""
global capture,msg
fontface=cv2.FONT_ITALIC
fontscale=1.0
bold = 4
color=(255,0,255)
while True:
ret, image = capture.read()
if ret == False:
cv2.destroyAllWindows()
break
location=(0,image.shape[0]/2)
cv2.putText(image,msg,location,fontface,fontscale,color,bold)
cv2.imshow("Image Recognition", image)
if cv2.waitKey(33) >= 0:
# cv2.imwrite("image.png", image)
cv2.destroyAllWindows()
break
print 'out'
cv2.destroyAllWindows()
def main():
"""取り込まれた画像を判別してラベルをセット"""
global msg, capture
while True:
ret, image = capture.read()
if ret == False:
break
img = Image.fromarray(image[::-1, :, ::-1].copy()).resize((256, 256), Image.ANTIALIAS)
        # store the label in the variable shared with cap()
msg = predict(img)[0]
        # exit on key press
if cv2.waitKey(33) >= 0:
break
if capThread.isAlive() == False:
break
capture = cv2.VideoCapture(0)
msg = ''
cv2.namedWindow("Image Recognition", cv2.WINDOW_NORMAL)
capture.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH,324) #2592
capture.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT,228) #1944
capThread = threading.Thread(target=cap, name='cap')
capThread.setDaemon(True)
capThread.start()
main()
|
gpl-3.0
|
pelson/cartopy
|
lib/cartopy/examples/utm_all_zones.py
|
4
|
1319
|
"""
Displaying all 60 zones of the UTM projection
---------------------------------------------
This example displays all 60 zones of the Universal Transverse Mercator
projection next to each other in a figure.
First we create a figure with 60 subplots in one row.
Next we set the projection of each axis in the figure to a specific UTM zone.
Then we add coastlines, gridlines and the number of the zone.
Finally we add a supertitle and display the figure.
"""
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
def main():
# Create a list of integers from 1 - 60
zones = range(1, 61)
# Create a figure
fig = plt.figure(figsize=(18, 6))
# Loop through each zone in the list
for zone in zones:
# Add GeoAxes object with specific UTM zone projection to the figure
ax = fig.add_subplot(1, len(zones), zone,
projection=ccrs.UTM(zone=zone,
southern_hemisphere=True))
# Add coastlines, gridlines and zone number for the subplot
ax.coastlines(resolution='110m')
ax.gridlines()
ax.set_title(zone)
# Add a supertitle for the figure
fig.suptitle("UTM Projection - Zones")
# Display the figure
plt.show()
if __name__ == '__main__':
main()
|
lgpl-3.0
|
rbalda/neural_ocr
|
env/lib/python2.7/site-packages/scipy/stats/stats.py
|
3
|
173343
|
# Copyright (c) Gary Strangman. All rights reserved
#
# Disclaimer
#
# This software is provided "as-is". There are no expressed or implied
# warranties of any kind, including, but not limited to, the warranties
# of merchantability and fitness for a given application. In no event
# shall Gary Strangman be liable for any direct, indirect, incidental,
# special, exemplary or consequential damages (including, but not limited
# to, loss of use, data or profits, or business interruption) however
# caused and on any theory of liability, whether in contract, strict
# liability or tort (including negligence or otherwise) arising in any way
# out of the use of this software, even if advised of the possibility of
# such damage.
#
#
# Heavily adapted for use by SciPy 2002 by Travis Oliphant
"""
A collection of basic statistical functions for python. The function
names appear below.
Some scalar functions defined here are also available in the scipy.special
package where they work on arbitrary sized arrays.
Disclaimers: The function list is obviously incomplete and, worse, the
functions are not optimized. All functions have been tested (some more
so than others), but they are far from bulletproof. Thus, as with any
free software, no warranty or guarantee is expressed or implied. :-) A
few extra functions that don't appear in the list below can be found by
interested treasure-hunters. These functions don't necessarily have
both list and array versions but were deemed useful.
Central Tendency
----------------
.. autosummary::
:toctree: generated/
gmean
hmean
mode
Moments
-------
.. autosummary::
:toctree: generated/
moment
variation
skew
kurtosis
normaltest
Moments Handling NaN:
.. autosummary::
:toctree: generated/
nanmean
nanmedian
nanstd
Altered Versions
----------------
.. autosummary::
:toctree: generated/
tmean
tvar
tstd
tsem
describe
Frequency Stats
---------------
.. autosummary::
:toctree: generated/
itemfreq
scoreatpercentile
percentileofscore
histogram
cumfreq
relfreq
Variability
-----------
.. autosummary::
:toctree: generated/
obrientransform
signaltonoise
sem
Trimming Functions
------------------
.. autosummary::
:toctree: generated/
threshold
trimboth
trim1
Correlation Functions
---------------------
.. autosummary::
:toctree: generated/
pearsonr
fisher_exact
spearmanr
pointbiserialr
kendalltau
linregress
theilslopes
Inferential Stats
-----------------
.. autosummary::
:toctree: generated/
ttest_1samp
ttest_ind
ttest_ind_from_stats
ttest_rel
chisquare
power_divergence
ks_2samp
mannwhitneyu
ranksums
wilcoxon
kruskal
friedmanchisquare
combine_pvalues
Probability Calculations
------------------------
.. autosummary::
:toctree: generated/
chisqprob
betai
ANOVA Functions
---------------
.. autosummary::
:toctree: generated/
f_oneway
f_value
Support Functions
-----------------
.. autosummary::
:toctree: generated/
ss
square_of_sums
rankdata
References
----------
.. [CRCProbStat2000] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
"""
from __future__ import division, print_function, absolute_import
import warnings
import math
from collections import namedtuple
from scipy._lib.six import xrange
# Scipy imports.
from scipy._lib.six import callable, string_types
from numpy import array, asarray, ma, zeros
import scipy.special as special
import scipy.linalg as linalg
import numpy as np
from . import distributions
from . import mstats_basic
from ._distn_infrastructure import _lazywhere
from ._stats_mstats_common import _find_repeats, linregress, theilslopes
from ._rank import tiecorrect
__all__ = ['find_repeats', 'gmean', 'hmean', 'mode', 'tmean', 'tvar',
'tmin', 'tmax', 'tstd', 'tsem', 'moment', 'variation',
'skew', 'kurtosis', 'describe', 'skewtest', 'kurtosistest',
'normaltest', 'jarque_bera', 'itemfreq',
'scoreatpercentile', 'percentileofscore', 'histogram',
'histogram2', 'cumfreq', 'relfreq', 'obrientransform',
'signaltonoise', 'sem', 'zmap', 'zscore', 'threshold',
'sigmaclip', 'trimboth', 'trim1', 'trim_mean', 'f_oneway',
'pearsonr', 'fisher_exact', 'spearmanr', 'pointbiserialr',
'kendalltau', 'linregress', 'theilslopes', 'ttest_1samp',
'ttest_ind', 'ttest_ind_from_stats', 'ttest_rel', 'kstest',
'chisquare', 'power_divergence', 'ks_2samp', 'mannwhitneyu',
'tiecorrect', 'ranksums', 'kruskal', 'friedmanchisquare',
'chisqprob', 'betai',
'f_value_wilks_lambda', 'f_value', 'f_value_multivariate',
'ss', 'square_of_sums', 'fastsort', 'rankdata', 'nanmean',
'nanstd', 'nanmedian', 'combine_pvalues', ]
def _chk_asarray(a, axis):
if axis is None:
a = np.ravel(a)
outaxis = 0
else:
a = np.asarray(a)
outaxis = axis
if a.ndim == 0:
a = np.atleast_1d(a)
return a, outaxis
def _chk2_asarray(a, b, axis):
if axis is None:
a = np.ravel(a)
b = np.ravel(b)
outaxis = 0
else:
a = np.asarray(a)
b = np.asarray(b)
outaxis = axis
if a.ndim == 0:
a = np.atleast_1d(a)
if b.ndim == 0:
b = np.atleast_1d(b)
return a, b, outaxis
def _contains_nan(a, nan_policy='propagate'):
if nan_policy not in ('propagate', 'raise', 'omit'):
raise ValueError("nan_policy must be either 'propagate', 'raise', or "
"'ignore'")
try:
# Calling np.sum to avoid creating a huge array into memory
# e.g. np.isnan(a).any()
with np.errstate(invalid='ignore'):
contains_nan = np.isnan(np.sum(a))
except TypeError:
        # If the check cannot be properly performed we fall back to omitting
# nan values and raising a warning. This can happen when attempting to
# sum things that are not numbers (e.g. as in the function `mode`).
contains_nan = False
nan_policy = 'omit'
warnings.warn("The input array could not be properly checked for nan "
"values. nan values will be ignored.", RuntimeWarning)
if contains_nan and nan_policy == 'raise':
raise ValueError("The input contains nan values")
return (contains_nan, nan_policy)
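# A minimal sketch (not part of the public SciPy API) illustrating the three
# nan_policy modes handled by _contains_nan above; it is only meant to be
# called manually, never on import.
def _demo_contains_nan():
    arr = np.array([1.0, np.nan, 3.0])
    print(_contains_nan(arr)) # (True, 'propagate')
    print(_contains_nan(arr, nan_policy='omit')) # (True, 'omit')
    try:
        _contains_nan(arr, nan_policy='raise')
    except ValueError as exc:
        print(exc) # "The input contains nan values"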
#######
# NAN friendly functions
########
@np.deprecate(message="scipy.stats.nanmean is deprecated in scipy 0.15.0 "
"in favour of numpy.nanmean.")
def nanmean(x, axis=0):
"""
Compute the mean over the given axis ignoring nans.
Parameters
----------
x : ndarray
Input array.
axis : int or None, optional
Axis along which the mean is computed. Default is 0.
If None, compute over the whole array `x`.
Returns
-------
m : float
The mean of `x`, ignoring nans.
See Also
--------
nanstd, nanmedian
Examples
--------
>>> from scipy import stats
>>> a = np.linspace(0, 4, 3)
>>> a
array([ 0., 2., 4.])
>>> a[-1] = np.nan
>>> stats.nanmean(a)
1.0
"""
x, axis = _chk_asarray(x, axis)
x = x.copy()
Norig = x.shape[axis]
mask = np.isnan(x)
factor = 1.0 - np.sum(mask, axis) / Norig
x[mask] = 0.0
return np.mean(x, axis) / factor
@np.deprecate(message="scipy.stats.nanstd is deprecated in scipy 0.15 "
"in favour of numpy.nanstd.\nNote that numpy.nanstd "
"has a different signature.")
def nanstd(x, axis=0, bias=False):
"""
Compute the standard deviation over the given axis, ignoring nans.
Parameters
----------
x : array_like
Input array.
axis : int or None, optional
Axis along which the standard deviation is computed. Default is 0.
If None, compute over the whole array `x`.
bias : bool, optional
If True, the biased (normalized by N) definition is used. If False
(default), the unbiased definition is used.
Returns
-------
s : float
The standard deviation.
See Also
--------
nanmean, nanmedian
Examples
--------
>>> from scipy import stats
>>> a = np.arange(10, dtype=float)
>>> a[1:3] = np.nan
>>> np.std(a)
nan
>>> stats.nanstd(a)
2.9154759474226504
>>> stats.nanstd(a.reshape(2, 5), axis=1)
array([ 2.0817, 1.5811])
>>> stats.nanstd(a.reshape(2, 5), axis=None)
2.9154759474226504
"""
x, axis = _chk_asarray(x, axis)
x = x.copy()
Norig = x.shape[axis]
mask = np.isnan(x)
Nnan = np.sum(mask, axis) * 1.0
n = Norig - Nnan
x[mask] = 0.0
m1 = np.sum(x, axis) / n
if axis:
d = x - np.expand_dims(m1, axis)
else:
d = x - m1
d *= d
m2 = np.sum(d, axis) - m1 * m1 * Nnan
if bias:
m2c = m2 / n
else:
m2c = m2 / (n - 1.0)
return np.sqrt(m2c)
def _nanmedian(arr1d): # This only works on 1d arrays
"""Private function for rank a arrays. Compute the median ignoring Nan.
Parameters
----------
arr1d : ndarray
Input array, of rank 1.
    Returns
-------
m : float
The median.
"""
x = arr1d.copy()
c = np.isnan(x)
s = np.where(c)[0]
if s.size == x.size:
warnings.warn("All-NaN slice encountered", RuntimeWarning)
return np.nan
elif s.size != 0:
# select non-nans at end of array
enonan = x[-s.size:][~c[-s.size:]]
# fill nans in beginning of array with non-nans of end
x[s[:enonan.size]] = enonan
# slice nans away
x = x[:-s.size]
return np.median(x, overwrite_input=True)
@np.deprecate(message="scipy.stats.nanmedian is deprecated in scipy 0.15 "
"in favour of numpy.nanmedian.")
def nanmedian(x, axis=0):
"""
Compute the median along the given axis ignoring nan values.
Parameters
----------
x : array_like
Input array.
axis : int or None, optional
Axis along which the median is computed. Default is 0.
If None, compute over the whole array `x`.
Returns
-------
m : float
The median of `x` along `axis`.
See Also
--------
nanstd, nanmean, numpy.nanmedian
Examples
--------
>>> from scipy import stats
>>> a = np.array([0, 3, 1, 5, 5, np.nan])
>>> stats.nanmedian(a)
array(3.0)
>>> b = np.array([0, 3, 1, 5, 5, np.nan, 5])
>>> stats.nanmedian(b)
array(4.0)
Example with axis:
>>> c = np.arange(30.).reshape(5,6)
>>> idx = np.array([False, False, False, True, False] * 6).reshape(5,6)
>>> c[idx] = np.nan
>>> c
array([[ 0., 1., 2., nan, 4., 5.],
[ 6., 7., nan, 9., 10., 11.],
[ 12., nan, 14., 15., 16., 17.],
[ nan, 19., 20., 21., 22., nan],
[ 24., 25., 26., 27., nan, 29.]])
>>> stats.nanmedian(c, axis=1)
array([ 2. , 9. , 15. , 20.5, 26. ])
"""
x, axis = _chk_asarray(x, axis)
if x.ndim == 0:
return float(x.item())
if hasattr(np, 'nanmedian'): # numpy 1.9 faster for some cases
return np.nanmedian(x, axis)
x = np.apply_along_axis(_nanmedian, axis, x)
if x.ndim == 0:
x = float(x.item())
return x
#####################################
# CENTRAL TENDENCY #
#####################################
def gmean(a, axis=0, dtype=None):
"""
Compute the geometric mean along the specified axis.
Returns the geometric average of the array elements.
That is: n-th root of (x1 * x2 * ... * xn)
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : int or None, optional
Axis along which the geometric mean is computed. Default is 0.
If None, compute over the whole array `a`.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If dtype is not specified, it defaults to the
dtype of a, unless a has an integer dtype with a precision less than
that of the default platform integer. In that case, the default
platform integer is used.
Returns
-------
gmean : ndarray
see dtype parameter above
See Also
--------
numpy.mean : Arithmetic average
numpy.average : Weighted average
hmean : Harmonic mean
Notes
-----
The geometric average is computed over a single dimension of the input
array, axis=0 by default, or all values in the array if axis=None.
float64 intermediate and return values are used for integer inputs.
Use masked arrays to ignore any non-finite values in the input or that
arise in the calculations such as Not a Number and infinity because masked
arrays automatically mask any non-finite values.
"""
if not isinstance(a, np.ndarray): # if not an ndarray object attempt to convert it
log_a = np.log(np.array(a, dtype=dtype))
elif dtype: # Must change the default dtype allowing array type
if isinstance(a, np.ma.MaskedArray):
log_a = np.log(np.ma.asarray(a, dtype=dtype))
else:
log_a = np.log(np.asarray(a, dtype=dtype))
else:
log_a = np.log(a)
return np.exp(log_a.mean(axis=axis))
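# A minimal sketch (not part of SciPy) checking gmean against the explicit
# n-th root of the product for a small positive sample, since the docstring
# above carries no Examples section.
def _demo_gmean():
    a = np.array([1.0, 2.0, 4.0])
    direct = a.prod() ** (1.0 / a.size) # 8 ** (1/3) == 2.0
    assert np.allclose(gmean(a), direct)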
def hmean(a, axis=0, dtype=None):
"""
Calculates the harmonic mean along the specified axis.
That is: n / (1/x1 + 1/x2 + ... + 1/xn)
Parameters
----------
a : array_like
Input array, masked array or object that can be converted to an array.
axis : int or None, optional
Axis along which the harmonic mean is computed. Default is 0.
If None, compute over the whole array `a`.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults to the
dtype of `a`, unless `a` has an integer `dtype` with a precision less
than that of the default platform integer. In that case, the default
platform integer is used.
Returns
-------
hmean : ndarray
see `dtype` parameter above
See Also
--------
numpy.mean : Arithmetic average
numpy.average : Weighted average
gmean : Geometric mean
Notes
-----
The harmonic mean is computed over a single dimension of the input
array, axis=0 by default, or all values in the array if axis=None.
float64 intermediate and return values are used for integer inputs.
Use masked arrays to ignore any non-finite values in the input or that
arise in the calculations such as Not a Number and infinity.
"""
if not isinstance(a, np.ndarray):
a = np.array(a, dtype=dtype)
if np.all(a > 0): # Harmonic mean only defined if greater than zero
if isinstance(a, np.ma.MaskedArray):
size = a.count(axis)
else:
if axis is None:
a = a.ravel()
size = a.shape[0]
else:
size = a.shape[axis]
return size / np.sum(1.0/a, axis=axis, dtype=dtype)
else:
raise ValueError("Harmonic mean only defined if all elements greater than zero")
ModeResult = namedtuple('ModeResult', ('mode', 'count'))
def mode(a, axis=0, nan_policy='propagate'):
"""
Returns an array of the modal (most common) value in the passed array.
If there is more than one such value, only the first is returned.
The bin-count for the modal bins is also returned.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
mode : ndarray
Array of modal values.
count : ndarray
Array of counts for each mode.
Examples
--------
>>> a = np.array([[6, 8, 3, 0],
... [3, 2, 1, 7],
... [8, 1, 8, 4],
... [5, 3, 0, 5],
... [4, 7, 5, 9]])
>>> from scipy import stats
>>> stats.mode(a)
(array([[3, 1, 0, 0]]), array([[1, 1, 1, 1]]))
To get mode of whole array, specify ``axis=None``:
>>> stats.mode(a, axis=None)
(array([3]), array([3]))
"""
a, axis = _chk_asarray(a, axis)
if a.size == 0:
return np.array([]), np.array([])
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.mode(a, axis)
scores = np.unique(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape, dtype=a.dtype)
oldcounts = np.zeros(testshape, dtype=int)
for score in scores:
template = (a == score)
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return ModeResult(mostfrequent, oldcounts)
def _mask_to_limits(a, limits, inclusive):
"""Mask an array for values outside of given limits.
This is primarily a utility function.
Parameters
----------
a : array
limits : (float or None, float or None)
A tuple consisting of the (lower limit, upper limit). Values in the
input array less than the lower limit or greater than the upper limit
will be masked out. None implies no limit.
inclusive : (bool, bool)
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to lower or upper are allowed.
Returns
-------
A MaskedArray.
Raises
------
A ValueError if there are no values within the given limits.
"""
lower_limit, upper_limit = limits
lower_include, upper_include = inclusive
am = ma.MaskedArray(a)
if lower_limit is not None:
if lower_include:
am = ma.masked_less(am, lower_limit)
else:
am = ma.masked_less_equal(am, lower_limit)
if upper_limit is not None:
if upper_include:
am = ma.masked_greater(am, upper_limit)
else:
am = ma.masked_greater_equal(am, upper_limit)
if am.count() == 0:
raise ValueError("No array values within given limits")
return am
def tmean(a, limits=None, inclusive=(True, True), axis=None):
"""
Compute the trimmed mean.
This function finds the arithmetic mean of given values, ignoring values
outside the given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None (default), then all
values are used. Either of the limit values in the tuple can also be
None representing a half-open interval.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to compute test. Default is None.
Returns
-------
tmean : float
See also
--------
trim_mean : returns mean after trimming a proportion from both tails.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmean(x)
9.5
>>> stats.tmean(x, (3,17))
10.0
"""
a = asarray(a)
if limits is None:
return np.mean(a, None)
am = _mask_to_limits(a.ravel(), limits, inclusive)
return am.mean(axis=axis)
def tvar(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""
Compute the trimmed variance
This function computes the sample variance of an array of values,
while ignoring values which are outside of given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tvar : float
Trimmed variance.
Notes
-----
`tvar` computes the unbiased sample variance, i.e. it uses a correction
factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tvar(x)
35.0
>>> stats.tvar(x, (3,17))
20.0
"""
a = asarray(a)
a = a.astype(float).ravel()
if limits is None:
n = len(a)
return a.var() * n/(n-1.)
am = _mask_to_limits(a, limits, inclusive)
return np.ma.var(am, ddof=ddof, axis=axis)
def tmin(a, lowerlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
"""
Compute the trimmed minimum
    This function finds the minimum value of an array `a` along the
specified axis, but only considering values greater than a specified
lower limit.
Parameters
----------
a : array_like
array of values
lowerlimit : None or float, optional
Values in the input array less than the given limit will be ignored.
When lowerlimit is None, then all values are used. The default value
is None.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
inclusive : {True, False}, optional
This flag determines whether values exactly equal to the lower limit
are included. The default value is True.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
tmin : float, int or ndarray
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmin(x)
0
>>> stats.tmin(x, 13)
13
>>> stats.tmin(x, 13, inclusive=False)
14
"""
a, axis = _chk_asarray(a, axis)
am = _mask_to_limits(a, (lowerlimit, None), (inclusive, False))
contains_nan, nan_policy = _contains_nan(am, nan_policy)
if contains_nan and nan_policy == 'omit':
am = ma.masked_invalid(am)
res = ma.minimum.reduce(am, axis).data
if res.ndim == 0:
return res[()]
return res
def tmax(a, upperlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
"""
Compute the trimmed maximum
This function computes the maximum value of an array along a given axis,
while ignoring values larger than a specified upper limit.
Parameters
----------
a : array_like
array of values
upperlimit : None or float, optional
Values in the input array greater than the given limit will be ignored.
When upperlimit is None, then all values are used. The default value
is None.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
inclusive : {True, False}, optional
This flag determines whether values exactly equal to the upper limit
are included. The default value is True.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
tmax : float, int or ndarray
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmax(x)
19
>>> stats.tmax(x, 13)
13
>>> stats.tmax(x, 13, inclusive=False)
12
"""
a, axis = _chk_asarray(a, axis)
am = _mask_to_limits(a, (None, upperlimit), (False, inclusive))
contains_nan, nan_policy = _contains_nan(am, nan_policy)
if contains_nan and nan_policy == 'omit':
am = ma.masked_invalid(am)
res = ma.maximum.reduce(am, axis).data
if res.ndim == 0:
return res[()]
return res
def tstd(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""
Compute the trimmed sample standard deviation
This function finds the sample standard deviation of given values,
ignoring values outside the given `limits`.
Parameters
----------
a : array_like
array of values
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tstd : float
Notes
-----
`tstd` computes the unbiased sample standard deviation, i.e. it uses a
correction factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tstd(x)
5.9160797830996161
>>> stats.tstd(x, (3,17))
4.4721359549995796
"""
return np.sqrt(tvar(a, limits, inclusive, axis, ddof))
def tsem(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""
Compute the trimmed standard error of the mean.
This function finds the standard error of the mean for given
values, ignoring values outside the given `limits`.
Parameters
----------
a : array_like
array of values
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tsem : float
Notes
-----
`tsem` uses unbiased sample standard deviation, i.e. it uses a
correction factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tsem(x)
1.3228756555322954
>>> stats.tsem(x, (3,17))
1.1547005383792515
"""
a = np.asarray(a).ravel()
if limits is None:
return a.std(ddof=ddof) / np.sqrt(a.size)
am = _mask_to_limits(a, limits, inclusive)
sd = np.sqrt(np.ma.var(am, ddof=ddof, axis=axis))
return sd / np.sqrt(am.count())
#####################################
# MOMENTS #
#####################################
def moment(a, moment=1, axis=0, nan_policy='propagate'):
r"""
Calculates the nth moment about the mean for a sample.
A moment is a specific quantitative measure of the shape of a set of points.
It is often used to calculate coefficients of skewness and kurtosis due
to its close relationship with them.
Parameters
----------
a : array_like
data
moment : int or array_like of ints, optional
order of central moment that is returned. Default is 1.
axis : int or None, optional
Axis along which the central moment is computed. Default is 0.
If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
n-th central moment : ndarray or float
The appropriate moment along the given axis or over all values if axis
is None. The denominator for the moment calculation is the number of
observations, no degrees of freedom correction is done.
See also
--------
kurtosis, skew, describe
Notes
-----
The k-th central moment of a data sample is:
.. math::
m_k = \frac{1}{n} \sum_{i = 1}^n (x_i - \bar{x})^k
Where n is the number of samples and x-bar is the mean. This function uses
exponentiation by squares [1]_ for efficiency.
References
----------
.. [1] http://eli.thegreenplace.net/2009/03/21/efficient-integer-exponentiation-algorithms
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.moment(a, moment, axis)
if contains_nan and nan_policy == 'propagate':
return np.nan
if a.size == 0:
# empty array, return nan(s) with shape matching `moment`
if np.isscalar(moment):
return np.nan
else:
return np.ones(np.asarray(moment).shape, dtype=np.float64) * np.nan
# for array_like moment input, return a value for each.
if not np.isscalar(moment):
mmnt = [_moment(a, i, axis) for i in moment]
return np.array(mmnt)
else:
return _moment(a, moment, axis)
def _moment(a, moment, axis):
if np.abs(moment - np.round(moment)) > 0:
raise ValueError("All moment parameters must be integers")
if moment == 0:
# When moment equals 0, the result is 1, by definition.
shape = list(a.shape)
del shape[axis]
if shape:
# return an actual array of the appropriate shape
return np.ones(shape, dtype=float)
else:
# the input was 1D, so return a scalar instead of a rank-0 array
return 1.0
elif moment == 1:
# By definition the first moment about the mean is 0.
shape = list(a.shape)
del shape[axis]
if shape:
# return an actual array of the appropriate shape
return np.zeros(shape, dtype=float)
else:
# the input was 1D, so return a scalar instead of a rank-0 array
return np.float64(0.0)
else:
# Exponentiation by squares: form exponent sequence
n_list = [moment]
current_n = moment
while current_n > 2:
if current_n % 2:
current_n = (current_n-1)/2
else:
current_n /= 2
n_list.append(current_n)
# Starting point for exponentiation by squares
a_zero_mean = a - np.expand_dims(np.mean(a, axis), axis)
if n_list[-1] == 1:
s = a_zero_mean.copy()
else:
s = a_zero_mean**2
# Perform multiplications
for n in n_list[-2::-1]:
s = s**2
if n % 2:
s *= a_zero_mean
return np.mean(s, axis)
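# A minimal sketch (not part of SciPy): the exponentiation-by-squares scheme
# in _moment above agrees with the direct definition mean((x - mean(x))**k)
# for a toy sample.
def _demo_moment_equivalence():
    rng = np.random.RandomState(0)
    x = rng.randn(100)
    for k in (2, 3, 4, 5):
        direct = np.mean((x - x.mean()) ** k)
        assert np.allclose(_moment(x, k, 0), direct)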
def variation(a, axis=0, nan_policy='propagate'):
"""
Computes the coefficient of variation, the ratio of the biased standard
deviation to the mean.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate the coefficient of variation. Default
is 0. If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
variation : ndarray
The calculated variation along the requested axis.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.variation(a, axis)
if contains_nan and nan_policy == 'propagate':
return np.nan
return a.std(axis) / a.mean(axis)
def skew(a, axis=0, bias=True, nan_policy='propagate'):
"""
Computes the skewness of a data set.
For normally distributed data, the skewness should be about 0. A skewness
    value > 0 means that there is more weight in the right tail of the
distribution. The function `skewtest` can be used to determine if the
skewness value is close enough to 0, statistically speaking.
Parameters
----------
a : ndarray
data
axis : int or None, optional
Axis along which skewness is calculated. Default is 0.
If None, compute over the whole array `a`.
bias : bool, optional
If False, then the calculations are corrected for statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
skewness : ndarray
The skewness of values along an axis, returning 0 where all values are
equal.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Section 2.2.24.1
"""
a, axis = _chk_asarray(a, axis)
n = a.shape[axis]
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.skew(a, axis, bias)
if contains_nan and nan_policy == 'propagate':
return np.nan
m2 = moment(a, 2, axis)
m3 = moment(a, 3, axis)
zero = (m2 == 0)
vals = _lazywhere(~zero, (m2, m3),
lambda m2, m3: m3 / m2**1.5,
0.)
if not bias:
can_correct = (n > 2) & (m2 > 0)
if can_correct.any():
m2 = np.extract(can_correct, m2)
m3 = np.extract(can_correct, m3)
nval = np.sqrt((n-1.0)*n) / (n-2.0) * m3/m2**1.5
np.place(vals, can_correct, nval)
if vals.ndim == 0:
return vals.item()
return vals
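# A minimal sketch (not part of SciPy): with the default bias=True the value
# returned by skew above is exactly the third central moment divided by the
# 3/2 power of the second central moment.
def _demo_skew_definition():
    rng = np.random.RandomState(1)
    x = rng.randn(50)
    m2 = np.mean((x - x.mean()) ** 2)
    m3 = np.mean((x - x.mean()) ** 3)
    assert np.allclose(skew(x), m3 / m2 ** 1.5)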
def kurtosis(a, axis=0, fisher=True, bias=True, nan_policy='propagate'):
"""
Computes the kurtosis (Fisher or Pearson) of a dataset.
Kurtosis is the fourth central moment divided by the square of the
variance. If Fisher's definition is used, then 3.0 is subtracted from
the result to give 0.0 for a normal distribution.
If bias is False then the kurtosis is calculated using k statistics to
    eliminate bias coming from biased moment estimators.
Use `kurtosistest` to see if result is close enough to normal.
Parameters
----------
a : array
data for which the kurtosis is calculated
axis : int or None, optional
Axis along which the kurtosis is calculated. Default is 0.
If None, compute over the whole array `a`.
fisher : bool, optional
If True, Fisher's definition is used (normal ==> 0.0). If False,
Pearson's definition is used (normal ==> 3.0).
bias : bool, optional
If False, then the calculations are corrected for statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
kurtosis : array
The kurtosis of values along an axis. If all values are equal,
return -3 for Fisher's definition and 0 for Pearson's definition.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.kurtosis(a, axis, fisher, bias)
if contains_nan and nan_policy == 'propagate':
return np.nan
n = a.shape[axis]
m2 = moment(a, 2, axis)
m4 = moment(a, 4, axis)
zero = (m2 == 0)
olderr = np.seterr(all='ignore')
try:
vals = np.where(zero, 0, m4 / m2**2.0)
finally:
np.seterr(**olderr)
if not bias:
can_correct = (n > 3) & (m2 > 0)
if can_correct.any():
m2 = np.extract(can_correct, m2)
m4 = np.extract(can_correct, m4)
nval = 1.0/(n-2)/(n-3) * ((n**2-1.0)*m4/m2**2.0 - 3*(n-1)**2.0)
np.place(vals, can_correct, nval + 3.0)
if vals.ndim == 0:
vals = vals.item() # array scalar
if fisher:
return vals - 3
else:
return vals
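# A minimal sketch (not part of SciPy): Fisher's and Pearson's definitions in
# kurtosis above differ only by the constant 3.
def _demo_kurtosis_conventions():
    rng = np.random.RandomState(2)
    x = rng.randn(60)
    assert np.allclose(kurtosis(x, fisher=False), kurtosis(x, fisher=True) + 3.0)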
DescribeResult = namedtuple('DescribeResult',
('nobs', 'minmax', 'mean', 'variance', 'skewness',
'kurtosis'))
def describe(a, axis=0, ddof=1, bias=True, nan_policy='propagate'):
"""
Computes several descriptive statistics of the passed array.
Parameters
----------
a : array_like
Input data.
axis : int or None, optional
Axis along which statistics are calculated. Default is 0.
If None, compute over the whole array `a`.
ddof : int, optional
Delta degrees of freedom (only for variance). Default is 1.
bias : bool, optional
If False, then the skewness and kurtosis calculations are corrected for
statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
nobs : int
Number of observations (length of data along `axis`).
minmax: tuple of ndarrays or floats
Minimum and maximum value of data array.
mean : ndarray or float
Arithmetic mean of data along axis.
variance : ndarray or float
Unbiased variance of the data along axis, denominator is number of
observations minus one.
skewness : ndarray or float
Skewness, based on moment calculations with denominator equal to
the number of observations, i.e. no degrees of freedom correction.
kurtosis : ndarray or float
Kurtosis (Fisher). The kurtosis is normalized so that it is
zero for the normal distribution. No degrees of freedom are used.
See Also
--------
skew, kurtosis
Examples
--------
>>> from scipy import stats
>>> a = np.arange(10)
>>> stats.describe(a)
DescribeResult(nobs=10, minmax=(0, 9), mean=4.5, variance=9.1666666666666661,
skewness=0.0, kurtosis=-1.2242424242424244)
>>> b = [[1, 2], [3, 4]]
>>> stats.describe(b)
DescribeResult(nobs=2, minmax=(array([1, 2]), array([3, 4])),
mean=array([ 2., 3.]), variance=array([ 2., 2.]),
skewness=array([ 0., 0.]), kurtosis=array([-2., -2.]))
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.describe(a, axis, ddof, bias)
if contains_nan and nan_policy == 'propagate':
res = np.zeros(6) * np.nan
return DescribeResult(*res)
if a.size == 0:
raise ValueError("The input must not be empty.")
n = a.shape[axis]
mm = (np.min(a, axis=axis), np.max(a, axis=axis))
m = np.mean(a, axis=axis)
v = np.var(a, axis=axis, ddof=ddof)
sk = skew(a, axis, bias=bias)
kurt = kurtosis(a, axis, bias=bias)
return DescribeResult(n, mm, m, v, sk, kurt)
#####################################
# NORMALITY TESTS #
#####################################
SkewtestResult = namedtuple('SkewtestResult', ('statistic', 'pvalue'))
def skewtest(a, axis=0, nan_policy='propagate'):
"""
Tests whether the skew is different from the normal distribution.
This function tests the null hypothesis that the skewness of
the population that the sample was drawn from is the same
as that of a corresponding normal distribution.
Parameters
----------
a : array
The data to be tested
axis : int or None, optional
Axis along which statistics are calculated. Default is 0.
If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float
The computed z-score for this test.
pvalue : float
a 2-sided p-value for the hypothesis test
Notes
-----
The sample size must be at least 8.
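    Examples
    --------
    A small usage sketch (at least eight observations are required):
    >>> from scipy import stats
    >>> x = [1, 2, 3, 4, 5, 6, 7, 8]
    >>> statistic, pvalue = stats.skewtest(x)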
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.skewtest(a, axis)
if contains_nan and nan_policy == 'propagate':
return SkewtestResult(np.nan, np.nan)
if axis is None:
a = np.ravel(a)
axis = 0
b2 = skew(a, axis)
n = float(a.shape[axis])
if n < 8:
raise ValueError(
"skewtest is not valid with less than 8 samples; %i samples"
" were given." % int(n))
y = b2 * math.sqrt(((n + 1) * (n + 3)) / (6.0 * (n - 2)))
beta2 = (3.0 * (n**2 + 27*n - 70) * (n+1) * (n+3) /
((n-2.0) * (n+5) * (n+7) * (n+9)))
W2 = -1 + math.sqrt(2 * (beta2 - 1))
delta = 1 / math.sqrt(0.5 * math.log(W2))
alpha = math.sqrt(2.0 / (W2 - 1))
y = np.where(y == 0, 1, y)
Z = delta * np.log(y / alpha + np.sqrt((y / alpha)**2 + 1))
return SkewtestResult(Z, 2 * distributions.norm.sf(np.abs(Z)))
KurtosistestResult = namedtuple('KurtosistestResult', ('statistic', 'pvalue'))
def kurtosistest(a, axis=0, nan_policy='propagate'):
"""
Tests whether a dataset has normal kurtosis
This function tests the null hypothesis that the kurtosis
of the population from which the sample was drawn is that
of the normal distribution: ``kurtosis = 3(n-1)/(n+1)``.
Parameters
----------
a : array
array of the sample data
axis : int or None, optional
Axis along which to compute test. Default is 0. If None,
compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float
The computed z-score for this test.
pvalue : float
The 2-sided p-value for the hypothesis test
Notes
-----
Valid only for n>20. The Z-score is set to 0 for bad entries.
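    Examples
    --------
    A small usage sketch on 20 observations (the smallest sample size for
    which no warning is emitted):
    >>> from scipy import stats
    >>> x = np.arange(20.)
    >>> statistic, pvalue = stats.kurtosistest(x)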
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.kurtosistest(a, axis)
if contains_nan and nan_policy == 'propagate':
return KurtosistestResult(np.nan, np.nan)
n = float(a.shape[axis])
if n < 5:
raise ValueError(
"kurtosistest requires at least 5 observations; %i observations"
" were given." % int(n))
if n < 20:
warnings.warn("kurtosistest only valid for n>=20 ... continuing "
"anyway, n=%i" % int(n))
b2 = kurtosis(a, axis, fisher=False)
E = 3.0*(n-1) / (n+1)
varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1.)*(n+3)*(n+5))
x = (b2-E) / np.sqrt(varb2)
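    # transform the standardized sample kurtosis x into an approximately
    # standard normal deviate Z via a cube-root transformation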
sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * np.sqrt((6.0*(n+3)*(n+5)) /
(n*(n-2)*(n-3)))
A = 6.0 + 8.0/sqrtbeta1 * (2.0/sqrtbeta1 + np.sqrt(1+4.0/(sqrtbeta1**2)))
term1 = 1 - 2/(9.0*A)
denom = 1 + x*np.sqrt(2/(A-4.0))
denom = np.where(denom < 0, 99, denom)
term2 = np.where(denom < 0, term1, np.power((1-2.0/A)/denom, 1/3.0))
Z = (term1 - term2) / np.sqrt(2/(9.0*A))
Z = np.where(denom == 99, 0, Z)
if Z.ndim == 0:
Z = Z[()]
# zprob uses upper tail, so Z needs to be positive
return KurtosistestResult(Z, 2 * distributions.norm.sf(np.abs(Z)))
NormaltestResult = namedtuple('NormaltestResult', ('statistic', 'pvalue'))
def normaltest(a, axis=0, nan_policy='propagate'):
"""
Tests whether a sample differs from a normal distribution.
This function tests the null hypothesis that a sample comes
from a normal distribution. It is based on D'Agostino and
Pearson's [1]_, [2]_ test that combines skew and kurtosis to
produce an omnibus test of normality.
Parameters
----------
a : array_like
The array containing the data to be tested.
axis : int or None, optional
Axis along which to compute test. Default is 0. If None,
compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float or array
``s^2 + k^2``, where ``s`` is the z-score returned by `skewtest` and
``k`` is the z-score returned by `kurtosistest`.
pvalue : float or array
A 2-sided chi squared probability for the hypothesis test.
References
----------
.. [1] D'Agostino, R. B. (1971), "An omnibus test of normality for
moderate and large sample size," Biometrika, 58, 341-348
.. [2] D'Agostino, R. and Pearson, E. S. (1973), "Testing for
departures from normality," Biometrika, 60, 613-622
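    Examples
    --------
    A small usage sketch on a pseudo-random normal sample:
    >>> from scipy import stats
    >>> np.random.seed(12345678)
    >>> x = np.random.normal(0, 1, 1000)
    >>> statistic, pvalue = stats.normaltest(x)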
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.normaltest(a, axis)
if contains_nan and nan_policy == 'propagate':
return NormaltestResult(np.nan, np.nan)
s, _ = skewtest(a, axis)
k, _ = kurtosistest(a, axis)
k2 = s*s + k*k
return NormaltestResult(k2, distributions.chi2.sf(k2, 2))
def jarque_bera(x):
"""
Perform the Jarque-Bera goodness of fit test on sample data.
The Jarque-Bera test tests whether the sample data has the skewness and
kurtosis matching a normal distribution.
Note that this test only works for a large enough number of data samples
(>2000) as the test statistic asymptotically has a Chi-squared distribution
with 2 degrees of freedom.
Parameters
----------
x : array_like
Observations of a random variable.
Returns
-------
jb_value : float
The test statistic.
p : float
The p-value for the hypothesis test.
References
----------
.. [1] Jarque, C. and Bera, A. (1980) "Efficient tests for normality,
homoscedasticity and serial independence of regression residuals",
6 Econometric Letters 255-259.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(987654321)
>>> x = np.random.normal(0, 1, 100000)
>>> y = np.random.rayleigh(1, 100000)
>>> stats.jarque_bera(x)
(4.7165707989581342, 0.09458225503041906)
>>> stats.jarque_bera(y)
(6713.7098548143422, 0.0)
"""
x = np.asarray(x)
n = float(x.size)
if n == 0:
raise ValueError('At least one observation is required.')
mu = x.mean()
diffx = x - mu
skewness = (1 / n * np.sum(diffx**3)) / (1 / n * np.sum(diffx**2))**(3 / 2.)
kurtosis = (1 / n * np.sum(diffx**4)) / (1 / n * np.sum(diffx**2))**2
jb_value = n / 6 * (skewness**2 + (kurtosis - 3)**2 / 4)
p = 1 - distributions.chi2.cdf(jb_value, 2)
return jb_value, p
#####################################
# FREQUENCY FUNCTIONS #
#####################################
def itemfreq(a):
"""
Returns a 2-D array of item frequencies.
Parameters
----------
a : (N,) array_like
Input array.
Returns
-------
itemfreq : (K, 2) ndarray
A 2-D frequency table. Column 1 contains sorted, unique values from
`a`, column 2 contains their respective counts.
Examples
--------
>>> from scipy import stats
>>> a = np.array([1, 1, 5, 0, 1, 2, 2, 0, 1, 4])
>>> stats.itemfreq(a)
array([[ 0., 2.],
[ 1., 4.],
[ 2., 2.],
[ 4., 1.],
[ 5., 1.]])
>>> np.bincount(a)
array([2, 4, 2, 0, 1, 1])
>>> stats.itemfreq(a/10.)
array([[ 0. , 2. ],
[ 0.1, 4. ],
[ 0.2, 2. ],
[ 0.4, 1. ],
[ 0.5, 1. ]])
"""
items, inv = np.unique(a, return_inverse=True)
freq = np.bincount(inv)
return np.array([items, freq]).T
def scoreatpercentile(a, per, limit=(), interpolation_method='fraction',
axis=None):
"""
Calculate the score at a given percentile of the input sequence.
For example, the score at `per=50` is the median. If the desired quantile
lies between two data points, we interpolate between them, according to
the value of `interpolation`. If the parameter `limit` is provided, it
should be a tuple (lower, upper) of two values.
Parameters
----------
a : array_like
A 1-D array of values from which to extract score.
per : array_like
Percentile(s) at which to extract score. Values should be in range
[0,100].
limit : tuple, optional
Tuple of two scalars, the lower and upper limits within which to
compute the percentile. Values of `a` outside
this (closed) interval will be ignored.
interpolation_method : {'fraction', 'lower', 'higher'}, optional
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`
- fraction: ``i + (j - i) * fraction`` where ``fraction`` is the
fractional part of the index surrounded by ``i`` and ``j``.
- lower: ``i``.
- higher: ``j``.
axis : int, optional
Axis along which the percentiles are computed. Default is None. If
None, compute over the whole array `a`.
Returns
-------
score : float or ndarray
Score at percentile(s).
See Also
--------
percentileofscore, numpy.percentile
Notes
-----
This function will become obsolete in the future.
    For Numpy 1.9 and higher, `numpy.percentile` provides all the functionality
    that `scoreatpercentile` provides, and it is significantly faster.
    Therefore it is recommended that users with numpy >= 1.9 use
    `numpy.percentile` instead.
Examples
--------
>>> from scipy import stats
>>> a = np.arange(100)
>>> stats.scoreatpercentile(a, 50)
49.5
"""
# adapted from NumPy's percentile function. When we require numpy >= 1.8,
# the implementation of this function can be replaced by np.percentile.
a = np.asarray(a)
if a.size == 0:
# empty array, return nan(s) with shape matching `per`
if np.isscalar(per):
return np.nan
else:
return np.ones(np.asarray(per).shape, dtype=np.float64) * np.nan
if limit:
a = a[(limit[0] <= a) & (a <= limit[1])]
sorted = np.sort(a, axis=axis)
if axis is None:
axis = 0
return _compute_qth_percentile(sorted, per, interpolation_method, axis)
# handle sequence of per's without calling sort multiple times
def _compute_qth_percentile(sorted, per, interpolation_method, axis):
if not np.isscalar(per):
score = [_compute_qth_percentile(sorted, i, interpolation_method, axis)
for i in per]
return np.array(score)
if (per < 0) or (per > 100):
raise ValueError("percentile must be in the range [0, 100]")
indexer = [slice(None)] * sorted.ndim
idx = per / 100. * (sorted.shape[axis] - 1)
if int(idx) != idx:
# round fractional indices according to interpolation method
if interpolation_method == 'lower':
idx = int(np.floor(idx))
elif interpolation_method == 'higher':
idx = int(np.ceil(idx))
elif interpolation_method == 'fraction':
pass # keep idx as fraction and interpolate
else:
raise ValueError("interpolation_method can only be 'fraction', "
"'lower' or 'higher'")
i = int(idx)
if i == idx:
indexer[axis] = slice(i, i + 1)
weights = array(1)
sumval = 1.0
else:
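        # fractional index: take a weighted average of the two surrounding
        # order statistics (standard linear interpolation)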
indexer[axis] = slice(i, i + 2)
j = i + 1
weights = array([(j - idx), (idx - i)], float)
wshape = [1] * sorted.ndim
wshape[axis] = 2
weights.shape = wshape
sumval = weights.sum()
# Use np.add.reduce (== np.sum but a little faster) to coerce data type
return np.add.reduce(sorted[indexer] * weights, axis=axis) / sumval
def percentileofscore(a, score, kind='rank'):
"""
The percentile rank of a score relative to a list of scores.
A `percentileofscore` of, for example, 80% means that 80% of the
scores in `a` are below the given score. In the case of gaps or
ties, the exact definition depends on the optional keyword, `kind`.
Parameters
----------
a : array_like
Array of scores to which `score` is compared.
score : int or float
Score that is compared to the elements in `a`.
kind : {'rank', 'weak', 'strict', 'mean'}, optional
This optional parameter specifies the interpretation of the
resulting score:
- "rank": Average percentage ranking of score. In case of
multiple matches, average the percentage rankings of
all matching scores.
- "weak": This kind corresponds to the definition of a cumulative
distribution function. A percentileofscore of 80%
means that 80% of values are less than or equal
to the provided score.
- "strict": Similar to "weak", except that only values that are
strictly less than the given score are counted.
- "mean": The average of the "weak" and "strict" scores, often used in
testing. See
http://en.wikipedia.org/wiki/Percentile_rank
Returns
-------
pcos : float
Percentile-position of score (0-100) relative to `a`.
See Also
--------
numpy.percentile
Examples
--------
Three-quarters of the given values lie below a given score:
>>> from scipy import stats
>>> stats.percentileofscore([1, 2, 3, 4], 3)
75.0
With multiple matches, note how the scores of the two matches, 0.6
and 0.8 respectively, are averaged:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3)
70.0
Only 2/5 values are strictly less than 3:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='strict')
40.0
But 4/5 values are less than or equal to 3:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='weak')
80.0
The average between the weak and the strict scores is
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='mean')
60.0
"""
a = np.array(a)
n = len(a)
if kind == 'rank':
if not np.any(a == score):
a = np.append(a, score)
a_len = np.array(list(range(len(a))))
else:
a_len = np.array(list(range(len(a)))) + 1.0
a = np.sort(a)
idx = [a == score]
pct = (np.mean(a_len[idx]) / n) * 100.0
return pct
elif kind == 'strict':
return np.sum(a < score) / float(n) * 100
elif kind == 'weak':
return np.sum(a <= score) / float(n) * 100
elif kind == 'mean':
return (np.sum(a < score) + np.sum(a <= score)) * 50 / float(n)
else:
raise ValueError("kind can only be 'rank', 'strict', 'weak' or 'mean'")
@np.deprecate(message=("scipy.stats.histogram2 is deprecated in scipy 0.16.0; "
"use np.histogram2d instead"))
def histogram2(a, bins):
"""
Compute histogram using divisions in bins.
Count the number of times values from array `a` fall into
    numerical ranges defined by `bins`. Range x is given by
    bins[x] <= range_x < bins[x+1] for x = 0, ..., N-2, where N is the
    length of the `bins` array. The last range is given by
    bins[N-1] <= range_{N-1} < infinity. Values less than bins[0] are
    not included in the histogram.
Parameters
----------
a : array_like of rank 1
The array of values to be assigned into bins
bins : array_like of rank 1
Defines the ranges of values to use during histogramming.
Returns
-------
histogram2 : ndarray of rank 1
Each value represents the occurrences for a given bin (range) of
values.
"""
# comment: probably obsoleted by numpy.histogram()
n = np.searchsorted(np.sort(a), bins)
n = np.concatenate([n, [len(a)]])
return n[1:] - n[:-1]
HistogramResult = namedtuple('HistogramResult',
('count', 'lowerlimit', 'binsize', 'extrapoints'))
@np.deprecate(message=("scipy.stats.histogram is deprecated in scipy 0.17.0; "
"use np.histogram instead"))
def histogram(a, numbins=10, defaultlimits=None, weights=None, printextras=False):
# _histogram is used in relfreq/cumfreq, so need to keep it
res = _histogram(a, numbins=numbins, defaultlimits=defaultlimits,
weights=weights, printextras=printextras)
return res
def _histogram(a, numbins=10, defaultlimits=None, weights=None, printextras=False):
"""
Separates the range into several bins and returns the number of instances
in each bin.
Parameters
----------
a : array_like
Array of scores which will be put into bins.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultlimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in a is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
printextras : bool, optional
If True, if there are extra points (i.e. the points that fall outside
the bin limits) a warning is raised saying how many of those points
there are. Default is False.
Returns
-------
count : ndarray
Number of points (or sum of weights) in each bin.
lowerlimit : float
Lowest value of histogram, the lower limit of the first bin.
binsize : float
The size of the bins (all bins have the same size).
extrapoints : int
The number of points outside the range of the histogram.
See Also
--------
numpy.histogram
Notes
-----
    This histogram is based on numpy's histogram but has a larger range by
    default if `defaultlimits` is not set.
"""
a = np.ravel(a)
if defaultlimits is None:
if a.size == 0:
# handle empty arrays. Undetermined range, so use 0-1.
defaultlimits = (0, 1)
else:
# no range given, so use values in `a`
data_min = a.min()
data_max = a.max()
# Have bins extend past min and max values slightly
s = (data_max - data_min) / (2. * (numbins - 1.))
defaultlimits = (data_min - s, data_max + s)
# use numpy's histogram method to compute bins
hist, bin_edges = np.histogram(a, bins=numbins, range=defaultlimits,
weights=weights)
# hist are not always floats, convert to keep with old output
hist = np.array(hist, dtype=float)
# fixed width for bins is assumed, as numpy's histogram gives
# fixed width bins for int values for 'bins'
binsize = bin_edges[1] - bin_edges[0]
# calculate number of extra points
extrapoints = len([v for v in a
if defaultlimits[0] > v or v > defaultlimits[1]])
if extrapoints > 0 and printextras:
warnings.warn("Points outside given histogram range = %s"
% extrapoints)
return HistogramResult(hist, defaultlimits[0], binsize, extrapoints)
CumfreqResult = namedtuple('CumfreqResult',
('cumcount', 'lowerlimit', 'binsize',
'extrapoints'))
def cumfreq(a, numbins=10, defaultreallimits=None, weights=None):
"""
Returns a cumulative frequency histogram, using the histogram function.
A cumulative histogram is a mapping that counts the cumulative number of
observations in all of the bins up to the specified bin.
Parameters
----------
a : array_like
Input array.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultreallimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in `a` is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
Returns
-------
cumcount : ndarray
Binned values of cumulative frequency.
lowerlimit : float
Lower real limit
binsize : float
Width of each bin.
extrapoints : int
Extra points.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import stats
>>> x = [1, 4, 2, 1, 3, 1]
>>> res = stats.cumfreq(x, numbins=4, defaultreallimits=(1.5, 5))
>>> res.cumcount
array([ 1., 2., 3., 3.])
>>> res.extrapoints
3
Create a normal distribution with 1000 random values
>>> rng = np.random.RandomState(seed=12345)
>>> samples = stats.norm.rvs(size=1000, random_state=rng)
Calculate cumulative frequencies
>>> res = stats.cumfreq(samples, numbins=25)
Calculate space of values for x
>>> x = res.lowerlimit + np.linspace(0, res.binsize*res.cumcount.size,
... res.cumcount.size)
Plot histogram and cumulative histogram
>>> fig = plt.figure(figsize=(10, 4))
>>> ax1 = fig.add_subplot(1, 2, 1)
>>> ax2 = fig.add_subplot(1, 2, 2)
>>> ax1.hist(samples, bins=25)
>>> ax1.set_title('Histogram')
>>> ax2.bar(x, res.cumcount, width=res.binsize)
>>> ax2.set_title('Cumulative histogram')
>>> ax2.set_xlim([x.min(), x.max()])
>>> plt.show()
"""
h, l, b, e = _histogram(a, numbins, defaultreallimits, weights=weights)
cumhist = np.cumsum(h * 1, axis=0)
return CumfreqResult(cumhist, l, b, e)
RelfreqResult = namedtuple('RelfreqResult',
('frequency', 'lowerlimit', 'binsize',
'extrapoints'))
def relfreq(a, numbins=10, defaultreallimits=None, weights=None):
"""
Returns a relative frequency histogram, using the histogram function.
A relative frequency histogram is a mapping of the number of
observations in each of the bins relative to the total of observations.
Parameters
----------
a : array_like
Input array.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultreallimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in a is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
Returns
-------
frequency : ndarray
Binned values of relative frequency.
lowerlimit : float
Lower real limit
binsize : float
Width of each bin.
extrapoints : int
Extra points.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import stats
>>> a = np.array([2, 4, 1, 2, 3, 2])
>>> res = stats.relfreq(a, numbins=4)
>>> res.frequency
array([ 0.16666667, 0.5 , 0.16666667, 0.16666667])
>>> np.sum(res.frequency) # relative frequencies should add up to 1
1.0
Create a normal distribution with 1000 random values
>>> rng = np.random.RandomState(seed=12345)
>>> samples = stats.norm.rvs(size=1000, random_state=rng)
Calculate relative frequencies
>>> res = stats.relfreq(samples, numbins=25)
Calculate space of values for x
>>> x = res.lowerlimit + np.linspace(0, res.binsize*res.frequency.size,
... res.frequency.size)
Plot relative frequency histogram
>>> fig = plt.figure(figsize=(5, 4))
>>> ax = fig.add_subplot(1, 1, 1)
>>> ax.bar(x, res.frequency, width=res.binsize)
>>> ax.set_title('Relative frequency histogram')
>>> ax.set_xlim([x.min(), x.max()])
>>> plt.show()
"""
a = np.asanyarray(a)
h, l, b, e = _histogram(a, numbins, defaultreallimits, weights=weights)
h = h / float(a.shape[0])
return RelfreqResult(h, l, b, e)
#####################################
# VARIABILITY FUNCTIONS #
#####################################
def obrientransform(*args):
"""
Computes the O'Brien transform on input data (any number of arrays).
Used to test for homogeneity of variance prior to running one-way stats.
Each array in ``*args`` is one level of a factor.
If `f_oneway` is run on the transformed data and found significant,
the variances are unequal. From Maxwell and Delaney [1]_, p.112.
Parameters
----------
args : tuple of array_like
Any number of arrays.
Returns
-------
obrientransform : ndarray
Transformed data for use in an ANOVA. The first dimension
of the result corresponds to the sequence of transformed
arrays. If the arrays given are all 1-D of the same length,
the return value is a 2-D array; otherwise it is a 1-D array
of type object, with each element being an ndarray.
References
----------
.. [1] S. E. Maxwell and H. D. Delaney, "Designing Experiments and
Analyzing Data: A Model Comparison Perspective", Wadsworth, 1990.
Examples
--------
We'll test the following data sets for differences in their variance.
>>> x = [10, 11, 13, 9, 7, 12, 12, 9, 10]
>>> y = [13, 21, 5, 10, 8, 14, 10, 12, 7, 15]
Apply the O'Brien transform to the data.
>>> from scipy.stats import obrientransform
>>> tx, ty = obrientransform(x, y)
Use `scipy.stats.f_oneway` to apply a one-way ANOVA test to the
transformed data.
>>> from scipy.stats import f_oneway
>>> F, p = f_oneway(tx, ty)
>>> p
0.1314139477040335
If we require that ``p < 0.05`` for significance, we cannot conclude
that the variances are different.
"""
TINY = np.sqrt(np.finfo(float).eps)
# `arrays` will hold the transformed arguments.
arrays = []
for arg in args:
a = np.asarray(arg)
n = len(a)
mu = np.mean(a)
sq = (a - mu)**2
sumsq = sq.sum()
# The O'Brien transform.
t = ((n - 1.5) * n * sq - 0.5 * sumsq) / ((n - 1) * (n - 2))
# Check that the mean of the transformed data is equal to the
# original variance.
var = sumsq / (n - 1)
if abs(var - np.mean(t)) > TINY:
raise ValueError('Lack of convergence in obrientransform.')
arrays.append(t)
# If the arrays are not all the same shape, calling np.array(arrays)
# creates a 1-D array with dtype `object` in numpy 1.6+. In numpy
# 1.5.x, it raises an exception. To work around this, we explicitly
# set the dtype to `object` when the arrays are not all the same shape.
if len(arrays) < 2 or all(x.shape == arrays[0].shape for x in arrays[1:]):
dt = None
else:
dt = object
return np.array(arrays, dtype=dt)
@np.deprecate(message="scipy.stats.signaltonoise is deprecated in scipy 0.16.0")
def signaltonoise(a, axis=0, ddof=0):
"""
The signal-to-noise ratio of the input data.
Returns the signal-to-noise ratio of `a`, here defined as the mean
divided by the standard deviation.
Parameters
----------
a : array_like
An array_like object containing the sample data.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Degrees of freedom correction for standard deviation. Default is 0.
Returns
-------
s2n : ndarray
The mean to standard deviation ratio(s) along `axis`, or 0 where the
standard deviation is 0.
"""
a = np.asanyarray(a)
m = a.mean(axis)
sd = a.std(axis=axis, ddof=ddof)
return np.where(sd == 0, 0, m/sd)
def sem(a, axis=0, ddof=1, nan_policy='propagate'):
"""
Calculates the standard error of the mean (or standard error of
measurement) of the values in the input array.
Parameters
----------
a : array_like
An array containing the values for which the standard error is
returned.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Delta degrees-of-freedom. How many degrees of freedom to adjust
for bias in limited samples relative to the population estimate
of variance. Defaults to 1.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
s : ndarray or float
The standard error of the mean in the sample(s), along the input axis.
Notes
-----
    The default value for `ddof` is different from the default (0) used by other
    ddof-containing routines, such as np.std and stats.nanstd.
Examples
--------
Find standard error along the first axis:
>>> from scipy import stats
>>> a = np.arange(20).reshape(5,4)
>>> stats.sem(a)
array([ 2.8284, 2.8284, 2.8284, 2.8284])
Find standard error across the whole array, using n degrees of freedom:
>>> stats.sem(a, axis=None, ddof=0)
1.2893796958227628
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.sem(a, axis, ddof)
if contains_nan and nan_policy == 'propagate':
return np.nan
n = a.shape[axis]
s = np.std(a, axis=axis, ddof=ddof) / np.sqrt(n)
return s
def zscore(a, axis=0, ddof=0):
"""
Calculates the z score of each value in the sample, relative to the sample
mean and standard deviation.
Parameters
----------
a : array_like
An array like object containing the sample data.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Degrees of freedom correction in the calculation of the
standard deviation. Default is 0.
Returns
-------
zscore : array_like
The z-scores, standardized by mean and standard deviation of input
array `a`.
Notes
-----
This function preserves ndarray subclasses, and works also with
matrices and masked arrays (it uses `asanyarray` instead of `asarray`
for parameters).
Examples
--------
>>> a = np.array([ 0.7972, 0.0767, 0.4383, 0.7866, 0.8091, 0.1954,
... 0.6307, 0.6599, 0.1065, 0.0508])
>>> from scipy import stats
>>> stats.zscore(a)
array([ 1.1273, -1.247 , -0.0552, 1.0923, 1.1664, -0.8559, 0.5786,
0.6748, -1.1488, -1.3324])
Computing along a specified axis, using n-1 degrees of freedom (``ddof=1``)
to calculate the standard deviation:
>>> b = np.array([[ 0.3148, 0.0478, 0.6243, 0.4608],
... [ 0.7149, 0.0775, 0.6072, 0.9656],
... [ 0.6341, 0.1403, 0.9759, 0.4064],
... [ 0.5918, 0.6948, 0.904 , 0.3721],
... [ 0.0921, 0.2481, 0.1188, 0.1366]])
>>> stats.zscore(b, axis=1, ddof=1)
array([[-0.19264823, -1.28415119, 1.07259584, 0.40420358],
[ 0.33048416, -1.37380874, 0.04251374, 1.00081084],
[ 0.26796377, -1.12598418, 1.23283094, -0.37481053],
[-0.22095197, 0.24468594, 1.19042819, -1.21416216],
[-0.82780366, 1.4457416 , -0.43867764, -0.1792603 ]])
"""
a = np.asanyarray(a)
mns = a.mean(axis=axis)
sstd = a.std(axis=axis, ddof=ddof)
if axis and mns.ndim < a.ndim:
return ((a - np.expand_dims(mns, axis=axis)) /
np.expand_dims(sstd, axis=axis))
else:
return (a - mns) / sstd
def zmap(scores, compare, axis=0, ddof=0):
"""
Calculates the relative z-scores.
Returns an array of z-scores, i.e., scores that are standardized to zero
mean and unit variance, where mean and variance are calculated from the
comparison array.
Parameters
----------
scores : array_like
The input for which z-scores are calculated.
compare : array_like
The input from which the mean and standard deviation of the
normalization are taken; assumed to have the same dimension as
`scores`.
axis : int or None, optional
Axis over which mean and variance of `compare` are calculated.
Default is 0. If None, compute over the whole array `scores`.
ddof : int, optional
Degrees of freedom correction in the calculation of the
standard deviation. Default is 0.
Returns
-------
zscore : array_like
Z-scores, in the same shape as `scores`.
Notes
-----
This function preserves ndarray subclasses, and works also with
matrices and masked arrays (it uses `asanyarray` instead of `asarray`
for parameters).
Examples
--------
>>> from scipy.stats import zmap
>>> a = [0.5, 2.0, 2.5, 3]
>>> b = [0, 1, 2, 3, 4]
>>> zmap(a, b)
array([-1.06066017, 0. , 0.35355339, 0.70710678])
"""
scores, compare = map(np.asanyarray, [scores, compare])
mns = compare.mean(axis=axis)
sstd = compare.std(axis=axis, ddof=ddof)
if axis and mns.ndim < compare.ndim:
return ((scores - np.expand_dims(mns, axis=axis)) /
np.expand_dims(sstd, axis=axis))
else:
return (scores - mns) / sstd
#####################################
# TRIMMING FUNCTIONS #
#####################################
@np.deprecate(message="stats.threshold is deprecated in scipy 0.17.0")
def threshold(a, threshmin=None, threshmax=None, newval=0):
"""
Clip array to a given value.
Similar to numpy.clip(), except that values less than `threshmin` or
greater than `threshmax` are replaced by `newval`, instead of by
`threshmin` and `threshmax` respectively.
Parameters
----------
a : array_like
Data to threshold.
threshmin : float, int or None, optional
Minimum threshold, defaults to None.
threshmax : float, int or None, optional
Maximum threshold, defaults to None.
newval : float or int, optional
Value to put in place of values in `a` outside of bounds.
Defaults to 0.
Returns
-------
out : ndarray
The clipped input array, with values less than `threshmin` or
greater than `threshmax` replaced with `newval`.
Examples
--------
>>> a = np.array([9, 9, 6, 3, 1, 6, 1, 0, 0, 8])
>>> from scipy import stats
>>> stats.threshold(a, threshmin=2, threshmax=8, newval=-1)
array([-1, -1, 6, 3, -1, 6, -1, -1, -1, 8])
"""
a = asarray(a).copy()
mask = zeros(a.shape, dtype=bool)
if threshmin is not None:
mask |= (a < threshmin)
if threshmax is not None:
mask |= (a > threshmax)
a[mask] = newval
return a
SigmaclipResult = namedtuple('SigmaclipResult', ('clipped', 'lower', 'upper'))
def sigmaclip(a, low=4., high=4.):
"""
Iterative sigma-clipping of array elements.
    The output array contains only those elements of the input array `a`
    that satisfy the condition (with `c` denoting the surviving sample at
    each iteration) ::
mean(c) - std(c)*low < c < mean(c) + std(c)*high
Starting from the full sample, all elements outside the critical range are
removed. The iteration continues with a new critical range until no
elements are outside the range.
Parameters
----------
a : array_like
Data array, will be raveled if not 1-D.
low : float, optional
Lower bound factor of sigma clipping. Default is 4.
high : float, optional
Upper bound factor of sigma clipping. Default is 4.
Returns
-------
clipped : ndarray
Input array with clipped elements removed.
lower : float
        Lower threshold value used for clipping.
    upper : float
        Upper threshold value used for clipping.
Examples
--------
>>> from scipy.stats import sigmaclip
>>> a = np.concatenate((np.linspace(9.5, 10.5, 31),
... np.linspace(0, 20, 5)))
>>> fact = 1.5
>>> c, low, upp = sigmaclip(a, fact, fact)
>>> c
array([ 9.96666667, 10. , 10.03333333, 10. ])
>>> c.var(), c.std()
(0.00055555555555555165, 0.023570226039551501)
>>> low, c.mean() - fact*c.std(), c.min()
(9.9646446609406727, 9.9646446609406727, 9.9666666666666668)
>>> upp, c.mean() + fact*c.std(), c.max()
(10.035355339059327, 10.035355339059327, 10.033333333333333)
>>> a = np.concatenate((np.linspace(9.5, 10.5, 11),
... np.linspace(-100, -50, 3)))
>>> c, low, upp = sigmaclip(a, 1.8, 1.8)
>>> (c == np.linspace(9.5, 10.5, 11)).all()
True
"""
c = np.asarray(a).ravel()
delta = 1
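    # iterate until a complete pass removes no further points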
while delta:
c_std = c.std()
c_mean = c.mean()
size = c.size
critlower = c_mean - c_std*low
critupper = c_mean + c_std*high
c = c[(c > critlower) & (c < critupper)]
delta = size - c.size
return SigmaclipResult(c, critlower, critupper)
def trimboth(a, proportiontocut, axis=0):
"""
Slices off a proportion of items from both ends of an array.
Slices off the passed proportion of items from both ends of the passed
array (i.e., with `proportiontocut` = 0.1, slices leftmost 10% **and**
rightmost 10% of scores). The trimmed values are the lowest and
highest ones.
Slices off less if proportion results in a non-integer slice index (i.e.,
    conservatively slices off `proportiontocut`).
Parameters
----------
a : array_like
Data to trim.
proportiontocut : float
Proportion (in range 0-1) of total data set to trim of each end.
axis : int or None, optional
Axis along which to trim data. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
out : ndarray
Trimmed version of array `a`. The order of the trimmed content
is undefined.
See Also
--------
trim_mean
Examples
--------
>>> from scipy import stats
>>> a = np.arange(20)
>>> b = stats.trimboth(a, 0.1)
>>> b.shape
(16,)
"""
a = np.asarray(a)
if a.size == 0:
return a
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
lowercut = int(proportiontocut * nobs)
uppercut = nobs - lowercut
if (lowercut >= uppercut):
raise ValueError("Proportion too big.")
# np.partition is preferred but it only exist in numpy 1.8.0 and higher,
# in those cases we use np.sort
try:
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
except AttributeError:
atmp = np.sort(a, axis)
sl = [slice(None)] * atmp.ndim
sl[axis] = slice(lowercut, uppercut)
return atmp[sl]
def trim1(a, proportiontocut, tail='right', axis=0):
"""
Slices off a proportion from ONE end of the passed array distribution.
If `proportiontocut` = 0.1, slices off 'leftmost' or 'rightmost'
10% of scores. The lowest or highest values are trimmed (depending on
the tail).
Slices off less if proportion results in a non-integer slice index
(i.e., conservatively slices off `proportiontocut` ).
Parameters
----------
a : array_like
Input array
proportiontocut : float
Fraction to cut off of 'left' or 'right' of distribution
tail : {'left', 'right'}, optional
Defaults to 'right'.
axis : int or None, optional
Axis along which to trim data. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
trim1 : ndarray
Trimmed version of array `a`. The order of the trimmed content is
undefined.
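    Examples
    --------
    A small usage sketch; trimming 10% from the right of 20 values leaves 18:
    >>> from scipy import stats
    >>> a = np.arange(20)
    >>> b = stats.trim1(a, 0.1, tail='right')
    >>> b.shape
    (18,)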
"""
a = np.asarray(a)
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
# avoid possible corner case
if proportiontocut >= 1:
return []
if tail.lower() == 'right':
lowercut = 0
uppercut = nobs - int(proportiontocut * nobs)
elif tail.lower() == 'left':
lowercut = int(proportiontocut * nobs)
uppercut = nobs
# np.partition is preferred but it only exist in numpy 1.8.0 and higher,
# in those cases we use np.sort
try:
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
except AttributeError:
atmp = np.sort(a, axis)
return atmp[lowercut:uppercut]
def trim_mean(a, proportiontocut, axis=0):
"""
Return mean of array after trimming distribution from both tails.
If `proportiontocut` = 0.1, slices off 'leftmost' and 'rightmost' 10% of
scores. The input is sorted before slicing. Slices off less if proportion
results in a non-integer slice index (i.e., conservatively slices off
`proportiontocut` ).
Parameters
----------
a : array_like
Input array
proportiontocut : float
Fraction to cut off of both tails of the distribution
axis : int or None, optional
Axis along which the trimmed means are computed. Default is 0.
If None, compute over the whole array `a`.
Returns
-------
trim_mean : ndarray
Mean of trimmed array.
See Also
--------
trimboth
tmean : compute the trimmed mean ignoring values outside given `limits`.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.trim_mean(x, 0.1)
9.5
>>> x2 = x.reshape(5, 4)
>>> x2
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15],
[16, 17, 18, 19]])
>>> stats.trim_mean(x2, 0.25)
array([ 8., 9., 10., 11.])
>>> stats.trim_mean(x2, 0.25, axis=1)
array([ 1.5, 5.5, 9.5, 13.5, 17.5])
"""
a = np.asarray(a)
if a.size == 0:
return np.nan
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
lowercut = int(proportiontocut * nobs)
uppercut = nobs - lowercut
if (lowercut > uppercut):
raise ValueError("Proportion too big.")
# np.partition is preferred but it only exist in numpy 1.8.0 and higher,
# in those cases we use np.sort
try:
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
except AttributeError:
atmp = np.sort(a, axis)
sl = [slice(None)] * atmp.ndim
sl[axis] = slice(lowercut, uppercut)
return np.mean(atmp[sl], axis=axis)
F_onewayResult = namedtuple('F_onewayResult', ('statistic', 'pvalue'))
def f_oneway(*args):
"""
Performs a 1-way ANOVA.
The one-way ANOVA tests the null hypothesis that two or more groups have
the same population mean. The test is applied to samples from two or
more groups, possibly with differing sizes.
Parameters
----------
sample1, sample2, ... : array_like
The sample measurements for each group.
Returns
-------
statistic : float
The computed F-value of the test.
pvalue : float
The associated p-value from the F-distribution.
Notes
-----
The ANOVA test has important assumptions that must be satisfied in order
for the associated p-value to be valid.
1. The samples are independent.
2. Each sample is from a normally distributed population.
3. The population standard deviations of the groups are all equal. This
property is known as homoscedasticity.
If these assumptions are not true for a given set of data, it may still be
possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`) although
with some loss of power.
The algorithm is from Heiman[2], pp.394-7.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 14.
http://faculty.vassar.edu/lowry/ch14pt1.html
.. [2] Heiman, G.W. Research Methods in Statistics. 2002.
.. [3] McDonald, G. H. "Handbook of Biological Statistics", One-way ANOVA.
           http://www.biostathandbook.com/onewayanova.html
Examples
--------
>>> import scipy.stats as stats
[3]_ Here are some data on a shell measurement (the length of the anterior
adductor muscle scar, standardized by dividing by length) in the mussel
Mytilus trossulus from five locations: Tillamook, Oregon; Newport, Oregon;
Petersburg, Alaska; Magadan, Russia; and Tvarminne, Finland, taken from a
much larger data set used in McDonald et al. (1991).
>>> tillamook = [0.0571, 0.0813, 0.0831, 0.0976, 0.0817, 0.0859, 0.0735,
... 0.0659, 0.0923, 0.0836]
>>> newport = [0.0873, 0.0662, 0.0672, 0.0819, 0.0749, 0.0649, 0.0835,
... 0.0725]
>>> petersburg = [0.0974, 0.1352, 0.0817, 0.1016, 0.0968, 0.1064, 0.105]
>>> magadan = [0.1033, 0.0915, 0.0781, 0.0685, 0.0677, 0.0697, 0.0764,
... 0.0689]
>>> tvarminne = [0.0703, 0.1026, 0.0956, 0.0973, 0.1039, 0.1045]
>>> stats.f_oneway(tillamook, newport, petersburg, magadan, tvarminne)
(7.1210194716424473, 0.00028122423145345439)
"""
args = [np.asarray(arg, dtype=float) for arg in args]
# ANOVA on N groups, each in its own array
num_groups = len(args)
alldata = np.concatenate(args)
bign = len(alldata)
# Determine the mean of the data, and subtract that from all inputs to a
    # variance (via sum_of_sq / sq_of_sum) calculation. Variance is invariant
# to a shift in location, and centering all data around zero vastly
# improves numerical stability.
offset = alldata.mean()
alldata -= offset
sstot = _sum_of_squares(alldata) - (_square_of_sums(alldata) / float(bign))
ssbn = 0
for a in args:
ssbn += _square_of_sums(a - offset) / float(len(a))
# Naming: variables ending in bn/b are for "between treatments", wn/w are
# for "within treatments"
ssbn -= (_square_of_sums(alldata) / float(bign))
sswn = sstot - ssbn
dfbn = num_groups - 1
dfwn = bign - num_groups
msb = ssbn / float(dfbn)
msw = sswn / float(dfwn)
f = msb / msw
prob = special.fdtrc(dfbn, dfwn, f) # equivalent to stats.f.sf
return F_onewayResult(f, prob)
def pearsonr(x, y):
"""
Calculates a Pearson correlation coefficient and the p-value for testing
non-correlation.
The Pearson correlation coefficient measures the linear relationship
between two datasets. Strictly speaking, Pearson's correlation requires
that each dataset be normally distributed, and not necessarily zero-mean.
Like other correlation coefficients, this one varies between -1 and +1
with 0 implying no correlation. Correlations of -1 or +1 imply an exact
linear relationship. Positive correlations imply that as x increases, so
does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Pearson correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
Parameters
----------
x : (N,) array_like
Input
y : (N,) array_like
Input
Returns
-------
(Pearson's correlation coefficient,
2-tailed p-value)
References
----------
http://www.statsoft.com/textbook/glosp.html#Pearson%20Correlation
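    Examples
    --------
    A small sketch; a perfectly linear relationship gives a coefficient of
    exactly +1 and a p-value of 0:
    >>> from scipy import stats
    >>> r, p = stats.pearsonr([1, 2, 3, 4, 5], [2, 4, 6, 8, 10])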
"""
# x and y should have same length.
x = np.asarray(x)
y = np.asarray(y)
n = len(x)
mx = x.mean()
my = y.mean()
xm, ym = x - mx, y - my
r_num = np.add.reduce(xm * ym)
r_den = np.sqrt(_sum_of_squares(xm) * _sum_of_squares(ym))
r = r_num / r_den
# Presumably, if abs(r) > 1, then it is only some small artifact of floating
# point arithmetic.
r = max(min(r, 1.0), -1.0)
df = n - 2
if abs(r) == 1.0:
prob = 0.0
else:
t_squared = r**2 * (df / ((1.0 - r) * (1.0 + r)))
prob = _betai(0.5*df, 0.5, df/(df+t_squared))
return r, prob
def fisher_exact(table, alternative='two-sided'):
"""Performs a Fisher exact test on a 2x2 contingency table.
Parameters
----------
table : array_like of ints
A 2x2 contingency table. Elements should be non-negative integers.
alternative : {'two-sided', 'less', 'greater'}, optional
Which alternative hypothesis to the null hypothesis the test uses.
Default is 'two-sided'.
Returns
-------
oddsratio : float
        This is the prior odds ratio and not a posterior estimate.
p_value : float
P-value, the probability of obtaining a distribution at least as
extreme as the one that was actually observed, assuming that the
null hypothesis is true.
See Also
--------
chi2_contingency : Chi-square test of independence of variables in a
contingency table.
Notes
-----
The calculated odds ratio is different from the one R uses. This scipy
implementation returns the (more common) "unconditional Maximum
Likelihood Estimate", while R uses the "conditional Maximum Likelihood
Estimate".
For tables with large numbers, the (inexact) chi-square test implemented
in the function `chi2_contingency` can also be used.
Examples
--------
Say we spend a few days counting whales and sharks in the Atlantic and
Indian oceans. In the Atlantic ocean we find 8 whales and 1 shark, in the
Indian ocean 2 whales and 5 sharks. Then our contingency table is::
Atlantic Indian
whales 8 2
sharks 1 5
We use this table to find the p-value:
>>> import scipy.stats as stats
>>> oddsratio, pvalue = stats.fisher_exact([[8, 2], [1, 5]])
>>> pvalue
0.0349...
The probability that we would observe this or an even more imbalanced ratio
by chance is about 3.5%. A commonly used significance level is 5%--if we
adopt that, we can therefore conclude that our observed imbalance is
statistically significant; whales prefer the Atlantic while sharks prefer
the Indian ocean.
"""
hypergeom = distributions.hypergeom
c = np.asarray(table, dtype=np.int64) # int32 is not enough for the algorithm
if not c.shape == (2, 2):
raise ValueError("The input `table` must be of shape (2, 2).")
if np.any(c < 0):
raise ValueError("All values in `table` must be nonnegative.")
if 0 in c.sum(axis=0) or 0 in c.sum(axis=1):
# If both values in a row or column are zero, the p-value is 1 and
# the odds ratio is NaN.
return np.nan, 1.0
if c[1,0] > 0 and c[0,1] > 0:
oddsratio = c[0,0] * c[1,1] / float(c[1,0] * c[0,1])
else:
oddsratio = np.inf
n1 = c[0,0] + c[0,1]
n2 = c[1,0] + c[1,1]
n = c[0,0] + c[1,0]
def binary_search(n, n1, n2, side):
"""Binary search for where to begin lower/upper halves in two-sided
test.
"""
if side == "upper":
minval = mode
maxval = n
else:
minval = 0
maxval = mode
guess = -1
while maxval - minval > 1:
if maxval == minval + 1 and guess == minval:
guess = maxval
else:
guess = (maxval + minval) // 2
pguess = hypergeom.pmf(guess, n1 + n2, n1, n)
if side == "upper":
ng = guess - 1
else:
ng = guess + 1
if pguess <= pexact < hypergeom.pmf(ng, n1 + n2, n1, n):
break
elif pguess < pexact:
maxval = guess
else:
minval = guess
if guess == -1:
guess = minval
if side == "upper":
while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
guess -= 1
while hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
guess += 1
else:
while hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
guess += 1
while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
guess -= 1
return guess
if alternative == 'less':
pvalue = hypergeom.cdf(c[0,0], n1 + n2, n1, n)
elif alternative == 'greater':
# Same formula as the 'less' case, but with the second column.
pvalue = hypergeom.cdf(c[0,1], n1 + n2, n1, c[0,1] + c[1,1])
elif alternative == 'two-sided':
mode = int(float((n + 1) * (n1 + 1)) / (n1 + n2 + 2))
pexact = hypergeom.pmf(c[0,0], n1 + n2, n1, n)
pmode = hypergeom.pmf(mode, n1 + n2, n1, n)
epsilon = 1 - 1e-4
if np.abs(pexact - pmode) / np.maximum(pexact, pmode) <= 1 - epsilon:
return oddsratio, 1.
elif c[0,0] < mode:
plower = hypergeom.cdf(c[0,0], n1 + n2, n1, n)
if hypergeom.pmf(n, n1 + n2, n1, n) > pexact / epsilon:
return oddsratio, plower
guess = binary_search(n, n1, n2, "upper")
pvalue = plower + hypergeom.sf(guess - 1, n1 + n2, n1, n)
else:
pupper = hypergeom.sf(c[0,0] - 1, n1 + n2, n1, n)
if hypergeom.pmf(0, n1 + n2, n1, n) > pexact / epsilon:
return oddsratio, pupper
guess = binary_search(n, n1, n2, "lower")
pvalue = pupper + hypergeom.cdf(guess, n1 + n2, n1, n)
else:
msg = "`alternative` should be one of {'two-sided', 'less', 'greater'}"
raise ValueError(msg)
if pvalue > 1.0:
pvalue = 1.0
return oddsratio, pvalue
SpearmanrResult = namedtuple('SpearmanrResult', ('correlation', 'pvalue'))
def spearmanr(a, b=None, axis=0, nan_policy='propagate'):
"""
Calculates a Spearman rank-order correlation coefficient and the p-value
to test for non-correlation.
The Spearman correlation is a nonparametric measure of the monotonicity
of the relationship between two datasets. Unlike the Pearson correlation,
the Spearman correlation does not assume that both datasets are normally
distributed. Like other correlation coefficients, this one varies
between -1 and +1 with 0 implying no correlation. Correlations of -1 or
+1 imply an exact monotonic relationship. Positive correlations imply that
as x increases, so does y. Negative correlations imply that as x
increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
Parameters
----------
a, b : 1D or 2D array_like, b is optional
One or two 1-D or 2-D arrays containing multiple variables and
observations. When these are 1-D, each represents a vector of
observations of a single variable. For the behavior in the 2-D case,
see under ``axis``, below.
Both arrays need to have the same length in the ``axis`` dimension.
axis : int or None, optional
If axis=0 (default), then each column represents a variable, with
observations in the rows. If axis=1, the relationship is transposed:
each row represents a variable, while the columns contain observations.
If axis=None, then both arrays will be raveled.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
correlation : float or ndarray (2-D square)
Spearman correlation matrix or correlation coefficient (if only 2
        variables are given as parameters). Correlation matrix is square with
length equal to total number of variables (columns or rows) in a and b
combined.
pvalue : float
The two-sided p-value for a hypothesis test whose null hypothesis is
that two sets of data are uncorrelated, has same dimension as rho.
Notes
-----
Changes in scipy 0.8.0: rewrite to add tie-handling, and axis.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Section 14.7
Examples
--------
>>> from scipy import stats
>>> stats.spearmanr([1,2,3,4,5], [5,6,7,8,7])
(0.82078268166812329, 0.088587005313543798)
>>> np.random.seed(1234321)
>>> x2n = np.random.randn(100, 2)
>>> y2n = np.random.randn(100, 2)
>>> stats.spearmanr(x2n)
(0.059969996999699973, 0.55338590803773591)
>>> stats.spearmanr(x2n[:,0], x2n[:,1])
(0.059969996999699973, 0.55338590803773591)
>>> rho, pval = stats.spearmanr(x2n, y2n)
>>> rho
array([[ 1. , 0.05997 , 0.18569457, 0.06258626],
[ 0.05997 , 1. , 0.110003 , 0.02534653],
[ 0.18569457, 0.110003 , 1. , 0.03488749],
[ 0.06258626, 0.02534653, 0.03488749, 1. ]])
>>> pval
array([[ 0. , 0.55338591, 0.06435364, 0.53617935],
[ 0.55338591, 0. , 0.27592895, 0.80234077],
[ 0.06435364, 0.27592895, 0. , 0.73039992],
[ 0.53617935, 0.80234077, 0.73039992, 0. ]])
>>> rho, pval = stats.spearmanr(x2n.T, y2n.T, axis=1)
>>> rho
array([[ 1. , 0.05997 , 0.18569457, 0.06258626],
[ 0.05997 , 1. , 0.110003 , 0.02534653],
[ 0.18569457, 0.110003 , 1. , 0.03488749],
[ 0.06258626, 0.02534653, 0.03488749, 1. ]])
>>> stats.spearmanr(x2n, y2n, axis=None)
(0.10816770419260482, 0.1273562188027364)
>>> stats.spearmanr(x2n.ravel(), y2n.ravel())
(0.10816770419260482, 0.1273562188027364)
>>> xint = np.random.randint(10, size=(100, 2))
>>> stats.spearmanr(xint)
(0.052760927029710199, 0.60213045837062351)
"""
a, axisout = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
b = ma.masked_invalid(b)
return mstats_basic.spearmanr(a, b, axis)
if contains_nan and nan_policy == 'propagate':
return SpearmanrResult(np.nan, np.nan)
if a.size <= 1:
return SpearmanrResult(np.nan, np.nan)
ar = np.apply_along_axis(rankdata, axisout, a)
br = None
if b is not None:
b, axisout = _chk_asarray(b, axis)
contains_nan, nan_policy = _contains_nan(b, nan_policy)
if contains_nan and nan_policy == 'omit':
b = ma.masked_invalid(b)
return mstats_basic.spearmanr(a, b, axis)
if contains_nan and nan_policy == 'propagate':
return SpearmanrResult(np.nan, np.nan)
br = np.apply_along_axis(rankdata, axisout, b)
n = a.shape[axisout]
rs = np.corrcoef(ar, br, rowvar=axisout)
olderr = np.seterr(divide='ignore') # rs can have elements equal to 1
try:
t = rs * np.sqrt((n-2) / ((rs+1.0)*(1.0-rs)))
finally:
np.seterr(**olderr)
prob = 2 * distributions.t.sf(np.abs(t), n-2)
if rs.shape == (2, 2):
return SpearmanrResult(rs[1, 0], prob[1, 0])
else:
return SpearmanrResult(rs, prob)
PointbiserialrResult = namedtuple('PointbiserialrResult',
('correlation', 'pvalue'))
def pointbiserialr(x, y):
r"""
Calculates a point biserial correlation coefficient and its p-value.
The point biserial correlation is used to measure the relationship
between a binary variable, x, and a continuous variable, y. Like other
correlation coefficients, this one varies between -1 and +1 with 0
implying no correlation. Correlations of -1 or +1 imply a determinative
relationship.
This function uses a shortcut formula but produces the same result as
`pearsonr`.
Parameters
----------
x : array_like of bools
Input array.
y : array_like
Input array.
Returns
-------
correlation : float
R value
pvalue : float
2-tailed p-value
Notes
-----
`pointbiserialr` uses a t-test with ``n-1`` degrees of freedom.
    It is equivalent to `pearsonr`.
The value of the point-biserial correlation can be calculated from:
.. math::
        r_{pb} = \frac{\overline{Y_{1}} -
                 \overline{Y_{0}}}{s_{y}}\sqrt{\frac{N_{0} N_{1}}{N (N - 1)}}
Where :math:`Y_{0}` and :math:`Y_{1}` are means of the metric
observations coded 0 and 1 respectively; :math:`N_{0}` and :math:`N_{1}`
are number of observations coded 0 and 1 respectively; :math:`N` is the
total number of observations and :math:`s_{y}` is the standard
deviation of all the metric observations.
A value of :math:`r_{pb}` that is significantly different from zero is
completely equivalent to a significant difference in means between the two
    groups. Thus, an independent-groups t-test with :math:`N-2` degrees of
freedom may be used to test whether :math:`r_{pb}` is nonzero. The
relation between the t-statistic for comparing two independent groups and
:math:`r_{pb}` is given by:
.. math::
t = \sqrt{N - 2}\frac{r_{pb}}{\sqrt{1 - r^{2}_{pb}}}
References
----------
.. [1] J. Lev, "The Point Biserial Coefficient of Correlation", Ann. Math.
Statist., Vol. 20, no.1, pp. 125-126, 1949.
.. [2] R.F. Tate, "Correlation Between a Discrete and a Continuous
Variable. Point-Biserial Correlation.", Ann. Math. Statist., Vol. 25,
np. 3, pp. 603-607, 1954.
.. [3] http://onlinelibrary.wiley.com/doi/10.1002/9781118445112.stat06227/full
Examples
--------
>>> from scipy import stats
>>> a = np.array([0, 0, 0, 1, 1, 1, 1])
>>> b = np.arange(7)
>>> stats.pointbiserialr(a, b)
(0.8660254037844386, 0.011724811003954652)
>>> stats.pearsonr(a, b)
(0.86602540378443871, 0.011724811003954626)
>>> np.corrcoef(a, b)
array([[ 1. , 0.8660254],
[ 0.8660254, 1. ]])
"""
rpb, prob = pearsonr(x, y)
return PointbiserialrResult(rpb, prob)
KendalltauResult = namedtuple('KendalltauResult', ('correlation', 'pvalue'))
def kendalltau(x, y, initial_lexsort=True, nan_policy='propagate'):
"""
Calculates Kendall's tau, a correlation measure for ordinal data.
Kendall's tau is a measure of the correspondence between two rankings.
Values close to 1 indicate strong agreement, values close to -1 indicate
strong disagreement. This is the tau-b version of Kendall's tau which
accounts for ties.
Parameters
----------
x, y : array_like
Arrays of rankings, of the same shape. If arrays are not 1-D, they will
be flattened to 1-D.
initial_lexsort : bool, optional
Whether to use lexsort or quicksort as the sorting method for the
initial sort of the inputs. Default is lexsort (True), for which
`kendalltau` is of complexity O(n log(n)). If False, the complexity is
O(n^2), but with a smaller pre-factor (so quicksort may be faster for
small arrays).
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
correlation : float
The tau statistic.
pvalue : float
The two-sided p-value for a hypothesis test whose null hypothesis is
an absence of association, tau = 0.
See also
--------
spearmanr : Calculates a Spearman rank-order correlation coefficient.
theilslopes : Computes the Theil-Sen estimator for a set of points (x, y).
Notes
-----
The definition of Kendall's tau that is used is::
tau = (P - Q) / sqrt((P + Q + T) * (P + Q + U))
where P is the number of concordant pairs, Q the number of discordant
pairs, T the number of ties only in `x`, and U the number of ties only in
`y`. If a tie occurs for the same pair in both `x` and `y`, it is not
added to either T or U.
References
----------
W.R. Knight, "A Computer Method for Calculating Kendall's Tau with
Ungrouped Data", Journal of the American Statistical Association, Vol. 61,
No. 314, Part 1, pp. 436-439, 1966.
Examples
--------
>>> from scipy import stats
>>> x1 = [12, 2, 1, 12, 2]
>>> x2 = [1, 4, 7, 1, 0]
>>> tau, p_value = stats.kendalltau(x1, x2)
>>> tau
-0.47140452079103173
>>> p_value
0.24821309157521476
"""
x = np.asarray(x).ravel()
y = np.asarray(y).ravel()
if x.size != y.size:
raise ValueError("All inputs to `kendalltau` must be of the same size, "
"found x-size %s and y-size %s" % (x.size, y.size))
elif not x.size or not y.size:
return KendalltauResult(np.nan, np.nan) # Return NaN if arrays are empty
# check both x and y
    contains_nan_x, nan_policy = _contains_nan(x, nan_policy)
    contains_nan_y, nan_policy = _contains_nan(y, nan_policy)
    contains_nan = contains_nan_x or contains_nan_y
if contains_nan and nan_policy == 'propagate':
return KendalltauResult(np.nan, np.nan)
elif contains_nan and nan_policy == 'omit':
x = ma.masked_invalid(x)
y = ma.masked_invalid(y)
return mstats_basic.kendalltau(x, y)
n = np.int64(len(x))
temp = list(range(n)) # support structure used by mergesort
# this closure recursively sorts sections of perm[] by comparing
# elements of y[perm[]] using temp[] as support
# returns the number of swaps required by an equivalent bubble sort
def mergesort(offs, length):
exchcnt = 0
if length == 1:
return 0
if length == 2:
if y[perm[offs]] <= y[perm[offs+1]]:
return 0
t = perm[offs]
perm[offs] = perm[offs+1]
perm[offs+1] = t
return 1
length0 = length // 2
length1 = length - length0
middle = offs + length0
exchcnt += mergesort(offs, length0)
exchcnt += mergesort(middle, length1)
if y[perm[middle - 1]] < y[perm[middle]]:
return exchcnt
# merging
i = j = k = 0
while j < length0 or k < length1:
if k >= length1 or (j < length0 and y[perm[offs + j]] <=
y[perm[middle + k]]):
temp[i] = perm[offs + j]
d = i - j
j += 1
else:
temp[i] = perm[middle + k]
d = (offs + i) - (middle + k)
k += 1
if d > 0:
exchcnt += d
i += 1
perm[offs:offs+length] = temp[0:length]
return exchcnt
# initial sort on values of x and, if tied, on values of y
if initial_lexsort:
# sort implemented as mergesort, worst case: O(n log(n))
perm = np.lexsort((y, x))
else:
# sort implemented as quicksort, 30% faster but with worst case: O(n^2)
perm = list(range(n))
perm.sort(key=lambda a: (x[a], y[a]))
# compute joint ties
first = 0
t = 0
for i in xrange(1, n):
if x[perm[first]] != x[perm[i]] or y[perm[first]] != y[perm[i]]:
t += ((i - first) * (i - first - 1)) // 2
first = i
t += ((n - first) * (n - first - 1)) // 2
# compute ties in x
first = 0
u = 0
for i in xrange(1, n):
if x[perm[first]] != x[perm[i]]:
u += ((i - first) * (i - first - 1)) // 2
first = i
u += ((n - first) * (n - first - 1)) // 2
# count exchanges
exchanges = mergesort(0, n)
# compute ties in y after mergesort with counting
first = 0
v = 0
for i in xrange(1, n):
if y[perm[first]] != y[perm[i]]:
v += ((i - first) * (i - first - 1)) // 2
first = i
v += ((n - first) * (n - first - 1)) // 2
tot = (n * (n - 1)) // 2
if tot == u or tot == v:
# Special case for all ties in both ranks
return KendalltauResult(np.nan, np.nan)
# Prevent overflow; equal to np.sqrt((tot - u) * (tot - v))
denom = np.exp(0.5 * (np.log(tot - u) + np.log(tot - v)))
tau = ((tot - (v + u - t)) - 2.0 * exchanges) / denom
# what follows reproduces the ending of Gary Strangman's original
# stats.kendalltau() in SciPy
svar = (4.0 * n + 10.0) / (9.0 * n * (n - 1))
z = tau / np.sqrt(svar)
prob = special.erfc(np.abs(z) / 1.4142136)
return KendalltauResult(tau, prob)
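def _kendalltau_bruteforce_sketch(x, y):
    # Illustrative, hedged O(n^2) restatement of the tau-b definition given in
    # the Notes above (not the original implementation):
    #     tau = (P - Q) / sqrt((P + Q + T) * (P + Q + U))
    # P/Q count concordant/discordant pairs, T ties only in x, U ties only in
    # y; pairs tied in both x and y are counted in neither T nor U.
    x = np.asarray(x, dtype=float).ravel()
    y = np.asarray(y, dtype=float).ravel()
    P = Q = T = U = 0
    for i in range(len(x)):
        for j in range(i + 1, len(x)):
            dx, dy = x[i] - x[j], y[i] - y[j]
            if dx == 0 and dy == 0:
                continue          # joint tie
            elif dx == 0:
                T += 1            # tie only in x
            elif dy == 0:
                U += 1            # tie only in y
            elif dx * dy > 0:
                P += 1            # concordant pair
            else:
                Q += 1            # discordant pair
    return (P - Q) / np.sqrt((P + Q + T) * (P + Q + U))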
#####################################
# INFERENTIAL STATISTICS #
#####################################
Ttest_1sampResult = namedtuple('Ttest_1sampResult', ('statistic', 'pvalue'))
def ttest_1samp(a, popmean, axis=0, nan_policy='propagate'):
"""
Calculates the T-test for the mean of ONE group of scores.
This is a two-sided test for the null hypothesis that the expected value
(mean) of a sample of independent observations `a` is equal to the given
population mean, `popmean`.
Parameters
----------
a : array_like
sample observation
popmean : float or array_like
        expected value in null hypothesis; if array_like, then it must have the
same shape as `a` excluding the axis dimension
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float or array
t-statistic
pvalue : float or array
two-tailed p-value
Examples
--------
>>> from scipy import stats
>>> np.random.seed(7654567) # fix seed to get the same result
>>> rvs = stats.norm.rvs(loc=5, scale=10, size=(50,2))
Test if mean of random sample is equal to true mean, and different mean.
We reject the null hypothesis in the second case and don't reject it in
the first case.
>>> stats.ttest_1samp(rvs,5.0)
(array([-0.68014479, -0.04323899]), array([ 0.49961383, 0.96568674]))
>>> stats.ttest_1samp(rvs,0.0)
(array([ 2.77025808, 4.11038784]), array([ 0.00789095, 0.00014999]))
Examples using axis and non-scalar dimension for population mean.
>>> stats.ttest_1samp(rvs,[5.0,0.0])
(array([-0.68014479, 4.11038784]), array([ 4.99613833e-01, 1.49986458e-04]))
>>> stats.ttest_1samp(rvs.T,[5.0,0.0],axis=1)
(array([-0.68014479, 4.11038784]), array([ 4.99613833e-01, 1.49986458e-04]))
>>> stats.ttest_1samp(rvs,[[5.0],[0.0]])
(array([[-0.68014479, -0.04323899],
[ 2.77025808, 4.11038784]]), array([[ 4.99613833e-01, 9.65686743e-01],
[ 7.89094663e-03, 1.49986458e-04]]))
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.ttest_1samp(a, popmean, axis)
n = a.shape[axis]
df = n - 1
d = np.mean(a, axis) - popmean
v = np.var(a, axis, ddof=1)
denom = np.sqrt(v / float(n))
with np.errstate(divide='ignore', invalid='ignore'):
t = np.divide(d, denom)
t, prob = _ttest_finish(df, t)
return Ttest_1sampResult(t, prob)
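def _ttest_1samp_by_hand_sketch(a, popmean):
    # Hedged illustration (not part of the original API) of the statistic that
    # ttest_1samp computes: t = (mean(a) - popmean) / (s / sqrt(n)) with the
    # ddof=1 sample standard deviation s and n - 1 degrees of freedom.
    # Relies on the module-level `np` and `distributions` imports.
    a = np.asarray(a, dtype=float)
    n = a.size
    t = (a.mean() - popmean) / (a.std(ddof=1) / np.sqrt(n))
    prob = distributions.t.sf(np.abs(t), n - 1) * 2  # two-sided, as in _ttest_finish
    return t, prob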
def _ttest_finish(df, t):
"""Common code between all 3 t-test functions."""
prob = distributions.t.sf(np.abs(t), df) * 2 # use np.abs to get upper tail
if t.ndim == 0:
t = t[()]
return t, prob
def _ttest_ind_from_stats(mean1, mean2, denom, df):
d = mean1 - mean2
with np.errstate(divide='ignore', invalid='ignore'):
t = np.divide(d, denom)
t, prob = _ttest_finish(df, t)
return (t, prob)
def _unequal_var_ttest_denom(v1, n1, v2, n2):
vn1 = v1 / n1
vn2 = v2 / n2
with np.errstate(divide='ignore', invalid='ignore'):
df = (vn1 + vn2)**2 / (vn1**2 / (n1 - 1) + vn2**2 / (n2 - 1))
# If df is undefined, variances are zero (assumes n1 > 0 & n2 > 0).
# Hence it doesn't matter what df is as long as it's not NaN.
df = np.where(np.isnan(df), 1, df)
denom = np.sqrt(vn1 + vn2)
return df, denom
def _equal_var_ttest_denom(v1, n1, v2, n2):
df = n1 + n2 - 2
svar = ((n1 - 1) * v1 + (n2 - 1) * v2) / float(df)
denom = np.sqrt(svar * (1.0 / n1 + 1.0 / n2))
return df, denom
Ttest_indResult = namedtuple('Ttest_indResult', ('statistic', 'pvalue'))
def ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2,
equal_var=True):
"""
T-test for means of two independent samples from descriptive statistics.
This is a two-sided test for the null hypothesis that 2 independent samples
have identical average (expected) values.
Parameters
----------
mean1 : array_like
The mean(s) of sample 1.
std1 : array_like
The standard deviation(s) of sample 1.
nobs1 : array_like
The number(s) of observations of sample 1.
mean2 : array_like
The mean(s) of sample 2
std2 : array_like
The standard deviations(s) of sample 2.
nobs2 : array_like
The number(s) of observations of sample 2.
equal_var : bool, optional
If True (default), perform a standard independent 2 sample test
that assumes equal population variances [1]_.
If False, perform Welch's t-test, which does not assume equal
population variance [2]_.
Returns
-------
statistic : float or array
The calculated t-statistics
pvalue : float or array
The two-tailed p-value.
See also
--------
scipy.stats.ttest_ind
Notes
-----
.. versionadded:: 0.16.0
References
----------
.. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
.. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test
"""
if equal_var:
df, denom = _equal_var_ttest_denom(std1**2, nobs1, std2**2, nobs2)
else:
df, denom = _unequal_var_ttest_denom(std1**2, nobs1,
std2**2, nobs2)
res = _ttest_ind_from_stats(mean1, mean2, denom, df)
return Ttest_indResult(*res)
def ttest_ind(a, b, axis=0, equal_var=True, nan_policy='propagate'):
"""
Calculates the T-test for the means of TWO INDEPENDENT samples of scores.
This is a two-sided test for the null hypothesis that 2 independent samples
have identical average (expected) values. This test assumes that the
populations have identical variances by default.
Parameters
----------
a, b : array_like
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
arrays, `a`, and `b`.
equal_var : bool, optional
If True (default), perform a standard independent 2 sample test
that assumes equal population variances [1]_.
If False, perform Welch's t-test, which does not assume equal
population variance [2]_.
.. versionadded:: 0.11.0
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float or array
The calculated t-statistic.
pvalue : float or array
The two-tailed p-value.
Notes
-----
We can use this test, if we observe two independent samples from
the same or different population, e.g. exam scores of boys and
girls or of two ethnic groups. The test measures whether the
average (expected) value differs significantly across samples. If
we observe a large p-value, for example larger than 0.05 or 0.1,
then we cannot reject the null hypothesis of identical average scores.
If the p-value is smaller than the threshold, e.g. 1%, 5% or 10%,
then we reject the null hypothesis of equal averages.
References
----------
.. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
.. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678)
Test with sample with identical means:
>>> rvs1 = stats.norm.rvs(loc=5,scale=10,size=500)
>>> rvs2 = stats.norm.rvs(loc=5,scale=10,size=500)
>>> stats.ttest_ind(rvs1,rvs2)
(0.26833823296239279, 0.78849443369564776)
>>> stats.ttest_ind(rvs1,rvs2, equal_var = False)
(0.26833823296239279, 0.78849452749500748)
`ttest_ind` underestimates p for unequal variances:
>>> rvs3 = stats.norm.rvs(loc=5, scale=20, size=500)
>>> stats.ttest_ind(rvs1, rvs3)
(-0.46580283298287162, 0.64145827413436174)
>>> stats.ttest_ind(rvs1, rvs3, equal_var = False)
(-0.46580283298287162, 0.64149646246569292)
When n1 != n2, the equal variance t-statistic is no longer equal to the
unequal variance t-statistic:
>>> rvs4 = stats.norm.rvs(loc=5, scale=20, size=100)
>>> stats.ttest_ind(rvs1, rvs4)
(-0.99882539442782481, 0.3182832709103896)
>>> stats.ttest_ind(rvs1, rvs4, equal_var = False)
(-0.69712570584654099, 0.48716927725402048)
T-test with different means, variance, and n:
>>> rvs5 = stats.norm.rvs(loc=8, scale=20, size=100)
>>> stats.ttest_ind(rvs1, rvs5)
(-1.4679669854490653, 0.14263895620529152)
>>> stats.ttest_ind(rvs1, rvs5, equal_var = False)
(-0.94365973617132992, 0.34744170334794122)
"""
a, b, axis = _chk2_asarray(a, b, axis)
# check both a and b
    contains_nan_a, nan_policy = _contains_nan(a, nan_policy)
    contains_nan_b, nan_policy = _contains_nan(b, nan_policy)
    contains_nan = contains_nan_a or contains_nan_b
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
b = ma.masked_invalid(b)
return mstats_basic.ttest_ind(a, b, axis, equal_var)
if a.size == 0 or b.size == 0:
return Ttest_indResult(np.nan, np.nan)
v1 = np.var(a, axis, ddof=1)
v2 = np.var(b, axis, ddof=1)
n1 = a.shape[axis]
n2 = b.shape[axis]
if equal_var:
df, denom = _equal_var_ttest_denom(v1, n1, v2, n2)
else:
df, denom = _unequal_var_ttest_denom(v1, n1, v2, n2)
res = _ttest_ind_from_stats(np.mean(a, axis), np.mean(b, axis), denom, df)
return Ttest_indResult(*res)
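def _ttest_ind_from_stats_demo_sketch():
    # Hedged usage sketch (illustrative only): feeding summary statistics to
    # ttest_ind_from_stats should reproduce what ttest_ind computes from the
    # raw samples, here for the default equal-variance case.
    a = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
    b = np.array([2.0, 3.0, 5.0, 7.0, 11.0])
    from_raw = ttest_ind(a, b)
    from_stats = ttest_ind_from_stats(a.mean(), a.std(ddof=1), a.size,
                                      b.mean(), b.std(ddof=1), b.size)
    return from_raw, from_stats  # statistic and pvalue agree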
Ttest_relResult = namedtuple('Ttest_relResult', ('statistic', 'pvalue'))
def ttest_rel(a, b, axis=0, nan_policy='propagate'):
"""
Calculates the T-test on TWO RELATED samples of scores, a and b.
This is a two-sided test for the null hypothesis that 2 related or
repeated samples have identical average (expected) values.
Parameters
----------
a, b : array_like
The arrays must have the same shape.
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
arrays, `a`, and `b`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float or array
t-statistic
pvalue : float or array
two-tailed p-value
Notes
-----
    Examples for use are scores of the same set of students in
different exams, or repeated sampling from the same units. The
test measures whether the average score differs significantly
across samples (e.g. exams). If we observe a large p-value, for
example greater than 0.05 or 0.1 then we cannot reject the null
hypothesis of identical average scores. If the p-value is smaller
than the threshold, e.g. 1%, 5% or 10%, then we reject the null
hypothesis of equal averages. Small p-values are associated with
large t-statistics.
References
----------
http://en.wikipedia.org/wiki/T-test#Dependent_t-test
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678) # fix random seed to get same numbers
>>> rvs1 = stats.norm.rvs(loc=5,scale=10,size=500)
>>> rvs2 = (stats.norm.rvs(loc=5,scale=10,size=500) +
... stats.norm.rvs(scale=0.2,size=500))
>>> stats.ttest_rel(rvs1,rvs2)
(0.24101764965300962, 0.80964043445811562)
>>> rvs3 = (stats.norm.rvs(loc=8,scale=10,size=500) +
... stats.norm.rvs(scale=0.2,size=500))
>>> stats.ttest_rel(rvs1,rvs3)
(-3.9995108708727933, 7.3082402191726459e-005)
"""
a, b, axis = _chk2_asarray(a, b, axis)
# check both a and b
    contains_nan_a, nan_policy = _contains_nan(a, nan_policy)
    contains_nan_b, nan_policy = _contains_nan(b, nan_policy)
    contains_nan = contains_nan_a or contains_nan_b
if contains_nan and nan_policy == 'omit':
        a = ma.masked_invalid(a)
        b = ma.masked_invalid(b)
        return mstats_basic.ttest_rel(a, b, axis)
if a.shape[axis] != b.shape[axis]:
raise ValueError('unequal length arrays')
if a.size == 0 or b.size == 0:
        return Ttest_relResult(np.nan, np.nan)
n = a.shape[axis]
df = float(n - 1)
d = (a - b).astype(np.float64)
v = np.var(d, axis, ddof=1)
dm = np.mean(d, axis)
denom = np.sqrt(v / float(n))
with np.errstate(divide='ignore', invalid='ignore'):
t = np.divide(dm, denom)
t, prob = _ttest_finish(df, t)
return Ttest_relResult(t, prob)
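def _ttest_rel_as_1samp_sketch():
    # Hedged identity check (illustrative only): the paired test above is the
    # one-sample test applied to the element-wise differences against a
    # population mean of zero, so the two calls below should agree.
    a = np.array([1.2, 2.4, 3.1, 4.8, 5.0])
    b = np.array([1.0, 2.0, 3.5, 4.0, 5.5])
    return ttest_rel(a, b), ttest_1samp(a - b, 0.0)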
KstestResult = namedtuple('KstestResult', ('statistic', 'pvalue'))
def kstest(rvs, cdf, args=(), N=20, alternative='two-sided', mode='approx'):
"""
Perform the Kolmogorov-Smirnov test for goodness of fit.
This performs a test of the distribution G(x) of an observed
random variable against a given distribution F(x). Under the null
hypothesis the two distributions are identical, G(x)=F(x). The
alternative hypothesis can be either 'two-sided' (default), 'less'
or 'greater'. The KS test is only valid for continuous distributions.
Parameters
----------
rvs : str, array or callable
If a string, it should be the name of a distribution in `scipy.stats`.
If an array, it should be a 1-D array of observations of random
variables.
If a callable, it should be a function to generate random variables;
it is required to have a keyword argument `size`.
cdf : str or callable
If a string, it should be the name of a distribution in `scipy.stats`.
If `rvs` is a string then `cdf` can be False or the same as `rvs`.
If a callable, that callable is used to calculate the cdf.
args : tuple, sequence, optional
Distribution parameters, used if `rvs` or `cdf` are strings.
N : int, optional
Sample size if `rvs` is string or callable. Default is 20.
alternative : {'two-sided', 'less','greater'}, optional
Defines the alternative hypothesis (see explanation above).
Default is 'two-sided'.
mode : 'approx' (default) or 'asymp', optional
Defines the distribution used for calculating the p-value.
- 'approx' : use approximation to exact distribution of test statistic
- 'asymp' : use asymptotic distribution of test statistic
Returns
-------
statistic : float
KS test statistic, either D, D+ or D-.
pvalue : float
One-tailed or two-tailed p-value.
Notes
-----
In the one-sided test, the alternative is that the empirical
cumulative distribution function of the random variable is "less"
or "greater" than the cumulative distribution function F(x) of the
hypothesis, ``G(x)<=F(x)``, resp. ``G(x)>=F(x)``.
Examples
--------
>>> from scipy import stats
>>> x = np.linspace(-15, 15, 9)
>>> stats.kstest(x, 'norm')
(0.44435602715924361, 0.038850142705171065)
>>> np.random.seed(987654321) # set random seed to get the same result
>>> stats.kstest('norm', False, N=100)
(0.058352892479417884, 0.88531190944151261)
The above lines are equivalent to:
>>> np.random.seed(987654321)
>>> stats.kstest(stats.norm.rvs(size=100), 'norm')
(0.058352892479417884, 0.88531190944151261)
*Test against one-sided alternative hypothesis*
Shift distribution to larger values, so that ``cdf_dgp(x) < norm.cdf(x)``:
>>> np.random.seed(987654321)
>>> x = stats.norm.rvs(loc=0.2, size=100)
>>> stats.kstest(x,'norm', alternative = 'less')
(0.12464329735846891, 0.040989164077641749)
Reject equal distribution against alternative hypothesis: less
>>> stats.kstest(x,'norm', alternative = 'greater')
(0.0072115233216311081, 0.98531158590396395)
Don't reject equal distribution against alternative hypothesis: greater
>>> stats.kstest(x,'norm', mode='asymp')
(0.12464329735846891, 0.08944488871182088)
*Testing t distributed random variables against normal distribution*
With 100 degrees of freedom the t distribution looks close to the normal
distribution, and the K-S test does not reject the hypothesis that the
sample came from the normal distribution:
>>> np.random.seed(987654321)
>>> stats.kstest(stats.t.rvs(100,size=100),'norm')
(0.072018929165471257, 0.67630062862479168)
With 3 degrees of freedom the t distribution looks sufficiently different
from the normal distribution, that we can reject the hypothesis that the
sample came from the normal distribution at the 10% level:
>>> np.random.seed(987654321)
>>> stats.kstest(stats.t.rvs(3,size=100),'norm')
(0.131016895759829, 0.058826222555312224)
"""
if isinstance(rvs, string_types):
if (not cdf) or (cdf == rvs):
cdf = getattr(distributions, rvs).cdf
rvs = getattr(distributions, rvs).rvs
else:
raise AttributeError("if rvs is string, cdf has to be the "
"same distribution")
if isinstance(cdf, string_types):
cdf = getattr(distributions, cdf).cdf
if callable(rvs):
kwds = {'size': N}
vals = np.sort(rvs(*args, **kwds))
else:
vals = np.sort(rvs)
N = len(vals)
cdfvals = cdf(vals, *args)
# to not break compatibility with existing code
if alternative == 'two_sided':
alternative = 'two-sided'
if alternative in ['two-sided', 'greater']:
Dplus = (np.arange(1.0, N + 1)/N - cdfvals).max()
if alternative == 'greater':
return KstestResult(Dplus, distributions.ksone.sf(Dplus, N))
if alternative in ['two-sided', 'less']:
Dmin = (cdfvals - np.arange(0.0, N)/N).max()
if alternative == 'less':
return KstestResult(Dmin, distributions.ksone.sf(Dmin, N))
if alternative == 'two-sided':
D = np.max([Dplus, Dmin])
if mode == 'asymp':
return KstestResult(D, distributions.kstwobign.sf(D * np.sqrt(N)))
if mode == 'approx':
pval_two = distributions.kstwobign.sf(D * np.sqrt(N))
if N > 2666 or pval_two > 0.80 - N*0.3/1000:
return KstestResult(D,
distributions.kstwobign.sf(D * np.sqrt(N)))
else:
return KstestResult(D, 2 * distributions.ksone.sf(D, N))
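def _ks_statistic_by_hand_sketch(x):
    # Hedged sketch (illustrative only) of the two-sided statistic kstest
    # computes against the standard normal: D = max(D+, D-) over the sorted
    # sample, exactly as in the body above.  Relies on module-level imports.
    vals = np.sort(np.asarray(x, dtype=float))
    n = len(vals)
    cdfvals = distributions.norm.cdf(vals)
    d_plus = (np.arange(1.0, n + 1) / n - cdfvals).max()
    d_minus = (cdfvals - np.arange(0.0, n) / n).max()
    return max(d_plus, d_minus)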
# Map from names to lambda_ values used in power_divergence().
_power_div_lambda_names = {
"pearson": 1,
"log-likelihood": 0,
"freeman-tukey": -0.5,
"mod-log-likelihood": -1,
"neyman": -2,
"cressie-read": 2/3,
}
def _count(a, axis=None):
"""
Count the number of non-masked elements of an array.
This function behaves like np.ma.count(), but is much faster
for ndarrays.
"""
if hasattr(a, 'count'):
num = a.count(axis=axis)
if isinstance(num, np.ndarray) and num.ndim == 0:
# In some cases, the `count` method returns a scalar array (e.g.
# np.array(3)), but we want a plain integer.
num = int(num)
else:
if axis is None:
num = a.size
else:
num = a.shape[axis]
return num
Power_divergenceResult = namedtuple('Power_divergenceResult',
('statistic', 'pvalue'))
def power_divergence(f_obs, f_exp=None, ddof=0, axis=0, lambda_=None):
"""
Cressie-Read power divergence statistic and goodness of fit test.
This function tests the null hypothesis that the categorical data
has the given frequencies, using the Cressie-Read power divergence
statistic.
Parameters
----------
f_obs : array_like
Observed frequencies in each category.
f_exp : array_like, optional
Expected frequencies in each category. By default the categories are
assumed to be equally likely.
ddof : int, optional
"Delta degrees of freedom": adjustment to the degrees of freedom
for the p-value. The p-value is computed using a chi-squared
distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
is the number of observed frequencies. The default value of `ddof`
is 0.
axis : int or None, optional
The axis of the broadcast result of `f_obs` and `f_exp` along which to
apply the test. If axis is None, all values in `f_obs` are treated
as a single data set. Default is 0.
lambda_ : float or str, optional
`lambda_` gives the power in the Cressie-Read power divergence
statistic. The default is 1. For convenience, `lambda_` may be
assigned one of the following strings, in which case the
corresponding numerical value is used::
String Value Description
"pearson" 1 Pearson's chi-squared statistic.
In this case, the function is
equivalent to `stats.chisquare`.
"log-likelihood" 0 Log-likelihood ratio. Also known as
the G-test [3]_.
"freeman-tukey" -1/2 Freeman-Tukey statistic.
"mod-log-likelihood" -1 Modified log-likelihood ratio.
"neyman" -2 Neyman's statistic.
"cressie-read" 2/3 The power recommended in [5]_.
Returns
-------
statistic : float or ndarray
The Cressie-Read power divergence test statistic. The value is
        a float if `axis` is None or if `f_obs` and `f_exp` are 1-D.
pvalue : float or ndarray
The p-value of the test. The value is a float if `ddof` and the
return value `stat` are scalars.
See Also
--------
chisquare
Notes
-----
This test is invalid when the observed or expected frequencies in each
category are too small. A typical rule is that all of the observed
and expected frequencies should be at least 5.
When `lambda_` is less than zero, the formula for the statistic involves
dividing by `f_obs`, so a warning or error may be generated if any value
in `f_obs` is 0.
Similarly, a warning or error may be generated if any value in `f_exp` is
zero when `lambda_` >= 0.
The default degrees of freedom, k-1, are for the case when no parameters
of the distribution are estimated. If p parameters are estimated by
efficient maximum likelihood then the correct degrees of freedom are
k-1-p. If the parameters are estimated in a different way, then the
dof can be between k-1-p and k-1. However, it is also possible that
the asymptotic distribution is not a chisquare, in which case this
test is not appropriate.
This function handles masked arrays. If an element of `f_obs` or `f_exp`
is masked, then data at that position is ignored, and does not count
towards the size of the data set.
.. versionadded:: 0.13.0
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 8. http://faculty.vassar.edu/lowry/ch8pt1.html
.. [2] "Chi-squared test", http://en.wikipedia.org/wiki/Chi-squared_test
.. [3] "G-test", http://en.wikipedia.org/wiki/G-test
.. [4] Sokal, R. R. and Rohlf, F. J. "Biometry: the principles and
practice of statistics in biological research", New York: Freeman
(1981)
.. [5] Cressie, N. and Read, T. R. C., "Multinomial Goodness-of-Fit
Tests", J. Royal Stat. Soc. Series B, Vol. 46, No. 3 (1984),
pp. 440-464.
Examples
--------
(See `chisquare` for more examples.)
When just `f_obs` is given, it is assumed that the expected frequencies
are uniform and given by the mean of the observed frequencies. Here we
perform a G-test (i.e. use the log-likelihood ratio statistic):
>>> from scipy.stats import power_divergence
>>> power_divergence([16, 18, 16, 14, 12, 12], lambda_='log-likelihood')
(2.006573162632538, 0.84823476779463769)
The expected frequencies can be given with the `f_exp` argument:
>>> power_divergence([16, 18, 16, 14, 12, 12],
... f_exp=[16, 16, 16, 16, 16, 8],
... lambda_='log-likelihood')
(3.3281031458963746, 0.6495419288047497)
When `f_obs` is 2-D, by default the test is applied to each column.
>>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T
>>> obs.shape
(6, 2)
>>> power_divergence(obs, lambda_="log-likelihood")
(array([ 2.00657316, 6.77634498]), array([ 0.84823477, 0.23781225]))
By setting ``axis=None``, the test is applied to all data in the array,
which is equivalent to applying the test to the flattened array.
>>> power_divergence(obs, axis=None)
(23.31034482758621, 0.015975692534127565)
>>> power_divergence(obs.ravel())
(23.31034482758621, 0.015975692534127565)
`ddof` is the change to make to the default degrees of freedom.
>>> power_divergence([16, 18, 16, 14, 12, 12], ddof=1)
(2.0, 0.73575888234288467)
The calculation of the p-values is done by broadcasting the
test statistic with `ddof`.
>>> power_divergence([16, 18, 16, 14, 12, 12], ddof=[0,1,2])
(2.0, array([ 0.84914504, 0.73575888, 0.5724067 ]))
`f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has
shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting
`f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared
statistics, we must use ``axis=1``:
>>> power_divergence([16, 18, 16, 14, 12, 12],
... f_exp=[[16, 16, 16, 16, 16, 8],
... [8, 20, 20, 16, 12, 12]],
... axis=1)
(array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846]))
"""
# Convert the input argument `lambda_` to a numerical value.
if isinstance(lambda_, string_types):
if lambda_ not in _power_div_lambda_names:
names = repr(list(_power_div_lambda_names.keys()))[1:-1]
raise ValueError("invalid string for lambda_: {0!r}. Valid strings "
"are {1}".format(lambda_, names))
lambda_ = _power_div_lambda_names[lambda_]
elif lambda_ is None:
lambda_ = 1
f_obs = np.asanyarray(f_obs)
if f_exp is not None:
f_exp = np.atleast_1d(np.asanyarray(f_exp))
else:
# Compute the equivalent of
# f_exp = f_obs.mean(axis=axis, keepdims=True)
# Older versions of numpy do not have the 'keepdims' argument, so
# we have to do a little work to achieve the same result.
# Ignore 'invalid' errors so the edge case of a data set with length 0
# is handled without spurious warnings.
with np.errstate(invalid='ignore'):
f_exp = np.atleast_1d(f_obs.mean(axis=axis))
if axis is not None:
reduced_shape = list(f_obs.shape)
reduced_shape[axis] = 1
f_exp.shape = reduced_shape
# `terms` is the array of terms that are summed along `axis` to create
# the test statistic. We use some specialized code for a few special
# cases of lambda_.
if lambda_ == 1:
# Pearson's chi-squared statistic
terms = (f_obs - f_exp)**2 / f_exp
elif lambda_ == 0:
# Log-likelihood ratio (i.e. G-test)
terms = 2.0 * special.xlogy(f_obs, f_obs / f_exp)
elif lambda_ == -1:
# Modified log-likelihood ratio
terms = 2.0 * special.xlogy(f_exp, f_exp / f_obs)
else:
# General Cressie-Read power divergence.
terms = f_obs * ((f_obs / f_exp)**lambda_ - 1)
terms /= 0.5 * lambda_ * (lambda_ + 1)
stat = terms.sum(axis=axis)
num_obs = _count(terms, axis=axis)
ddof = asarray(ddof)
p = distributions.chi2.sf(stat, num_obs - 1 - ddof)
return Power_divergenceResult(stat, p)
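def _power_divergence_pearson_identity_sketch():
    # Hedged numerical check (illustrative only): with lambda_ = 1 the general
    # Cressie-Read term
    #     f_obs * ((f_obs/f_exp)**lambda_ - 1) / (0.5*lambda_*(lambda_ + 1))
    # reduces to the Pearson term (f_obs - f_exp)**2 / f_exp used above.
    f_obs = np.array([16., 18., 16., 14., 12., 12.])
    f_exp = np.full_like(f_obs, f_obs.mean())
    pearson = ((f_obs - f_exp) ** 2 / f_exp).sum()
    lambda_ = 1.0
    general = ((f_obs * ((f_obs / f_exp) ** lambda_ - 1)).sum()
               / (0.5 * lambda_ * (lambda_ + 1)))
    return pearson, general  # both equal 2.0 for this data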
def chisquare(f_obs, f_exp=None, ddof=0, axis=0):
"""
Calculates a one-way chi square test.
The chi square test tests the null hypothesis that the categorical data
has the given frequencies.
Parameters
----------
f_obs : array_like
Observed frequencies in each category.
f_exp : array_like, optional
Expected frequencies in each category. By default the categories are
assumed to be equally likely.
ddof : int, optional
"Delta degrees of freedom": adjustment to the degrees of freedom
for the p-value. The p-value is computed using a chi-squared
distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
is the number of observed frequencies. The default value of `ddof`
is 0.
axis : int or None, optional
The axis of the broadcast result of `f_obs` and `f_exp` along which to
apply the test. If axis is None, all values in `f_obs` are treated
as a single data set. Default is 0.
Returns
-------
chisq : float or ndarray
The chi-squared test statistic. The value is a float if `axis` is
None or `f_obs` and `f_exp` are 1-D.
p : float or ndarray
The p-value of the test. The value is a float if `ddof` and the
return value `chisq` are scalars.
See Also
--------
power_divergence
mstats.chisquare
Notes
-----
This test is invalid when the observed or expected frequencies in each
category are too small. A typical rule is that all of the observed
and expected frequencies should be at least 5.
The default degrees of freedom, k-1, are for the case when no parameters
of the distribution are estimated. If p parameters are estimated by
efficient maximum likelihood then the correct degrees of freedom are
k-1-p. If the parameters are estimated in a different way, then the
dof can be between k-1-p and k-1. However, it is also possible that
the asymptotic distribution is not a chisquare, in which case this
test is not appropriate.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 8. http://faculty.vassar.edu/lowry/ch8pt1.html
.. [2] "Chi-squared test", http://en.wikipedia.org/wiki/Chi-squared_test
Examples
--------
When just `f_obs` is given, it is assumed that the expected frequencies
are uniform and given by the mean of the observed frequencies.
>>> from scipy.stats import chisquare
>>> chisquare([16, 18, 16, 14, 12, 12])
(2.0, 0.84914503608460956)
With `f_exp` the expected frequencies can be given.
>>> chisquare([16, 18, 16, 14, 12, 12], f_exp=[16, 16, 16, 16, 16, 8])
(3.5, 0.62338762774958223)
When `f_obs` is 2-D, by default the test is applied to each column.
>>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T
>>> obs.shape
(6, 2)
>>> chisquare(obs)
(array([ 2. , 6.66666667]), array([ 0.84914504, 0.24663415]))
By setting ``axis=None``, the test is applied to all data in the array,
which is equivalent to applying the test to the flattened array.
>>> chisquare(obs, axis=None)
(23.31034482758621, 0.015975692534127565)
>>> chisquare(obs.ravel())
(23.31034482758621, 0.015975692534127565)
`ddof` is the change to make to the default degrees of freedom.
>>> chisquare([16, 18, 16, 14, 12, 12], ddof=1)
(2.0, 0.73575888234288467)
The calculation of the p-values is done by broadcasting the
chi-squared statistic with `ddof`.
>>> chisquare([16, 18, 16, 14, 12, 12], ddof=[0,1,2])
(2.0, array([ 0.84914504, 0.73575888, 0.5724067 ]))
`f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has
shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting
`f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared
statistics, we use ``axis=1``:
>>> chisquare([16, 18, 16, 14, 12, 12],
... f_exp=[[16, 16, 16, 16, 16, 8], [8, 20, 20, 16, 12, 12]],
... axis=1)
(array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846]))
"""
return power_divergence(f_obs, f_exp=f_exp, ddof=ddof, axis=axis,
lambda_="pearson")
Ks_2sampResult = namedtuple('Ks_2sampResult', ('statistic', 'pvalue'))
def ks_2samp(data1, data2):
"""
Computes the Kolmogorov-Smirnov statistic on 2 samples.
This is a two-sided test for the null hypothesis that 2 independent samples
are drawn from the same continuous distribution.
Parameters
----------
data1, data2 : sequence of 1-D ndarrays
two arrays of sample observations assumed to be drawn from a continuous
distribution, sample sizes can be different
Returns
-------
statistic : float
KS statistic
pvalue : float
two-tailed p-value
Notes
-----
This tests whether 2 samples are drawn from the same distribution. Note
that, like in the case of the one-sample K-S test, the distribution is
assumed to be continuous.
    This is the two-sided test; one-sided tests are not implemented.
The test uses the two-sided asymptotic Kolmogorov-Smirnov distribution.
If the K-S statistic is small or the p-value is high, then we cannot
reject the hypothesis that the distributions of the two samples
are the same.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678) #fix random seed to get the same result
>>> n1 = 200 # size of first sample
>>> n2 = 300 # size of second sample
For a different distribution, we can reject the null hypothesis since the
pvalue is below 1%:
>>> rvs1 = stats.norm.rvs(size=n1, loc=0., scale=1)
>>> rvs2 = stats.norm.rvs(size=n2, loc=0.5, scale=1.5)
>>> stats.ks_2samp(rvs1, rvs2)
(0.20833333333333337, 4.6674975515806989e-005)
For a slightly different distribution, we cannot reject the null hypothesis
at a 10% or lower alpha since the p-value at 0.144 is higher than 10%
>>> rvs3 = stats.norm.rvs(size=n2, loc=0.01, scale=1.0)
>>> stats.ks_2samp(rvs1, rvs3)
(0.10333333333333333, 0.14498781825751686)
For an identical distribution, we cannot reject the null hypothesis since
the p-value is high, 41%:
>>> rvs4 = stats.norm.rvs(size=n2, loc=0.0, scale=1.0)
>>> stats.ks_2samp(rvs1, rvs4)
(0.07999999999999996, 0.41126949729859719)
"""
data1 = np.sort(data1)
data2 = np.sort(data2)
n1 = data1.shape[0]
n2 = data2.shape[0]
data_all = np.concatenate([data1, data2])
cdf1 = np.searchsorted(data1, data_all, side='right') / (1.0*n1)
cdf2 = np.searchsorted(data2, data_all, side='right') / (1.0*n2)
d = np.max(np.absolute(cdf1 - cdf2))
# Note: d absolute not signed distance
en = np.sqrt(n1 * n2 / float(n1 + n2))
try:
prob = distributions.kstwobign.sf((en + 0.12 + 0.11 / en) * d)
except:
prob = 1.0
return Ks_2sampResult(d, prob)
MannwhitneyuResult = namedtuple('MannwhitneyuResult', ('statistic', 'pvalue'))
def mannwhitneyu(x, y, use_continuity=True, alternative='two-sided'):
"""
Computes the Mann-Whitney rank test on samples x and y.
Parameters
----------
x, y : array_like
Array of samples, should be one-dimensional.
use_continuity : bool, optional
Whether a continuity correction (1/2.) should be taken into
account. Default is True.
    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the alternative hypothesis. Default is 'two-sided'.
    Returns
    -------
    statistic : float
        The Mann-Whitney U statistic.
    pvalue : float
        p-value assuming an asymptotic normal distribution; one-sided or
        two-sided depending on `alternative`.
    Notes
    -----
    Use only when the number of observations in each sample is > 20 and
    you have 2 independent samples of ranks. Mann-Whitney U is
    significant if the u-obtained is LESS THAN or equal to the critical
    value of U.
    This test corrects for ties and by default uses a continuity correction.
    With ``alternative='less'`` or ``'greater'`` the reported p-value is
    one-sided; with the default ``'two-sided'`` it is two-sided.
"""
x = np.asarray(x)
y = np.asarray(y)
n1 = len(x)
n2 = len(y)
ranked = rankdata(np.concatenate((x, y)))
rankx = ranked[0:n1] # get the x-ranks
u1 = n1*n2 + (n1*(n1+1))/2.0 - np.sum(rankx, axis=0) # calc U for x
u2 = n1*n2 - u1 # remainder is U for y
T = tiecorrect(ranked)
if T == 0:
        raise ValueError('All numbers are identical in mannwhitneyu')
sd = np.sqrt(T * n1 * n2 * (n1+n2+1) / 12.0)
fact2 = 1
meanrank = n1*n2/2.0 + 0.5 * use_continuity
if alternative == 'less':
z = u1 - meanrank
elif alternative == 'greater':
z = u2 - meanrank
elif alternative == 'two-sided':
bigu = max(u1, u2)
z = np.abs(bigu - meanrank)
fact2 = 2.
else:
        raise ValueError("alternative should be 'less', 'greater' "
                         "or 'two-sided'")
z = z / sd
return MannwhitneyuResult(u2, distributions.norm.sf(z) * fact2)
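def _mannwhitneyu_by_counting_sketch(x, y):
    # Hedged, brute-force illustration (not the original implementation): the
    # value returned as `statistic` above (u2) equals the number of pairs
    # (x_i, y_j) with x_i > y_j, with ties counted as one half.
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    greater = (x[:, np.newaxis] > y[np.newaxis, :]).sum()
    ties = (x[:, np.newaxis] == y[np.newaxis, :]).sum()
    return greater + 0.5 * ties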
RanksumsResult = namedtuple('RanksumsResult', ('statistic', 'pvalue'))
def ranksums(x, y):
"""
Compute the Wilcoxon rank-sum statistic for two samples.
The Wilcoxon rank-sum test tests the null hypothesis that two sets
of measurements are drawn from the same distribution. The alternative
hypothesis is that values in one sample are more likely to be
larger than the values in the other sample.
This test should be used to compare two samples from continuous
distributions. It does not handle ties between measurements
in x and y. For tie-handling and an optional continuity correction
see `scipy.stats.mannwhitneyu`.
Parameters
----------
x,y : array_like
The data from the two samples
Returns
-------
statistic : float
The test statistic under the large-sample approximation that the
rank sum statistic is normally distributed
pvalue : float
The two-sided p-value of the test
References
----------
.. [1] http://en.wikipedia.org/wiki/Wilcoxon_rank-sum_test
"""
x, y = map(np.asarray, (x, y))
n1 = len(x)
n2 = len(y)
alldata = np.concatenate((x, y))
ranked = rankdata(alldata)
x = ranked[:n1]
s = np.sum(x, axis=0)
expected = n1 * (n1+n2+1) / 2.0
z = (s - expected) / np.sqrt(n1*n2*(n1+n2+1)/12.0)
prob = 2 * distributions.norm.sf(abs(z))
return RanksumsResult(z, prob)
KruskalResult = namedtuple('KruskalResult', ('statistic', 'pvalue'))
def kruskal(*args, **kwargs):
"""
Compute the Kruskal-Wallis H-test for independent samples
The Kruskal-Wallis H-test tests the null hypothesis that the population
    medians of all of the groups are equal. It is a non-parametric version of
ANOVA. The test works on 2 or more independent samples, which may have
different sizes. Note that rejecting the null hypothesis does not
indicate which of the groups differs. Post-hoc comparisons between
groups are required to determine which groups are different.
Parameters
----------
sample1, sample2, ... : array_like
Two or more arrays with the sample measurements can be given as
arguments.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float
The Kruskal-Wallis H statistic, corrected for ties
pvalue : float
The p-value for the test using the assumption that H has a chi
square distribution
See Also
--------
f_oneway : 1-way ANOVA
mannwhitneyu : Mann-Whitney rank test on two samples.
friedmanchisquare : Friedman test for repeated measurements
Notes
-----
Due to the assumption that H has a chi square distribution, the number
of samples in each group must not be too small. A typical rule is
that each sample must have at least 5 measurements.
References
----------
.. [1] W. H. Kruskal & W. W. Wallis, "Use of Ranks in
One-Criterion Variance Analysis", Journal of the American Statistical
Association, Vol. 47, Issue 260, pp. 583-621, 1952.
.. [2] http://en.wikipedia.org/wiki/Kruskal-Wallis_one-way_analysis_of_variance
Examples
--------
>>> from scipy import stats
>>> x = [1, 3, 5, 7, 9]
>>> y = [2, 4, 6, 8, 10]
>>> stats.kruskal(x, y)
KruskalResult(statistic=0.27272727272727337, pvalue=0.60150813444058948)
>>> x = [1, 1, 1]
>>> y = [2, 2, 2]
>>> z = [2, 2]
>>> stats.kruskal(x, y, z)
KruskalResult(statistic=7.0, pvalue=0.030197383422318501)
"""
args = list(map(np.asarray, args))
num_groups = len(args)
if num_groups < 2:
raise ValueError("Need at least two groups in stats.kruskal()")
for arg in args:
if arg.size == 0:
return KruskalResult(np.nan, np.nan)
n = np.asarray(list(map(len, args)))
if 'nan_policy' in kwargs.keys():
if kwargs['nan_policy'] not in ('propagate', 'raise', 'omit'):
            raise ValueError("nan_policy must be 'propagate', "
                             "'raise' or 'omit'")
else:
nan_policy = kwargs['nan_policy']
else:
nan_policy = 'propagate'
contains_nan = False
for arg in args:
cn = _contains_nan(arg, nan_policy)
if cn[0]:
contains_nan = True
break
if contains_nan and nan_policy == 'omit':
        # rebind args so the masked arrays are actually passed on
        args = [ma.masked_invalid(a) for a in args]
return mstats_basic.kruskal(*args)
if contains_nan and nan_policy == 'propagate':
return KruskalResult(np.nan, np.nan)
alldata = np.concatenate(args)
ranked = rankdata(alldata)
ties = tiecorrect(ranked)
if ties == 0:
raise ValueError('All numbers are identical in kruskal')
# Compute sum^2/n for each group and sum
j = np.insert(np.cumsum(n), 0, 0)
ssbn = 0
for i in range(num_groups):
ssbn += _square_of_sums(ranked[j[i]:j[i+1]]) / float(n[i])
totaln = np.sum(n)
h = 12.0 / (totaln * (totaln + 1)) * ssbn - 3 * (totaln + 1)
df = num_groups - 1
h /= ties
return KruskalResult(h, distributions.chi2.sf(h, df))
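def _kruskal_h_by_hand_sketch(*groups):
    # Hedged restatement (illustrative only) of the statistic computed above,
    # before the tie correction:
    #     H = 12 / (N*(N+1)) * sum_i R_i**2 / n_i - 3*(N+1)
    # where R_i is the rank sum of group i within the pooled ranking.
    groups = [np.asarray(g, dtype=float) for g in groups]
    pooled_ranks = rankdata(np.concatenate(groups))
    sizes = np.array([len(g) for g in groups])
    edges = np.insert(np.cumsum(sizes), 0, 0)
    rank_sums = np.array([pooled_ranks[edges[i]:edges[i + 1]].sum()
                          for i in range(len(groups))])
    n_total = sizes.sum()
    return (12.0 / (n_total * (n_total + 1)) * (rank_sums ** 2 / sizes).sum()
            - 3 * (n_total + 1))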
FriedmanchisquareResult = namedtuple('FriedmanchisquareResult',
('statistic', 'pvalue'))
def friedmanchisquare(*args):
"""
Computes the Friedman test for repeated measurements
The Friedman test tests the null hypothesis that repeated measurements of
the same individuals have the same distribution. It is often used
to test for consistency among measurements obtained in different ways.
For example, if two measurement techniques are used on the same set of
individuals, the Friedman test can be used to determine if the two
measurement techniques are consistent.
Parameters
----------
measurements1, measurements2, measurements3... : array_like
Arrays of measurements. All of the arrays must have the same number
of elements. At least 3 sets of measurements must be given.
Returns
-------
statistic : float
the test statistic, correcting for ties
pvalue : float
the associated p-value assuming that the test statistic has a chi
squared distribution
Notes
-----
Due to the assumption that the test statistic has a chi squared
distribution, the p-value is only reliable for n > 10 and more than
6 repeated measurements.
References
----------
.. [1] http://en.wikipedia.org/wiki/Friedman_test
"""
k = len(args)
if k < 3:
raise ValueError('\nLess than 3 levels. Friedman test not appropriate.\n')
n = len(args[0])
for i in range(1, k):
if len(args[i]) != n:
raise ValueError('Unequal N in friedmanchisquare. Aborting.')
# Rank data
data = np.vstack(args).T
data = data.astype(float)
for i in range(len(data)):
data[i] = rankdata(data[i])
# Handle ties
ties = 0
for i in range(len(data)):
replist, repnum = find_repeats(array(data[i]))
for t in repnum:
ties += t * (t*t - 1)
c = 1 - ties / float(k*(k*k - 1)*n)
ssbn = np.sum(data.sum(axis=0)**2)
chisq = (12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)) / c
return FriedmanchisquareResult(chisq, distributions.chi2.sf(chisq, k - 1))
def combine_pvalues(pvalues, method='fisher', weights=None):
"""
Methods for combining the p-values of independent tests bearing upon the
same hypothesis.
Parameters
----------
pvalues : array_like, 1-D
Array of p-values assumed to come from independent tests.
method : {'fisher', 'stouffer'}, optional
Name of method to use to combine p-values. The following methods are
available:
- "fisher": Fisher's method (Fisher's combined probability test),
the default.
- "stouffer": Stouffer's Z-score method.
weights : array_like, 1-D, optional
Optional array of weights used only for Stouffer's Z-score method.
Returns
-------
statistic: float
The statistic calculated by the specified method:
- "fisher": The chi-squared statistic
- "stouffer": The Z-score
pval: float
The combined p-value.
Notes
-----
Fisher's method (also known as Fisher's combined probability test) [1]_ uses
a chi-squared statistic to compute a combined p-value. The closely related
Stouffer's Z-score method [2]_ uses Z-scores rather than p-values. The
advantage of Stouffer's method is that it is straightforward to introduce
weights, which can make Stouffer's method more powerful than Fisher's
method when the p-values are from studies of different size [3]_ [4]_.
Fisher's method may be extended to combine p-values from dependent tests
[5]_. Extensions such as Brown's method and Kost's method are not currently
implemented.
.. versionadded:: 0.15.0
References
----------
.. [1] https://en.wikipedia.org/wiki/Fisher%27s_method
.. [2] http://en.wikipedia.org/wiki/Fisher's_method#Relation_to_Stouffer.27s_Z-score_method
.. [3] Whitlock, M. C. "Combining probability from independent tests: the
weighted Z-method is superior to Fisher's approach." Journal of
Evolutionary Biology 18, no. 5 (2005): 1368-1373.
.. [4] Zaykin, Dmitri V. "Optimally weighted Z-test is a powerful method
for combining probabilities in meta-analysis." Journal of
Evolutionary Biology 24, no. 8 (2011): 1836-1841.
.. [5] https://en.wikipedia.org/wiki/Extensions_of_Fisher%27s_method
"""
pvalues = np.asarray(pvalues)
if pvalues.ndim != 1:
raise ValueError("pvalues is not 1-D")
if method == 'fisher':
Xsq = -2 * np.sum(np.log(pvalues))
pval = distributions.chi2.sf(Xsq, 2 * len(pvalues))
return (Xsq, pval)
elif method == 'stouffer':
if weights is None:
weights = np.ones_like(pvalues)
elif len(weights) != len(pvalues):
raise ValueError("pvalues and weights must be of the same size.")
weights = np.asarray(weights)
if weights.ndim != 1:
raise ValueError("weights is not 1-D")
Zi = distributions.norm.isf(pvalues)
Z = np.dot(weights, Zi) / np.linalg.norm(weights)
pval = distributions.norm.sf(Z)
return (Z, pval)
else:
raise ValueError(
"Invalid method '%s'. Options are 'fisher' or 'stouffer'", method)
#####################################
# PROBABILITY CALCULATIONS #
#####################################
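def _combine_pvalues_demo_sketch():
    # Hedged usage sketch (illustrative only) of the two combination rules
    # implemented by combine_pvalues above: Fisher's chi-squared statistic
    # -2*sum(log(p)) with 2*k degrees of freedom, and Stouffer's weighted
    # Z-score.  The `weights` argument only affects Stouffer's method.
    pvals = np.array([0.01, 0.2, 0.3])
    chi2_stat, p_fisher = combine_pvalues(pvals, method='fisher')
    z_stat, p_stouffer = combine_pvalues(pvals, method='stouffer',
                                         weights=np.array([3.0, 1.0, 1.0]))
    return (chi2_stat, p_fisher), (z_stat, p_stouffer)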
@np.deprecate(message="stats.chisqprob is deprecated in scipy 0.17.0; "
"use stats.distributions.chi2.sf instead.")
def chisqprob(chisq, df):
"""
Probability value (1-tail) for the Chi^2 probability distribution.
Broadcasting rules apply.
Parameters
----------
chisq : array_like or float > 0
df : array_like or float, probably int >= 1
Returns
-------
chisqprob : ndarray
The area from `chisq` to infinity under the Chi^2 probability
distribution with degrees of freedom `df`.
"""
return distributions.chi2.sf(chisq, df)
@np.deprecate(message="stats.betai is deprecated in scipy 0.17.0; "
"use special.betainc instead")
def betai(a, b, x):
"""
Returns the incomplete beta function.
I_x(a,b) = 1/B(a,b)*(Integral(0,x) of t^(a-1)(1-t)^(b-1) dt)
where a,b>0 and B(a,b) = G(a)*G(b)/(G(a+b)) where G(a) is the gamma
function of a.
The standard broadcasting rules apply to a, b, and x.
Parameters
----------
a : array_like or float > 0
b : array_like or float > 0
x : array_like or float
        x will be clipped to be no greater than 1.0.
Returns
-------
betai : ndarray
Incomplete beta function.
"""
return _betai(a, b, x)
def _betai(a, b, x):
x = np.asarray(x)
x = np.where(x < 1.0, x, 1.0) # if x > 1 then return 1.0
return special.betainc(a, b, x)
#####################################
# ANOVA CALCULATIONS #
#####################################
@np.deprecate(message="stats.f_value_wilks_lambda deprecated in scipy 0.17.0")
def f_value_wilks_lambda(ER, EF, dfnum, dfden, a, b):
    """Calculation of Wilks lambda F-statistic for multivariate data, per
Maxwell & Delaney p.657.
"""
if isinstance(ER, (int, float)):
ER = array([[ER]])
if isinstance(EF, (int, float)):
EF = array([[EF]])
lmbda = linalg.det(EF) / linalg.det(ER)
if (a-1)**2 + (b-1)**2 == 5:
q = 1
else:
q = np.sqrt(((a-1)**2*(b-1)**2 - 2) / ((a-1)**2 + (b-1)**2 - 5))
n_um = (1 - lmbda**(1.0/q))*(a-1)*(b-1)
d_en = lmbda**(1.0/q) / (n_um*q - 0.5*(a-1)*(b-1) + 1)
return n_um / d_en
@np.deprecate(message="stats.f_value deprecated in scipy 0.17.0")
def f_value(ER, EF, dfR, dfF):
"""
Returns an F-statistic for a restricted vs. unrestricted model.
Parameters
----------
ER : float
`ER` is the sum of squared residuals for the restricted model
or null hypothesis
EF : float
`EF` is the sum of squared residuals for the unrestricted model
or alternate hypothesis
dfR : int
`dfR` is the degrees of freedom in the restricted model
dfF : int
`dfF` is the degrees of freedom in the unrestricted model
Returns
-------
F-statistic : float
"""
return (ER - EF) / float(dfR - dfF) / (EF / float(dfF))
@np.deprecate(message="stats.f_value_multivariate deprecated in scipy 0.17.0")
def f_value_multivariate(ER, EF, dfnum, dfden):
"""
Returns a multivariate F-statistic.
Parameters
----------
ER : ndarray
Error associated with the null hypothesis (the Restricted model).
From a multivariate F calculation.
EF : ndarray
Error associated with the alternate hypothesis (the Full model)
From a multivariate F calculation.
dfnum : int
        Degrees of freedom of the Restricted model.
    dfden : int
        Degrees of freedom associated with the Full model.
Returns
-------
fstat : float
The computed F-statistic.
"""
if isinstance(ER, (int, float)):
ER = array([[ER]])
if isinstance(EF, (int, float)):
EF = array([[EF]])
n_um = (linalg.det(ER) - linalg.det(EF)) / float(dfnum)
d_en = linalg.det(EF) / float(dfden)
return n_um / d_en
#####################################
# SUPPORT FUNCTIONS #
#####################################
RepeatedResults = namedtuple('RepeatedResults', ('values', 'counts'))
def find_repeats(arr):
"""
Find repeats and repeat counts.
Parameters
----------
arr : array_like
Input array. This is cast to float64.
Returns
-------
values : ndarray
The unique values from the (flattened) input that are repeated.
counts : ndarray
Number of times the corresponding 'value' is repeated.
Notes
-----
In numpy >= 1.9 `numpy.unique` provides similar functionality. The main
difference is that `find_repeats` only returns repeated values.
Examples
--------
>>> from scipy import stats
>>> stats.find_repeats([2, 1, 2, 3, 2, 2, 5])
RepeatedResults(values=array([ 2.]), counts=array([4]))
>>> stats.find_repeats([[10, 20, 1, 2], [5, 5, 4, 4]])
RepeatedResults(values=array([ 4., 5.]), counts=array([2, 2]))
"""
# Note: always copies.
return RepeatedResults(*_find_repeats(np.array(arr, dtype=np.float64)))
@np.deprecate(message="scipy.stats.ss is deprecated in scipy 0.17.0")
def ss(a, axis=0):
return _sum_of_squares(a, axis)
def _sum_of_squares(a, axis=0):
"""
Squares each element of the input array, and returns the sum(s) of that.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
sum_of_squares : ndarray
The sum along the given axis for (a**2).
See also
--------
_square_of_sums : The square(s) of the sum(s) (the opposite of
`_sum_of_squares`).
"""
a, axis = _chk_asarray(a, axis)
return np.sum(a*a, axis)
@np.deprecate(message="scipy.stats.square_of_sums is deprecated "
"in scipy 0.17.0")
def square_of_sums(a, axis=0):
return _square_of_sums(a, axis)
def _square_of_sums(a, axis=0):
"""
Sums elements of the input array, and returns the square(s) of that sum.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
square_of_sums : float or ndarray
The square of the sum over `axis`.
See also
--------
    _sum_of_squares : The sum of squares (the opposite of `_square_of_sums`).
"""
a, axis = _chk_asarray(a, axis)
s = np.sum(a, axis)
if not np.isscalar(s):
return s.astype(float) * s
else:
return float(s) * s
@np.deprecate(message="scipy.stats.fastsort is deprecated in scipy 0.16.0")
def fastsort(a):
"""
Sort an array and provide the argsort.
Parameters
----------
a : array_like
Input array.
Returns
-------
    sorted_array : ndarray
        A sorted copy of the input array.
    argsort : ndarray of type int
        The indices that sort the original array.
    """
it = np.argsort(a)
as_ = a[it]
return as_, it
def rankdata(a, method='average'):
"""
rankdata(a, method='average')
Assign ranks to data, dealing with ties appropriately.
Ranks begin at 1. The `method` argument controls how ranks are assigned
to equal values. See [1]_ for further discussion of ranking methods.
Parameters
----------
a : array_like
The array of values to be ranked. The array is first flattened.
method : str, optional
The method used to assign ranks to tied elements.
The options are 'average', 'min', 'max', 'dense' and 'ordinal'.
'average':
The average of the ranks that would have been assigned to
all the tied values is assigned to each value.
'min':
The minimum of the ranks that would have been assigned to all
the tied values is assigned to each value. (This is also
referred to as "competition" ranking.)
'max':
The maximum of the ranks that would have been assigned to all
the tied values is assigned to each value.
'dense':
Like 'min', but the rank of the next highest element is assigned
the rank immediately after those assigned to the tied elements.
'ordinal':
All values are given a distinct rank, corresponding to the order
that the values occur in `a`.
The default is 'average'.
Returns
-------
ranks : ndarray
An array of length equal to the size of `a`, containing rank
scores.
References
----------
.. [1] "Ranking", http://en.wikipedia.org/wiki/Ranking
Examples
--------
>>> from scipy.stats import rankdata
>>> rankdata([0, 2, 3, 2])
array([ 1. , 2.5, 4. , 2.5])
>>> rankdata([0, 2, 3, 2], method='min')
array([ 1, 2, 4, 2])
>>> rankdata([0, 2, 3, 2], method='max')
array([ 1, 3, 4, 3])
>>> rankdata([0, 2, 3, 2], method='dense')
array([ 1, 2, 3, 2])
>>> rankdata([0, 2, 3, 2], method='ordinal')
array([ 1, 2, 4, 3])
"""
if method not in ('average', 'min', 'max', 'dense', 'ordinal'):
raise ValueError('unknown method "{0}"'.format(method))
arr = np.ravel(np.asarray(a))
algo = 'mergesort' if method == 'ordinal' else 'quicksort'
sorter = np.argsort(arr, kind=algo)
inv = np.empty(sorter.size, dtype=np.intp)
inv[sorter] = np.arange(sorter.size, dtype=np.intp)
if method == 'ordinal':
return inv + 1
arr = arr[sorter]
obs = np.r_[True, arr[1:] != arr[:-1]]
dense = obs.cumsum()[inv]
if method == 'dense':
return dense
# cumulative counts of each unique value
count = np.r_[np.nonzero(obs)[0], len(obs)]
if method == 'max':
return count[dense]
if method == 'min':
return count[dense - 1] + 1
# average method
return .5 * (count[dense] + count[dense - 1] + 1)
|
mit
|
krez13/scikit-learn
|
sklearn/svm/setup.py
|
321
|
3157
|
import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('svm', parent_package, top_path)
config.add_subpackage('tests')
# Section LibSVM
# we compile both libsvm and libsvm_sparse
config.add_library('libsvm-skl',
sources=[join('src', 'libsvm', 'libsvm_template.cpp')],
depends=[join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')],
# Force C++ linking in case gcc is picked up instead
# of g++ under windows with some versions of MinGW
extra_link_args=['-lstdc++'],
)
libsvm_sources = ['libsvm.c']
libsvm_depends = [join('src', 'libsvm', 'libsvm_helper.c'),
join('src', 'libsvm', 'libsvm_template.cpp'),
join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')]
config.add_extension('libsvm',
sources=libsvm_sources,
include_dirs=[numpy.get_include(),
join('src', 'libsvm')],
libraries=['libsvm-skl'],
depends=libsvm_depends,
)
### liblinear module
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
liblinear_sources = ['liblinear.c',
join('src', 'liblinear', '*.cpp')]
liblinear_depends = [join('src', 'liblinear', '*.h'),
join('src', 'liblinear', 'liblinear_helper.c')]
config.add_extension('liblinear',
sources=liblinear_sources,
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
depends=liblinear_depends,
# extra_compile_args=['-O0 -fno-inline'],
** blas_info)
## end liblinear module
# this should go *after* libsvm-skl
libsvm_sparse_sources = ['libsvm_sparse.c']
config.add_extension('libsvm_sparse', libraries=['libsvm-skl'],
sources=libsvm_sparse_sources,
include_dirs=[numpy.get_include(),
join("src", "libsvm")],
depends=[join("src", "libsvm", "svm.h"),
join("src", "libsvm",
"libsvm_sparse_helper.c")])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
bsd-3-clause
|
zhenv5/scikit-learn
|
sklearn/linear_model/tests/test_least_angle.py
|
98
|
20870
|
from nose.tools import assert_equal
import numpy as np
from scipy import linalg
from sklearn.cross_validation import train_test_split
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_no_warnings, assert_warns
from sklearn.utils.testing import TempMemmap
from sklearn.utils import ConvergenceWarning
from sklearn import linear_model, datasets
from sklearn.linear_model.least_angle import _lars_path_residues
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# TODO: use another dataset that has multiple drops
def test_simple():
# Principle of Lars is to keep covariances tied and decreasing
# also test verbose output
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", verbose=10)
sys.stdout = old_stdout
for (i, coef_) in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
finally:
sys.stdout = old_stdout
def test_simple_precomputed():
# The same, with precomputed Gram matrix
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, Gram=G, method="lar")
for i, coef_ in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
def test_all_precomputed():
# Test that lars_path with precomputed Gram and Xy gives the right answer
X, y = diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
for method in 'lar', 'lasso':
output = linear_model.lars_path(X, y, method=method)
output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method)
for expected, got in zip(output, output_pre):
assert_array_almost_equal(expected, got)
def test_lars_lstsq():
# Test that Lars gives least square solution at the end
# of the path
X1 = 3 * diabetes.data # use un-normalized dataset
clf = linear_model.LassoLars(alpha=0.)
clf.fit(X1, y)
coef_lstsq = np.linalg.lstsq(X1, y)[0]
assert_array_almost_equal(clf.coef_, coef_lstsq)
def test_lasso_gives_lstsq_solution():
# Test that Lars Lasso gives least square solution at the end
# of the path
alphas_, active, coef_path_ = linear_model.lars_path(X, y, method="lasso")
coef_lstsq = np.linalg.lstsq(X, y)[0]
assert_array_almost_equal(coef_lstsq, coef_path_[:, -1])
def test_collinearity():
# Check that lars_path is robust to collinearity in input
X = np.array([[3., 3., 1.],
[2., 2., 0.],
[1., 1., 0]])
y = np.array([1., 0., 0])
f = ignore_warnings
_, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01)
assert_true(not np.isnan(coef_path_).any())
residual = np.dot(X, coef_path_[:, -1]) - y
assert_less((residual ** 2).sum(), 1.) # just make sure it's bounded
n_samples = 10
X = np.random.rand(n_samples, 5)
y = np.zeros(n_samples)
_, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False,
copy_Gram=False, alpha_min=0.,
method='lasso', verbose=0,
max_iter=500)
assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_))
def test_no_path():
# Test that the ``return_path=False`` option returns the correct output
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar")
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_precomputed():
# Test that the ``return_path=False`` option with Gram remains correct
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G)
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G,
return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_all_precomputed():
# Test that the ``return_path=False`` option with Gram and Xy remains
# correct
X, y = 3 * diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
alphas_, active_, coef_path_ = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9)
print("---")
alpha_, active, coef = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9, return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_singular_matrix():
# Test when input is a singular matrix
X1 = np.array([[1, 1.], [1., 1.]])
y1 = np.array([1, 1])
alphas, active, coef_path = linear_model.lars_path(X1, y1)
assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]])
def test_rank_deficient_design():
# consistency test that checks that LARS Lasso is handling rank
# deficient input data (with rank < n_features) in the same way
# as coordinate descent Lasso
y = [5, 0, 5]
for X in ([[5, 0],
[0, 5],
[10, 10]],
[[10, 10, 0],
[1e-32, 0, 0],
[0, 0, 1]],
):
# To be able to use the coefs to compute the objective function,
# we need to turn off normalization
lars = linear_model.LassoLars(.1, normalize=False)
coef_lars_ = lars.fit(X, y).coef_
obj_lars = (1. / (2. * 3.)
* linalg.norm(y - np.dot(X, coef_lars_)) ** 2
+ .1 * linalg.norm(coef_lars_, 1))
coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False)
coef_cd_ = coord_descent.fit(X, y).coef_
obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2
+ .1 * linalg.norm(coef_cd_, 1))
assert_less(obj_lars, obj_cd * (1. + 1e-8))
def test_lasso_lars_vs_lasso_cd(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results.
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# similar test, with the classifiers
for alpha in np.linspace(1e-2, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y)
clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,
normalize=False).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# same test, with normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_vs_lasso_cd_early_stopping(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when early stopping is used.
# (test : before, in the middle, and in the last part of the path)
alphas_min = [10, 0.9, 1e-4]
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
alphas_min = [10, 0.9, 1e-4]
# same test, with normalization
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_path_length():
# Test that the path length of the LassoLars is right
lasso = linear_model.LassoLars()
lasso.fit(X, y)
lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2])
lasso2.fit(X, y)
assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_)
# Also check that the sequence of alphas is always decreasing
assert_true(np.all(np.diff(lasso.alphas_) < 0))
def test_lasso_lars_vs_lasso_cd_ill_conditioned():
# Test lasso lars on a very ill-conditioned design, and check that
# it does not blow up, and stays somewhat close to a solution given
# by the coordinate descent solver
# Also test that lasso_path (using lars_path output style) gives
# the same result as lars_path and previous lasso output style
# under these conditions.
rng = np.random.RandomState(42)
# Generate data
n, m = 70, 100
k = 5
X = rng.randn(n, m)
w = np.zeros((m, 1))
i = np.arange(0, m)
rng.shuffle(i)
supp = i[:k]
w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1)
y = np.dot(X, w)
sigma = 0.2
y += sigma * rng.rand(*y.shape)
y = y.squeeze()
lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso')
_, lasso_coef2, _ = linear_model.lasso_path(X, y,
alphas=lars_alphas,
tol=1e-6,
fit_intercept=False)
assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1)
def test_lasso_lars_vs_lasso_cd_ill_conditioned2():
# Create an ill-conditioned situation in which the LARS has to go
# far in the path to converge, and check that LARS and coordinate
# descent give the same answers
# Note it used to be the case that Lars had to use the drop for good
# strategy for this but this is no longer the case with the
# equality_tolerance checks
X = [[1e20, 1e20, 0],
[-1e-32, 0, 0],
[1, 1, 1]]
y = [10, 10, 1]
alpha = .0001
def objective_function(coef):
return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2
+ alpha * linalg.norm(coef, 1))
lars = linear_model.LassoLars(alpha=alpha, normalize=False)
assert_warns(ConvergenceWarning, lars.fit, X, y)
lars_coef_ = lars.coef_
lars_obj = objective_function(lars_coef_)
coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-10, normalize=False)
cd_coef_ = coord_descent.fit(X, y).coef_
cd_obj = objective_function(cd_coef_)
assert_less(lars_obj, cd_obj * (1. + 1e-8))
def test_lars_add_features():
# assure that at least some features get added if necessary
# test for 6d2b4c
# Hilbert matrix
n = 5
H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis])
clf = linear_model.Lars(fit_intercept=False).fit(
H, np.arange(n))
assert_true(np.all(np.isfinite(clf.coef_)))
def test_lars_n_nonzero_coefs(verbose=False):
lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose)
lars.fit(X, y)
assert_equal(len(lars.coef_.nonzero()[0]), 6)
# The path should be of length 6 + 1 in a Lars going down to 6
# non-zero coefs
assert_equal(len(lars.alphas_), 7)
def test_multitarget():
# Assure that estimators receiving multidimensional y do the right thing
X = diabetes.data
Y = np.vstack([diabetes.target, diabetes.target ** 2]).T
n_targets = Y.shape[1]
for estimator in (linear_model.LassoLars(), linear_model.Lars()):
estimator.fit(X, Y)
Y_pred = estimator.predict(X)
Y_dec = estimator.decision_function(X)
assert_array_almost_equal(Y_pred, Y_dec)
alphas, active, coef, path = (estimator.alphas_, estimator.active_,
estimator.coef_, estimator.coef_path_)
for k in range(n_targets):
estimator.fit(X, Y[:, k])
y_pred = estimator.predict(X)
assert_array_almost_equal(alphas[k], estimator.alphas_)
assert_array_almost_equal(active[k], estimator.active_)
assert_array_almost_equal(coef[k], estimator.coef_)
assert_array_almost_equal(path[k], estimator.coef_path_)
assert_array_almost_equal(Y_pred[:, k], y_pred)
def test_lars_cv():
# Test the LassoLarsCV object by checking that the optimal alpha
# increases as the number of samples increases.
# This property is not actually guaranteed in general and is just a
# property of the given dataset, with the given steps chosen.
old_alpha = 0
lars_cv = linear_model.LassoLarsCV()
for length in (400, 200, 100):
X = diabetes.data[:length]
y = diabetes.target[:length]
lars_cv.fit(X, y)
np.testing.assert_array_less(old_alpha, lars_cv.alpha_)
old_alpha = lars_cv.alpha_
def test_lasso_lars_ic():
# Test the LassoLarsIC object by checking that
# - some good features are selected.
# - alpha_bic > alpha_aic
# - n_nonzero_bic < n_nonzero_aic
lars_bic = linear_model.LassoLarsIC('bic')
lars_aic = linear_model.LassoLarsIC('aic')
rng = np.random.RandomState(42)
X = diabetes.data
y = diabetes.target
X = np.c_[X, rng.randn(X.shape[0], 4)] # add 4 bad features
lars_bic.fit(X, y)
lars_aic.fit(X, y)
nonzero_bic = np.where(lars_bic.coef_)[0]
nonzero_aic = np.where(lars_aic.coef_)[0]
assert_greater(lars_bic.alpha_, lars_aic.alpha_)
assert_less(len(nonzero_bic), len(nonzero_aic))
assert_less(np.max(nonzero_bic), diabetes.data.shape[1])
# test error on unknown IC
lars_broken = linear_model.LassoLarsIC('<unknown>')
assert_raises(ValueError, lars_broken.fit, X, y)
def test_no_warning_for_zero_mse():
# LassoLarsIC should not warn for log of zero MSE.
y = np.arange(10, dtype=float)
X = y.reshape(-1, 1)
lars = linear_model.LassoLarsIC(normalize=False)
assert_no_warnings(lars.fit, X, y)
assert_true(np.any(np.isinf(lars.criterion_)))
def test_lars_path_readonly_data():
# When using automated memory mapping on large input, the
# fold data is in read-only mode
# This is a non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/4597
splitted_data = train_test_split(X, y, random_state=42)
with TempMemmap(splitted_data) as (X_train, X_test, y_train, y_test):
# The following should not fail despite copy=False
_lars_path_residues(X_train, y_train, X_test, y_test, copy=False)
def test_lars_path_positive_constraint():
# this is the main test for the positive parameter on the lars_path method
# the estimator classes just make use of this function
# we do the test on the diabetes dataset
# ensure that we get negative coefficients when positive=False
# and all positive when positive=True
# for method 'lar' (default) and lasso
for method in ['lar', 'lasso']:
alpha, active, coefs = \
linear_model.lars_path(diabetes['data'], diabetes['target'],
return_path=True, method=method,
positive=False)
assert_true(coefs.min() < 0)
alpha, active, coefs = \
linear_model.lars_path(diabetes['data'], diabetes['target'],
return_path=True, method=method,
positive=True)
assert_true(coefs.min() >= 0)
# now we test the positive option for all the estimator classes
default_parameter = {'fit_intercept': False}
estimator_parameter_map = {'Lars': {'n_nonzero_coefs': 5},
'LassoLars': {'alpha': 0.1},
'LarsCV': {},
'LassoLarsCV': {},
'LassoLarsIC': {}}
def test_estimatorclasses_positive_constraint():
# test that the positive option is correctly passed through to all estimator
# classes, all in this same function
for estname in estimator_parameter_map:
params = default_parameter.copy()
params.update(estimator_parameter_map[estname])
estimator = getattr(linear_model, estname)(positive=False, **params)
estimator.fit(diabetes['data'], diabetes['target'])
assert_true(estimator.coef_.min() < 0)
estimator = getattr(linear_model, estname)(positive=True, **params)
estimator.fit(diabetes['data'], diabetes['target'])
assert_true(min(estimator.coef_) >= 0)
def test_lasso_lars_vs_lasso_cd_positive(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when using the positive option
# This test is basically a copy of the above with additional positive
# option. However, for the middle part (the comparison of coefficient values
# for a range of alphas) we had to make some adaptations. See below.
# not normalized data
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8, positive=True)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# The range of alphas chosen for coefficient comparison here is restricted
# as compared with the above test without the positive option. This is due
# to the circumstance that the Lars-Lasso algorithm does not converge to
# the least-squares-solution for small alphas, see 'Least Angle Regression'
# by Efron et al 2004. The coefficients are typically in congruence up to
# the smallest alpha reached by the Lars-Lasso algorithm and start to
# diverge thereafter. See
# https://gist.github.com/michigraber/7e7d7c75eca694c7a6ff
for alpha in np.linspace(6e-1, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(fit_intercept=False, alpha=alpha,
normalize=False, positive=True).fit(X, y)
clf2 = linear_model.Lasso(fit_intercept=False, alpha=alpha, tol=1e-8,
normalize=False, positive=True).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8, positive=True)
for c, a in zip(lasso_path.T[:-1], alphas[:-1]): # don't include alpha=0
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
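# A small standalone sketch of the comparison performed above: for a
# sufficiently large alpha, the positive-constrained LassoLars and the
# positive-constrained coordinate-descent Lasso agree, and both coefficient
# vectors stay elementwise non-negative. The helper name and the alpha value
# are illustrative choices; the underscore prefix keeps it out of test
# collection.
def _demo_positive_lasso_agreement(alpha=0.6):
    from scipy import linalg
    from sklearn import linear_model, datasets
    d = datasets.load_diabetes()
    X_, y_ = 3 * d.data, d.target
    lars = linear_model.LassoLars(alpha=alpha, fit_intercept=False,
                                  normalize=False, positive=True).fit(X_, y_)
    cd = linear_model.Lasso(alpha=alpha, fit_intercept=False, tol=1e-8,
                            normalize=False, positive=True).fit(X_, y_)
    assert lars.coef_.min() >= 0 and cd.coef_.min() >= 0
    # the difference should be small (of the order checked in the test above)
    return linalg.norm(lars.coef_ - cd.coef_)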
|
bsd-3-clause
|
ntvis/tushare
|
tushare/stock/macro.py
|
37
|
12728
|
# -*- coding:utf-8 -*-
"""
Macroeconomic data interface
Created on 2015/01/24
@author: Jimmy Liu
@group : waditu
@contact: [email protected]
"""
import pandas as pd
import numpy as np
import re
import json
from tushare.stock import macro_vars as vs
from tushare.stock import cons as ct
try:
from urllib.request import urlopen, Request
except ImportError:
from urllib2 import urlopen, Request
def get_gdp_year():
"""
Fetch annual gross domestic product (GDP) data
Return
--------
DataFrame
year :statistical year
gdp :gross domestic product (100 million CNY)
pc_gdp :per-capita GDP (CNY)
gnp :gross national product (100 million CNY)
pi :primary industry (100 million CNY)
si :secondary industry (100 million CNY)
industry :industry (100 million CNY)
cons_industry :construction (100 million CNY)
ti :tertiary industry (100 million CNY)
trans_industry :transport, storage, post and telecommunications (100 million CNY)
lbdy :wholesale, retail trade and catering (100 million CNY)
"""
rdint = vs.random()
request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'],
rdint, vs.MACRO_TYPE[0], 0, 70,
rdint))
text = urlopen(request, timeout=10).read()
text = text.decode('gbk') if ct.PY3 else text
regSym = re.compile(r'\,count:(.*?)\}')
datastr = regSym.findall(text)
datastr = datastr[0]
datastr = datastr.split('data:')[1]
datastr = datastr.replace('"', '').replace('null', '0')
js = json.loads(datastr)
df = pd.DataFrame(js, columns=vs.GDP_YEAR_COLS)
df[df==0] = np.NaN
return df
def get_gdp_quarter():
"""
Fetch quarterly gross domestic product (GDP) data
Return
--------
DataFrame
quarter :quarter
gdp :gross domestic product (100 million CNY)
gdp_yoy :GDP year-on-year growth (%)
pi :primary industry value added (100 million CNY)
pi_yoy:primary industry value added year-on-year growth (%)
si :secondary industry value added (100 million CNY)
si_yoy :secondary industry value added year-on-year growth (%)
ti :tertiary industry value added (100 million CNY)
ti_yoy :tertiary industry value added year-on-year growth (%)
"""
rdint = vs.random()
request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'],
rdint, vs.MACRO_TYPE[0], 1, 250,
rdint))
text = urlopen(request,timeout=10).read()
text = text.decode('gbk') if ct.PY3 else text
regSym = re.compile(r'\,count:(.*?)\}')
datastr = regSym.findall(text)
datastr = datastr[0]
datastr = datastr.split('data:')[1]
datastr = datastr.replace('"', '').replace('null', '0')
js = json.loads(datastr)
df = pd.DataFrame(js, columns=vs.GDP_QUARTER_COLS)
df['quarter'] = df['quarter'].astype(object)
df[df==0] = np.NaN
return df
def get_gdp_for():
"""
Fetch data on the contributions of the three major demand components to GDP
Return
--------
DataFrame
year :statistical year
end_for :final consumption expenditure contribution rate (%)
for_rate :final consumption expenditure pull (percentage points)
asset_for :gross capital formation contribution rate (%)
asset_rate:gross capital formation pull (percentage points)
goods_for :net exports of goods and services contribution rate (%)
goods_rate :net exports of goods and services pull (percentage points)
"""
rdint = vs.random()
request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'],
rdint, vs.MACRO_TYPE[0], 4, 80, rdint))
text = urlopen(request,timeout=10).read()
text = text.decode('gbk') if ct.PY3 else text
regSym = re.compile(r'\,count:(.*?)\}')
datastr = regSym.findall(text)
datastr = datastr[0]
datastr = datastr.split('data:')[1]
datastr = datastr.replace('"','').replace('null','0')
js = json.loads(datastr)
df = pd.DataFrame(js,columns=vs.GDP_FOR_COLS)
df[df==0] = np.NaN
return df
def get_gdp_pull():
"""
Fetch data on the pull of the three major industries on GDP growth
Return
--------
DataFrame
year :statistical year
gdp_yoy :GDP year-on-year growth (%)
pi :primary industry pull rate (%)
si :secondary industry pull rate (%)
industry:of which, industry pull (%)
ti :tertiary industry pull rate (%)
"""
rdint = vs.random()
request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'],
rdint, vs.MACRO_TYPE[0], 5, 60, rdint))
text = urlopen(request,timeout=10).read()
text = text.decode('gbk') if ct.PY3 else text
regSym = re.compile(r'\,count:(.*?)\}')
datastr = regSym.findall(text)
datastr = datastr[0]
datastr = datastr.split('data:')[1]
datastr = datastr.replace('"', '').replace('null', '0')
js = json.loads(datastr)
df = pd.DataFrame(js, columns=vs.GDP_PULL_COLS)
df[df==0] = np.NaN
return df
def get_gdp_contrib():
"""
Fetch contribution-rate data for the three major industries
Return
--------
DataFrame
year :statistical year
gdp_yoy :gross domestic product
pi :primary industry contribution rate (%)
si :secondary industry contribution rate (%)
industry:of which, industry contribution rate (%)
ti :tertiary industry contribution rate (%)
"""
rdint = vs.random()
request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'], rdint,
vs.MACRO_TYPE[0], 6, 60, rdint))
text = urlopen(request, timeout=10).read()
text = text.decode('gbk') if ct.PY3 else text
regSym = re.compile(r'\,count:(.*?)\}')
datastr = regSym.findall(text)
datastr = datastr[0]
datastr = datastr.split('data:')[1]
datastr = datastr.replace('"', '').replace('null', '0')
js = json.loads(datastr)
df = pd.DataFrame(js, columns=vs.GDP_CONTRIB_COLS)
df[df==0] = np.NaN
return df
def get_cpi():
"""
Fetch consumer price index (CPI) data
Return
--------
DataFrame
month :statistical month
cpi :price index
"""
rdint = vs.random()
request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'],
rdint, vs.MACRO_TYPE[1], 0, 600,
rdint))
text = urlopen(request,timeout=10).read()
text = text.decode('gbk') if ct.PY3 else text
regSym = re.compile(r'\,count:(.*?)\}')
datastr = regSym.findall(text)
datastr = datastr[0]
datastr = datastr.split('data:')[1]
js = json.loads(datastr)
df = pd.DataFrame(js, columns=vs.CPI_COLS)
df['cpi'] = df['cpi'].astype(float)
return df
def get_ppi():
"""
Fetch producer price index (PPI) data
Return
--------
DataFrame
month :statistical month
ppiip :ex-factory price index of industrial products
ppi :price index of means of production
qm:mining industry price index
rmi:raw materials industry price index
pi:processing industry price index
cg:price index of consumer goods
food:food price index
clothing:clothing price index
roeu:general daily necessities price index
dcg:durable consumer goods price index
"""
rdint = vs.random()
request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'],
rdint, vs.MACRO_TYPE[1], 3, 600,
rdint))
text = urlopen(request, timeout=10).read()
text = text.decode('gbk') if ct.PY3 else text
regSym = re.compile(r'\,count:(.*?)\}')
datastr = regSym.findall(text)
datastr = datastr[0]
datastr = datastr.split('data:')[1]
js = json.loads(datastr)
df = pd.DataFrame(js, columns=vs.PPI_COLS)
for i in df.columns:
df[i] = df[i].apply(lambda x:np.where(x is None, np.NaN, x))
if i != 'month':
df[i] = df[i].astype(float)
return df
def get_deposit_rate():
"""
Fetch deposit rate data
Return
--------
DataFrame
date :change date
deposit_type :deposit type
rate:interest rate (%)
"""
rdint = vs.random()
request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'],
rdint, vs.MACRO_TYPE[2], 2, 600,
rdint))
text = urlopen(request, timeout=10).read()
text = text.decode('gbk')
regSym = re.compile(r'\,count:(.*?)\}')
datastr = regSym.findall(text)
datastr = datastr[0]
datastr = datastr.split('data:')[1]
js = json.loads(datastr)
df = pd.DataFrame(js, columns=vs.DEPOSIT_COLS)
for i in df.columns:
df[i] = df[i].apply(lambda x:np.where(x is None, '--', x))
return df
def get_loan_rate():
"""
Fetch loan rate data
Return
--------
DataFrame
date :effective date
loan_type :loan type
rate:interest rate (%)
"""
rdint = vs.random()
request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'],
rdint, vs.MACRO_TYPE[2], 3, 800,
rdint))
text = urlopen(request, timeout=10).read()
text = text.decode('gbk')
regSym = re.compile(r'\,count:(.*?)\}')
datastr = regSym.findall(text)
datastr = datastr[0]
datastr = datastr.split('data:')[1]
js = json.loads(datastr)
df = pd.DataFrame(js, columns=vs.LOAN_COLS)
for i in df.columns:
df[i] = df[i].apply(lambda x:np.where(x is None, '--', x))
return df
def get_rrr():
"""
Fetch required reserve ratio (RRR) data
Return
--------
DataFrame
date :change date
before :reserve ratio before adjustment (%)
now:reserve ratio after adjustment (%)
changed:adjustment magnitude (%)
"""
rdint = vs.random()
request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'],
rdint, vs.MACRO_TYPE[2], 4, 100,
rdint))
text = urlopen(request, timeout=10).read()
text = text.decode('gbk')
regSym = re.compile(r'\,count:(.*?)\}')
datastr = regSym.findall(text)
datastr = datastr[0]
datastr = datastr.split('data:')[1]
js = json.loads(datastr)
df = pd.DataFrame(js, columns=vs.RRR_COLS)
for i in df.columns:
df[i] = df[i].apply(lambda x:np.where(x is None, '--', x))
return df
def get_money_supply():
"""
Fetch money supply data
Return
--------
DataFrame
month :statistical period
m2 :money and quasi-money (broad money M2) (100 million CNY)
m2_yoy:money and quasi-money (broad money M2) year-on-year growth (%)
m1:money (narrow money M1) (100 million CNY)
m1_yoy:money (narrow money M1) year-on-year growth (%)
m0:cash in circulation (M0) (100 million CNY)
m0_yoy:cash in circulation (M0) year-on-year growth (%)
cd:demand deposits (100 million CNY)
cd_yoy:demand deposits year-on-year growth (%)
qm:quasi-money (100 million CNY)
qm_yoy:quasi-money year-on-year growth (%)
ftd:time deposits (100 million CNY)
ftd_yoy:time deposits year-on-year growth (%)
sd:savings deposits (100 million CNY)
sd_yoy:savings deposits year-on-year growth (%)
rests:other deposits (100 million CNY)
rests_yoy:other deposits year-on-year growth (%)
"""
rdint = vs.random()
request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'],
rdint, vs.MACRO_TYPE[2], 1, 600,
rdint))
text = urlopen(request, timeout=10).read()
text = text.decode('gbk')
regSym = re.compile(r'\,count:(.*?)\}')
datastr = regSym.findall(text)
datastr = datastr[0]
datastr = datastr.split('data:')[1]
js = json.loads(datastr)
df = pd.DataFrame(js, columns=vs.MONEY_SUPPLY_COLS)
for i in df.columns:
df[i] = df[i].apply(lambda x:np.where(x is None, '--', x))
return df
def get_money_supply_bal():
"""
Fetch money supply (year-end balance) data
Return
--------
DataFrame
year :statistical year
m2 :money and quasi-money (100 million CNY)
m1:money (100 million CNY)
m0:cash in circulation (100 million CNY)
cd:demand deposits (100 million CNY)
qm:quasi-money (100 million CNY)
ftd:time deposits (100 million CNY)
sd:savings deposits (100 million CNY)
rests:other deposits (100 million CNY)
"""
rdint = vs.random()
request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'],
rdint, vs.MACRO_TYPE[2], 0, 200,
rdint))
text = urlopen(request,timeout=10).read()
text = text.decode('gbk')
regSym = re.compile(r'\,count:(.*?)\}')
datastr = regSym.findall(text)
datastr = datastr[0]
datastr = datastr.split('data:')[1]
js = json.loads(datastr)
df = pd.DataFrame(js, columns=vs.MONEY_SUPPLY_BLA_COLS)
for i in df.columns:
df[i] = df[i].apply(lambda x:np.where(x is None, '--', x))
return df
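# A minimal standalone usage sketch: every function above follows the same
# pattern (build a Sina finance URL, fetch it, pull the JSON payload out of the
# 'data:' field with a regex, and return a DataFrame with the documented
# columns). Running it assumes network access to the Sina endpoints.
if __name__ == "__main__":
    gdp = get_gdp_year() # one row per statistical year
    cpi = get_cpi() # one row per statistical month
    print(gdp.head())
    print(cpi.head())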
|
bsd-3-clause
|
justincassidy/scikit-learn
|
sklearn/decomposition/tests/test_fastica.py
|
272
|
7798
|
"""
Test the fastica algorithm.
"""
import itertools
import warnings
import numpy as np
from scipy import stats
from nose.tools import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
from sklearn.decomposition import FastICA, fastica, PCA
from sklearn.decomposition.fastica_ import _gs_decorrelation
from sklearn.externals.six import moves
def center_and_norm(x, axis=-1):
""" Centers and norms x **in place**
Parameters
-----------
x: ndarray
Array with an axis of observations (statistical units) measured on
random variables.
axis: int, optional
Axis along which the mean and variance are calculated.
"""
x = np.rollaxis(x, axis)
x -= x.mean(axis=0)
x /= x.std(axis=0)
def test_gs():
# Test Gram-Schmidt orthonormalization
# generate a random orthogonal matrix
rng = np.random.RandomState(0)
W, _, _ = np.linalg.svd(rng.randn(10, 10))
w = rng.randn(10)
_gs_decorrelation(w, W, 10)
assert_less((w ** 2).sum(), 1.e-10)
w = rng.randn(10)
u = _gs_decorrelation(w, W, 5)
tmp = np.dot(u, W.T)
assert_less((tmp[:5] ** 2).sum(), 1.e-10)
def test_fastica_simple(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
# scipy.stats uses the global RNG:
np.random.seed(0)
n_samples = 1000
# Generate two sources:
s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
s2 = stats.t.rvs(1, size=n_samples)
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing angle
phi = 0.6
mixing = np.array([[np.cos(phi), np.sin(phi)],
[np.sin(phi), -np.cos(phi)]])
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(2, 1000)
center_and_norm(m)
# function as fun arg
def g_test(x):
return x ** 3, (3 * x ** 2).mean(axis=-1)
algos = ['parallel', 'deflation']
nls = ['logcosh', 'exp', 'cube', g_test]
whitening = [True, False]
for algo, nl, whiten in itertools.product(algos, nls, whitening):
if whiten:
k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo)
assert_raises(ValueError, fastica, m.T, fun=np.tanh,
algorithm=algo)
else:
X = PCA(n_components=2, whiten=True).fit_transform(m.T)
k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo, whiten=False)
assert_raises(ValueError, fastica, X, fun=np.tanh,
algorithm=algo)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
if whiten:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
else:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
# Test FastICA class
_, _, sources_fun = fastica(m.T, fun=nl, algorithm=algo, random_state=0)
ica = FastICA(fun=nl, algorithm=algo, random_state=0)
sources = ica.fit_transform(m.T)
assert_equal(ica.components_.shape, (2, 2))
assert_equal(sources.shape, (1000, 2))
assert_array_almost_equal(sources_fun, sources)
assert_array_almost_equal(sources, ica.transform(m.T))
assert_equal(ica.mixing_.shape, (2, 2))
for fn in [np.tanh, "exp(-.5(x^2))"]:
ica = FastICA(fun=fn, algorithm=algo, random_state=0)
assert_raises(ValueError, ica.fit, m.T)
assert_raises(TypeError, FastICA(fun=moves.xrange(10)).fit, m.T)
def test_fastica_nowhiten():
m = [[0, 1], [1, 0]]
# test for issue #697
ica = FastICA(n_components=1, whiten=False, random_state=0)
assert_warns(UserWarning, ica.fit, m)
assert_true(hasattr(ica, 'mixing_'))
def test_non_square_fastica(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
n_samples = 1000
# Generate two sources:
t = np.linspace(0, 100, n_samples)
s1 = np.sin(t)
s2 = np.ceil(np.sin(np.pi * t))
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing matrix
mixing = rng.randn(6, 2)
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(6, n_samples)
center_and_norm(m)
k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3)
def test_fit_transform():
# Test FastICA.fit_transform
rng = np.random.RandomState(0)
X = rng.random_sample((100, 10))
for whiten, n_components in [[True, 5], [False, None]]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
Xt = ica.fit_transform(X)
assert_equal(ica.components_.shape, (n_components_, 10))
assert_equal(Xt.shape, (100, n_components_))
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
ica.fit(X)
assert_equal(ica.components_.shape, (n_components_, 10))
Xt2 = ica.transform(X)
assert_array_almost_equal(Xt, Xt2)
def test_inverse_transform():
# Test FastICA.inverse_transform
n_features = 10
n_samples = 100
n1, n2 = 5, 10
rng = np.random.RandomState(0)
X = rng.random_sample((n_samples, n_features))
expected = {(True, n1): (n_features, n1),
(True, n2): (n_features, n2),
(False, n1): (n_features, n2),
(False, n2): (n_features, n2)}
for whiten in [True, False]:
for n_components in [n1, n2]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, random_state=rng,
whiten=whiten)
with warnings.catch_warnings(record=True):
# catch "n_components ignored" warning
Xt = ica.fit_transform(X)
expected_shape = expected[(whiten, n_components_)]
assert_equal(ica.mixing_.shape, expected_shape)
X2 = ica.inverse_transform(Xt)
assert_equal(X.shape, X2.shape)
# reversibility test in non-reduction case
if n_components == X.shape[1]:
assert_array_almost_equal(X, X2)
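# A compact standalone sketch of the round trip exercised in
# test_inverse_transform above: with no dimensionality reduction, mapping the
# mixtures to the estimated sources and back recovers the observations. The
# helper name, the sources and the mixing matrix are illustrative; the
# underscore prefix keeps it out of test collection.
def _demo_fastica_round_trip():
    t = np.linspace(0, 8, 2000)
    S = np.c_[np.sin(2 * t), np.sign(np.sin(3 * t))] # two independent sources
    A = np.array([[1.0, 0.5], [0.5, 2.0]]) # mixing matrix
    X = np.dot(S, A.T) # observed mixtures
    ica = FastICA(n_components=2, random_state=0)
    S_est = ica.fit_transform(X) # estimated sources
    X_back = ica.inverse_transform(S_est)
    assert_array_almost_equal(X, X_back)
    return S_est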
|
bsd-3-clause
|
lennepkade/dzetsaka
|
scripts/domainAdaptation.py
|
1
|
12927
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 23 11:24:38 2018
@author: nkarasiak
"""
try:
# if use in Qgis 3
from . import function_dataraster as dataraster
from .mainfunction import pushFeedback
except BaseException:
import function_dataraster as dataraster
from mainfunction import pushFeedback
import gdal
#import tempfile
# import ot
import os
#from sklearn import preprocessing
import numpy as np
class rasterOT(object):
"""
Initialize Python Optimal Transport for raster processing.
Parameters
----------
transportAlgorithm : str
item in list : ['MappingTransport','EMDTransport','SinkhornTransport','SinkhornLpl1Transport','SinkhornL1l2Transport']
scaler : bool
If scaler is True, use MinMaxScaler with feature_range from -1 to 1.
params : dict, optional
Parameters for the chosen transport algorithm; list or array values trigger a grid search over those values.
feedback : object
feedback object from Qgis Processing
"""
def __init__(self, transportAlgorithm="MappingTransport",
scaler=False, params=None, feedback=True):
try:
from sklearn.metrics import mean_squared_error
from itertools import product
from sklearn.metrics import (
f1_score, cohen_kappa_score, accuracy_score)
except BaseException:
raise ImportError('Please install itertools and scikit-learn')
self.transportAlgorithm = transportAlgorithm
self.feedback = feedback
self.params_ = params
if scaler:
from sklearn.preprocessing import MinMaxScaler
self.scaler = MinMaxScaler(feature_range=(-1, 1))
self.scalerTarget = MinMaxScaler(feature_range=(-1, 1))
else:
self.scaler = scaler
def learnTransfer(self, Xs, ys, Xt, yt=None):
"""
Learn domain adaptation model.
Parameters
----------
Xs : array_like, shape (n_source_samples, n_features)
Source domain array.
ys : array_like, shape (n_source_samples,)
Label source array (1d).
Xt: array_like, shape (n_target_samples, n_features)
Target domain array.
yt: array_like, shape (n_target_samples,), optional
Label target array (1d).
Returns
-------
transportmodel : object
The output model
"""
# save original samples
self.Xs_ = Xs
self.Xt_ = Xt
self.params = self.params_
if self.feedback:
pushFeedback(10, feedback=self.feedback)
pushFeedback('Learning Optimal Transport with ' +
str(self.transportAlgorithm) +
' algorithm.', feedback=self.feedback)
# check if label is 1d
if ys is not None:
if len(ys.shape) > 1:
ys = ys[:, 0]
if yt is not None:
if len(yt.shape) > 1:
yt = yt[:, 0]
# rescale Data
if self.scaler:
self.scaler.fit(Xs, ys)
self.scalerTarget.fit(Xt, yt)
Xs = self.scaler.transform(Xs)
Xt = self.scalerTarget.transform(Xt)
# import Domain Adaptation specific algorithm function from OT Library
self.transportFunction = getattr(
__import__("ot").da, self.transportAlgorithm)
if self.params is None:
self.transportModel = self.transportFunction()
else:
# order for reproducibility
self.params = sorted(self.params.items())
# if grid search
if self.isGridSearch():
# compute combinations for each param
self.findBestParameters(Xs, ys=ys, Xt=Xt, yt=yt)
self.transportModel = self.transportFunction(**self.bestParam)
else:
# simply train with basic param
self.transportModel = self.transportFunction(**self.params_)
self.transportModel.fit(Xs, ys=ys, Xt=Xt, yt=yt)
if self.feedback:
pushFeedback(20, feedback=self.feedback)
return self.transportModel
def predictTransfer(self, imageSource, outRaster, mask=None,
NODATA=-9999, feedback=None, norm=False):
"""
Predict model using domain adaptation.
Parameters
----------
model : object
Model generated from learnTransfer function.
imageSource : str
Path of image to adapt (source image)
outRaster : str
Path of tiff image to save as.
mask: str, optional
Path of raster mask.
NODATA : int, optional
Default -9999
feedback : object, optional
For Qgis Processing. Default is None.
Returns
-------
outRaster : str
Return the path of the predicted image.
"""
if self.feedback:
pushFeedback('Now transporting ' +
str(os.path.basename(imageSource)))
dataSrc = gdal.Open(imageSource)
# Get the size of the image
d = dataSrc.RasterCount
nc = dataSrc.RasterXSize
nl = dataSrc.RasterYSize
# Get the geoinformation
GeoTransform = dataSrc.GetGeoTransform()
Projection = dataSrc.GetProjection()
# Get block size
band = dataSrc.GetRasterBand(1)
block_sizes = band.GetBlockSize()
x_block_size = block_sizes[0]
y_block_size = block_sizes[1]
#gdal_dt = band.DataType
# Initialize the output
driver = gdal.GetDriverByName('GTiff')
dst_ds = driver.Create(outRaster, nc, nl, d, 3)
dst_ds.SetGeoTransform(GeoTransform)
dst_ds.SetProjection(Projection)
del band
# Perform the classification
if mask is not None:
maskData = gdal.Open(mask, gdal.GA_ReadOnly)
total = nl * y_block_size
total = 80 / (int(nl / y_block_size))
for i in range(0, nl, y_block_size):
# feedback for Qgis
if self.feedback:
pushFeedback(int(i * total) + 20, feedback=self.feedback)
try:
if self.feedback.isCanceled():
break
except BaseException:
pass
if i + y_block_size < nl: # Check for size consistency in Y
lines = y_block_size
else:
lines = nl - i
for j in range(
0, nc, x_block_size): # Check for size consistency in X
if j + x_block_size < nc:
cols = x_block_size
else:
cols = nc - j
# Load the data and Do the prediction
X = np.empty((cols * lines, d))
for ind in range(d):
X[:, ind] = dataSrc.GetRasterBand(
int(ind + 1)).ReadAsArray(j, i, cols, lines).reshape(cols * lines)
# Do the prediction
if mask is None:
mask_temp = dataSrc.GetRasterBand(1).ReadAsArray(
j, i, cols, lines).reshape(cols * lines)
else:
mask_temp = maskData.GetRasterBand(1).ReadAsArray(
j, i, cols, lines).reshape(cols * lines)
# check if nodata
t = np.where((mask_temp != 0) & (X[:, 0] != NODATA))[0]
# transform array, default has nodata value
yp = np.empty((cols * lines, d))
yp[:, :] = NODATA
# yp = np.nan((cols*lines,d))
# K = np.zeros((cols*lines,))
# TODO: Change this part accordingly ...
# if t.size > 0:
if t.size > 0:
tempOT = X[t, :]
yp[t, :] = self.transportModel.transform(tempOT)
for ind in range(d):
out = dst_ds.GetRasterBand(ind + 1)
# Write the data
ypTemp = yp[:, ind]
out.WriteArray(ypTemp.reshape(lines, cols), j, i)
out.SetNoDataValue(NODATA)
out.FlushCache()
del X, yp
return outRaster
def isGridSearch(self):
# search for gridSearch
paramGrid = []
for key in self.params_.keys():
if isinstance(self.params_.get(key), (list, np.ndarray)):
paramGrid.append(key)
if paramGrid == []:
self.paramGrid = False
else:
self.paramGrid = paramGrid
self.params = self.params_.copy()
if self.paramGrid:
return True
else:
return False
def generateParamForGridSearch(self):
hyperParam = {key: self.params_[key] for key in self.paramGrid}
items = sorted(hyperParam.items())
keys, values = zip(*items)
for v in product(*values):
paramsToAdd = dict(zip(keys, v))
self.params.update(paramsToAdd)
yield self.params
def findBestParameters(self, Xs, ys, Xt, yt):
self.bestScore = None
for gridOT in self.generateParamForGridSearch():
self.transportModel = self.transportFunction(**gridOT)
self.transportModel.fit(Xs, ys, Xt, yt)
#XsTransformed = self.transportModel.transform(Xs)
#XsPredict = self.inverseTransform(XsTransformed)
from ot.da import BaseTransport
transp_Xt = BaseTransport.inverse_transform(
self.transportModel, Xs=Xs, ys=ys, Xt=Xt, yt=yt)
if self.feedback:
pushFeedback(
'Testing params : ' + str(gridOT),
feedback=self.feedback)
"""
#score = mean_squared_error(Xs,XsPredict)
from sklearn.svm import SVC
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GridSearchCV
param_grid = dict(gamma=2.0**np.arange(-4,1), C=10.0**np.arange(-2,3))
classifier = SVC(probability=False)
cv = StratifiedKFold(n_splits=5)
grid = GridSearchCV(classifier,param_grid=param_grid, cv=cv,n_jobs=1)
# need to rescale for hyperparameter of svm
if self.scaler is False:
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler(feature_range=(-1,1))
scaler.fit(Xs,ys)
Xs = scaler.transform(Xs)
XsPredict = scaler.transform(XsPredict)
#XsPredict = scaler.transform(XsPredict)
grid.fit(Xs,ys)
model = grid.best_estimator_
model.fit(Xs,ys)
yp = model.predict(XsPredict)
currentScore = dict(OA=accuracy_score(yp,ys),Kappa=cohen_kappa_score(yp,ys),F1=f1_score(yp,ys,average='micro'))
if self.feedback:
pushFeedback('Kappa is : '+str(currentScore.get('Kappa')))
if self.bestScore is None or self.bestScore.get('Kappa') < currentScore.get('Kappa'):
self.bestScore = currentScore.copy()
self.bestParam = gridOT.copy()
"""
currentScore = mean_squared_error(Xs, transp_Xt)
if self.feedback:
pushFeedback(
'RMSE is : ' + str(currentScore),
feedback=self.feedback)
if self.bestScore is None or self.bestScore > currentScore:
self.bestScore = currentScore
self.bestParam = gridOT.copy()
"""
del self.transportModel,yp
"""
if self.feedback:
pushFeedback('Best grid is ' +
str(self.bestParam), feedback=self.feedback)
pushFeedback('Best score is ' +
str(self.bestScore), feedback=self.feedback)
"""
def gridSearchCV(self):
"""
def inverseTransform(self, Xt):
"""Transports target samples Xt onto target samples Xs
Parameters
----------
Xt : array-like, shape (n_target_samples, n_features)
The target samples to map back into the source domain.
Returns
-------
transp_Xt : array-like, shape (n_target_samples, n_features)
The transported target samples (expressed in the source domain).
"""
# perform standard barycentric mapping
transp = self.transportModel.coupling_.T / \
np.sum(self.transportModel.coupling_, 0)[:, None]
# set nans to 0
transp[~ np.isfinite(transp)] = 0
# compute transported samples
transp_Xt = np.dot(transp, self.transportModel.xs_)
return transp_Xt
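# A minimal usage sketch of the class above. The arrays and file paths are
# placeholders, and POT ('ot'), GDAL and scikit-learn are assumed to be
# installed.
if __name__ == "__main__":
    Xs = np.random.rand(100, 4) # 100 source pixels with 4 bands
    ys = np.random.randint(1, 4, 100) # source labels
    Xt = np.random.rand(100, 4) # 100 target pixels with 4 bands
    ot_model = rasterOT(transportAlgorithm="EMDTransport", scaler=True,
                        params=None, feedback=False)
    ot_model.learnTransfer(Xs, ys, Xt)
    # transport a whole raster and write the adapted image to disk
    ot_model.predictTransfer("source_image.tif", "adapted_image.tif")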
|
gpl-3.0
|
treverhines/RBF
|
docs/scripts/interpolate.a.py
|
1
|
1207
|
'''
In this example we generate synthetic scattered data with added noise
and then fit it with a smoothed RBF interpolant. The interpolant in
this example is equivalent to a thin plate spline.
'''
import numpy as np
from rbf.interpolate import RBFInterpolant
import matplotlib.pyplot as plt
np.random.seed(1)
# observation points
x_obs = np.random.random((100, 2))
# values at the observation points
u_obs = np.sin(2*np.pi*x_obs[:, 0])*np.cos(2*np.pi*x_obs[:, 1])
u_obs += np.random.normal(0.0, 0.1, 100)
# create a thin-plate spline interpolant, where the data is assumed to
# be noisy
I = RBFInterpolant(x_obs, u_obs, sigma=0.1, phi='phs2', order=1)
# create the interpolation points, and evaluate the interpolant
x1, x2 = np.linspace(0, 1, 200), np.linspace(0, 1, 200)
x_itp = np.reshape(np.meshgrid(x1, x2), (2, 200*200)).T
u_itp = I(x_itp)
# plot the results
plt.tripcolor(x_itp[:, 0], x_itp[:, 1], u_itp, vmin=-1.1, vmax=1.1, cmap='viridis')
plt.scatter(x_obs[:, 0], x_obs[:, 1], s=100, c=u_obs, vmin=-1.1, vmax=1.1,
cmap='viridis', edgecolor='k')
plt.xlim((0.05, 0.95))
plt.ylim((0.05, 0.95))
plt.colorbar()
plt.tight_layout()
plt.savefig('../figures/interpolate.a.png')
plt.show()
|
mit
|
Bleyddyn/malpi
|
exp/check_weights.py
|
1
|
2975
|
import numpy as np
import pickle
from malpi.optimizer import *
from malpi import optim
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from optparse import OptionParser
def stats(arr, msg=""):
mi = np.min(arr)
ma = np.max(arr)
av = np.mean(arr)
std = np.std(arr)
arr_abs = np.abs(arr)
mi_abs = np.min(arr_abs)
ma_abs = np.max(arr_abs)
print "%sMin/Max/Mean/Stdev abs(Min/Max): %g/%g/%g/%g %g/%g" % (msg,mi,ma,av,std,mi_abs,ma_abs)
parser = OptionParser()
(options, args) = parser.parse_args()
if len(args) != 1:
print "Usage: python check_weights.py <model name>"
exit()
if args[0].endswith('.pickle'):
args[0] = args[0][:-7]
with open(args[0] + '.pickle') as f:
model = pickle.load( f )
for k,w in model.params.iteritems():
stats( w, msg=k+" " )
xs = []
ys = []
zs = []
zs2 = []
zs3 = []
for x in np.random.uniform( -1.3, 0.7, 20 ):
for y in np.random.uniform( -0.8, 0.8, 20 ):
qvalues,_ = model.forward( np.reshape( np.array([x, y]), (1,2)), mode="test")
xs.append(x)
ys.append(y)
zs.append( qvalues[0][0] )
zs2.append( qvalues[0][1] )
zs3.append( qvalues[0][2] )
print "Max 0: %f" % np.max(zs)
print "Max 1: %f" % np.max(zs2)
print "Max 2: %f" % np.max(zs3)
fig = plt.figure(1,figsize=(16, 18), dpi=80)
ax = fig.add_subplot(311, projection='3d')
ax.scatter(xs,ys,zs=zs)
ax.set_xlabel('Location')
ax.set_ylabel('Velocity')
ax.set_title('Action Left')
ax = fig.add_subplot(312, projection='3d')
ax.scatter(xs,ys,zs=zs2)
ax.set_xlabel('Location')
ax.set_ylabel('Velocity')
ax.set_title('Action Noop')
ax = fig.add_subplot(313, projection='3d')
ax.scatter(xs,ys,zs=zs3)
ax.set_xlabel('Location')
ax.set_ylabel('Velocity')
ax.set_title('Action Right')
plt.show()
# get qvalues for a range of mc inputs and plot them
#High: [ 0.6 0.07]
#Low: [-1.2 -0.07]
#for i in range(10):
# state = np.random.uniform( 0.0, 1.0, (1,4,84,84) )
# q_values, _ = model.forward(state, mode="test")
# print q_values[0]
#with open('optimizer_test.pickle') as f:
# (w,dw,config) = pickle.load( f )
#
#del config['cache']
#
#update_rule = optim.rmsprop
#
#model.params = {'W5': w}
#optim = Optimizer("rmsprop", model, learning_rate=config['learning_rate'], decay_rate=config['decay_rate'], epsilon=config['epsilon'])
#print config
#optim.describe()
#
#diff = model.params['W5'] - w
#stats(diff, 'before ')
#
#next_w, next_config = update_rule(w, dw, config)
#
#grads = {'W5': dw}
#optim.update(grads)
#
#diff = model.params['W5'] - next_w
#stats(diff, 'after ')
#diff = optim.cache['W5'] - next_config['cache']
#stats(optim.cache['W5'], 'cache ')
#stats(diff, 'diffs ')
#
#if False:
# for k,w in model.params.iteritems():
# print k
# mask_zeros = w != 0.0
# mask = np.abs(w) < 1e-20
# mask = np.logical_and(mask_zeros,mask)
# if np.count_nonzero(mask) > 0:
# print "Underflow in %s " % (k,)
|
mit
|
RJT1990/pyflux
|
pyflux/gas/tests/gasreg_tests_t.py
|
1
|
18048
|
import numpy as np
import pandas as pd
import pyflux as pf
# Set up some data to use for the tests
noise = np.random.normal(0,1,250)
y = np.zeros(250)
x1 = np.random.normal(0,1,250)
x2 = np.random.normal(0,1,250)
for i in range(1,len(y)):
y[i] = 0.9*y[i-1] + noise[i] + 0.1*x1[i] - 0.3*x2[i]
data = pd.DataFrame([y,x1,x2]).T
data.columns = ['y', 'x1', 'x2']
y_oos = np.random.normal(0,1,30)
x1_oos = np.random.normal(0,1,30)
x2_oos = np.random.normal(0,1,30)
data_oos = pd.DataFrame([y_oos,x1_oos,x2_oos]).T
data_oos.columns = ['y', 'x1', 'x2']
def test_normal_no_terms():
"""
Tests the length of the latent variable vector for a GASReg model
with no AR or MA terms, and tests that the values are not NaN
"""
model = pf.GASReg(formula="y ~ x1", data=data, family=pf.t())
x = model.fit()
assert(len(model.latent_variables.z_list) == 4)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_normal_bbvi():
"""
Tests a GASReg model estimated with BBVI, and tests that the latent variable
vector length is correct, and that the values are not NaN
"""
model = pf.GASReg(formula="y ~ x1", data=data, family=pf.t())
x = model.fit('BBVI',iterations=100)
assert(len(model.latent_variables.z_list) == 4)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_normal_bbvi_mini_batch():
"""
Tests a GASReg model estimated with BBVI (mini-batch) and that the length of the latent
variable list is correct, and that the estimated latent variables are not NaN
"""
model = pf.GASReg(formula="y ~ x1", data=data, family=pf.t())
x = model.fit('BBVI',iterations=100, mini_batch=32)
assert(len(model.latent_variables.z_list) == 4)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_normal_bbvi_elbo():
"""
Tests that the ELBO increases
"""
model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.t())
x = model.fit('BBVI',iterations=100, record_elbo=True)
assert(x.elbo_records[-1]>x.elbo_records[0])
def test_normal_bbvi_mini_batch_elbo():
"""
Tests that the ELBO increases
"""
model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.t())
x = model.fit('BBVI',iterations=100, mini_batch=32, record_elbo=True)
assert(x.elbo_records[-1]>x.elbo_records[0])
def test_normal_mh():
"""
Tests a GASReg model estimated with Metropolis-Hastings, and tests that the latent variable
vector length is correct, and that the values are not NaN
"""
model = pf.GASReg(formula="y ~ x1", data=data, family=pf.t())
x = model.fit('M-H',nsims=300)
assert(len(model.latent_variables.z_list) == 4)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_normal_laplace():
"""
Tests a GASReg model estimated with a Laplace approximation, and tests that the latent variable
vector length is correct, and that the values are not NaN
"""
model = pf.GASReg(formula="y ~ x1", data=data, family=pf.t())
x = model.fit('Laplace')
assert(len(model.latent_variables.z_list) == 4)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_normal_pml():
"""
Tests a GASReg model estimated with PML, and tests that the latent variable
vector length is correct, and that the values are not NaN
"""
model = pf.GASReg(formula="y ~ x1", data=data, family=pf.t())
x = model.fit('PML')
assert(len(model.latent_variables.z_list) == 4)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_normal_predict_length():
"""
Tests that the length of the predict dataframe is equal to no of steps h
"""
model = pf.GASReg(formula="y ~ x1", data=data, family=pf.t())
x = model.fit()
x.summary()
assert(model.predict(h=5, oos_data=data_oos).shape[0] == 5)
def test_normal_predict_is_length():
"""
Tests that the length of the predict IS dataframe is equal to no of steps h
"""
model = pf.GASReg(formula="y ~ x1", data=data, family=pf.t())
x = model.fit()
assert(model.predict_is(h=5).shape[0] == 5)
def test_normal_predict_nans():
"""
Tests that the predictions are not NaNs
"""
model = pf.GASReg(formula="y ~ x1", data=data, family=pf.t())
x = model.fit()
x.summary()
assert(len(model.predict(h=5, oos_data=data_oos).values[np.isnan(model.predict(h=5,
oos_data=data_oos).values)]) == 0)
def test_normal_predict_is_nans():
"""
Tests that the predictions in-sample are not NaNs
"""
model = pf.GASReg(formula="y ~ x1", data=data, family=pf.t())
x = model.fit()
x.summary()
assert(len(model.predict_is(h=5).values[np.isnan(model.predict_is(h=5).values)]) == 0)
def test_predict_intervals():
"""
Tests prediction intervals are ordered correctly
"""
model = pf.GASReg(formula="y ~ x1", data=data, family=pf.t())
x = model.fit()
predictions = model.predict(h=10, oos_data=data_oos, intervals=True)
assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))
assert(np.all(predictions['95% Prediction Interval'].values > predictions['5% Prediction Interval'].values))
assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))
def test_predict_is_intervals():
"""
Tests prediction intervals are ordered correctly
"""
model = pf.GASReg(formula="y ~ x1", data=data, family=pf.t())
x = model.fit()
predictions = model.predict_is(h=10, intervals=True)
assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))
assert(np.all(predictions['95% Prediction Interval'].values > predictions['5% Prediction Interval'].values))
assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))
def test_predict_intervals_bbvi():
"""
Tests prediction intervals are ordered correctly
"""
model = pf.GASReg(formula="y ~ x1", data=data, family=pf.t())
x = model.fit('BBVI', iterations=100)
predictions = model.predict(h=10, oos_data=data_oos, intervals=True)
assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))
assert(np.all(predictions['95% Prediction Interval'].values > predictions['5% Prediction Interval'].values))
assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))
def test_predict_is_intervals_bbvi():
"""
Tests prediction intervals are ordered correctly
"""
model = pf.GASReg(formula="y ~ x1", data=data, family=pf.t())
x = model.fit('BBVI', iterations=100)
predictions = model.predict_is(h=10, intervals=True)
assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))
assert(np.all(predictions['95% Prediction Interval'].values > predictions['5% Prediction Interval'].values))
assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))
def test_predict_intervals_mh():
"""
Tests prediction intervals are ordered correctly
"""
"""
model = pf.GASReg(formula="y ~ x1", data=data, family=pf.t())
x = model.fit('M-H', nsims=400)
predictions = model.predict(h=10, intervals=True)
assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))
assert(np.all(predictions['95% Prediction Interval'].values > predictions['5% Prediction Interval'].values))
assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))
"""
def test_predict_is_intervals_mh():
"""
Tests prediction intervals are ordered correctly
"""
"""
model = pf.GASReg(formula="y ~ x1", data=data, family=pf.t())
x = model.fit('M-H', nsims=400)
predictions = model.predict_is(h=10, intervals=True)
assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))
assert(np.all(predictions['95% Prediction Interval'].values > predictions['5% Prediction Interval'].values))
assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))
"""
def test_sample_model():
"""
Tests sampling function
"""
model = pf.GASReg(formula="y ~ x1", data=data, family=pf.t())
x = model.fit('BBVI', iterations=100)
sample = model.sample(nsims=100)
assert(sample.shape[0]==100)
assert(sample.shape[1]==len(data))
def test_ppc():
"""
Tests PPC value
"""
model = pf.GASReg(formula="y ~ x1", data=data, family=pf.t())
x = model.fit('BBVI', iterations=100)
p_value = model.ppc()
assert(0.0 <= p_value <= 1.0)
## Try more than one predictor
def test2_normal_no_terms():
"""
Tests the length of the latent variable vector for a GASReg model
with no AR or MA terms, and two predictors, and tests that the values
are not NaN
"""
model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.t())
x = model.fit()
assert(len(model.latent_variables.z_list) == 5)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test2_normal_bbvi():
"""
Tests a GASReg model estimated with BBVI, with multiple predictors, and
tests that the latent variable vector length is correct, and that the values are not NaN
"""
model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.t())
x = model.fit('BBVI',iterations=100)
assert(len(model.latent_variables.z_list) == 5)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test2_normal_bbvi_mini_batch():
"""
Tests a GASReg model estimated with BBVI (mini-batch) and that the length of the latent
variable list is correct, and that the estimated latent variables are not NaN
"""
model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.t())
x = model.fit('BBVI',iterations=100, mini_batch=32)
assert(len(model.latent_variables.z_list) == 5)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test2_normal_bbvi_elbo():
"""
Tests that the ELBO increases
"""
model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.t())
x = model.fit('BBVI',iterations=100, record_elbo=True)
assert(x.elbo_records[-1]>x.elbo_records[0])
def test2_normal_bbvi_mini_batch_elbo():
"""
Tests that the ELBO increases
"""
model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.t())
x = model.fit('BBVI',iterations=100, mini_batch=32, record_elbo=True)
assert(x.elbo_records[-1]>x.elbo_records[0])
def test2_normal_mh():
"""
Tests a GASReg model estimated with Metropolis-Hastings, with multiple predictors, and
tests that the latent variable vector length is correct, and that the values are not NaN
"""
model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.t())
x = model.fit('M-H',nsims=300)
assert(len(model.latent_variables.z_list) == 5)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test2_normal_normal():
"""
    Tests a GASReg model estimated with a Laplace approximation, with multiple predictors, and
    tests that the latent variable vector length is correct, and that the values are not nan
"""
model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.t())
x = model.fit('Laplace')
assert(len(model.latent_variables.z_list) == 5)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test2_normal_pml():
"""
    Tests a GASReg model estimated with PML, with multiple predictors, and
    tests that the latent variable vector length is correct, and that the values are not nan
"""
model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.t())
x = model.fit('PML')
assert(len(model.latent_variables.z_list) == 5)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test2_normal_predict_length():
"""
Tests that the length of the predict dataframe is equal to no of steps h
"""
model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.t())
x = model.fit()
x.summary()
assert(model.predict(h=5, oos_data=data_oos).shape[0] == 5)
def test2_normal_predict_is_length():
"""
Tests that the length of the predict IS dataframe is equal to no of steps h
"""
model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.t())
x = model.fit()
assert(model.predict_is(h=5).shape[0] == 5)
def test2_normal_predict_nans():
"""
Tests that the predictions are not NaNs
"""
model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.t())
x = model.fit()
x.summary()
assert(len(model.predict(h=5, oos_data=data_oos).values[np.isnan(model.predict(h=5,
oos_data=data_oos).values)]) == 0)
def test2_normal_predict_is_nans():
"""
Tests that the predictions in-sample are not NaNs
"""
model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.t())
x = model.fit()
x.summary()
assert(len(model.predict_is(h=5).values[np.isnan(model.predict_is(h=5).values)]) == 0)
def test2_predict_intervals():
"""
Tests prediction intervals are ordered correctly
"""
model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.t())
x = model.fit()
predictions = model.predict(h=10, oos_data=data_oos, intervals=True)
assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))
assert(np.all(predictions['95% Prediction Interval'].values > predictions['5% Prediction Interval'].values))
assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))
def test2_predict_is_intervals():
"""
Tests prediction intervals are ordered correctly
"""
model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.t())
x = model.fit()
predictions = model.predict_is(h=10, intervals=True)
assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))
assert(np.all(predictions['95% Prediction Interval'].values > predictions['5% Prediction Interval'].values))
assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))
def test2_predict_intervals_bbvi():
"""
Tests prediction intervals are ordered correctly
"""
model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.t())
x = model.fit('BBVI', iterations=100)
predictions = model.predict(h=10, oos_data=data_oos, intervals=True)
assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))
assert(np.all(predictions['95% Prediction Interval'].values > predictions['5% Prediction Interval'].values))
assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))
def test2_predict_is_intervals_bbvi():
"""
Tests prediction intervals are ordered correctly
"""
model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.t())
x = model.fit('BBVI', iterations=100)
predictions = model.predict_is(h=10, intervals=True)
assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))
assert(np.all(predictions['95% Prediction Interval'].values > predictions['5% Prediction Interval'].values))
assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))
def test2_predict_intervals_mh():
"""
Tests prediction intervals are ordered correctly
"""
"""
model = pf.GASReg(formula="y ~ x1", data=data, family=pf.t())
x = model.fit('M-H', nsims=400)
predictions = model.predict(h=10, intervals=True)
assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))
assert(np.all(predictions['95% Prediction Interval'].values > predictions['5% Prediction Interval'].values))
assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))
"""
def test2_predict_is_intervals_mh():
"""
Tests prediction intervals are ordered correctly
"""
"""
model = pf.GASReg(formula="y ~ x1", data=data, family=pf.t())
x = model.fit('M-H', nsims=400)
predictions = model.predict_is(h=10, intervals=True)
assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))
assert(np.all(predictions['95% Prediction Interval'].values > predictions['5% Prediction Interval'].values))
assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))
"""
def test2_sample_model():
"""
Tests sampling function
"""
model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.t())
x = model.fit('BBVI', iterations=100)
sample = model.sample(nsims=100)
assert(sample.shape[0]==100)
assert(sample.shape[1]==len(data))
def test2_ppc():
"""
Tests PPC value
"""
model = pf.GASReg(formula="y ~ x1 + x2", data=data, family=pf.t())
x = model.fit('BBVI', iterations=100)
p_value = model.ppc()
assert(0.0 <= p_value <= 1.0)
|
bsd-3-clause
|
chicago-justice-project/article-tagging
|
lib/tagnews/crimetype/benchmark.py
|
2
|
3727
|
from __future__ import division, print_function
import numpy as np
import pandas as pd
def get_kfold_split(N, k=4):
"""
Create groups used for k-fold cross validation.
Parameters
----------
N : number of samples to split
k : number of groups used for cross validation
Returns
-------
List of (index_train, index_test) pairs
"""
np.random.seed(2017)
idx = np.random.permutation(N)
    index_pairs = [(np.ones(N).astype(bool),
                    np.zeros(N).astype(bool))
for _ in range(k)]
for i, fold_idx in enumerate(np.array_split(idx, k)):
index_pairs[i][0][fold_idx] = 0
index_pairs[i][1][fold_idx] = 1
return index_pairs
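# Hedged illustration (comments only, not executed here): for N=10 samples and
# k=4 folds, each pair holds complementary boolean masks, for example
#   >>> pairs = get_kfold_split(10, k=4)
#   >>> [int(te.sum()) for _, te in pairs]
#   [3, 3, 2, 2]
#   >>> all(int(tr.sum() + te.sum()) == 10 for tr, te in pairs)
#   True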
def benchmark(clf_factory, X, Y, clf_params_dict=None, k=4, verbose=False):
"""
    Benchmark a classifier on preprocessed data.
    Parameters
    ----------
    clf_factory :
        Function which returns a classifier. Classifiers implement
        a `fit` method and a `predict_proba` method. The parameters
        in clf_params_dict will be passed to clf_factory.
X : NxM matrix of features
Y : NxL matrix of binary values. Y[i,j] indicates whether or
not the j'th tag applies to the i'th article.
clf_params_dict :
dictionary of parameters passed to the classifier factory.
If None, no parameters are passed.
k : how many folds to use for cross validation
verbose : Should status be printed?
"""
if clf_params_dict is None:
clf_params_dict = {}
L = Y.shape[1]
fold_indexes = get_kfold_split(X.shape[0], k)
acc = np.zeros(k)
tpr = np.zeros((k, L))
fpr = np.zeros((k, L))
ppv = np.zeros((k, L))
clfs = []
for i, (idx_trn, idx_tst) in enumerate(fold_indexes):
if verbose:
print('step {} of {}...'.format(i, k), end='')
clf = clf_factory(**clf_params_dict)
x_trn = X[idx_trn, :]
y_trn = Y[idx_trn, :]
x_tst = X[idx_tst, :]
y_tst = Y[idx_tst, :]
clf.fit(x_trn, y_trn)
y_hat = clf.predict_proba(x_tst)
y_hat = y_hat > 0.5
        y_hat = y_hat.astype(np.int8)
        y_tst = y_tst.astype(np.int8)
acc[i] = (np.sum(y_tst == y_hat)) / float(y_tst.size)
for j in range(L):
tpr[i, j] = np.sum(y_tst[:, j] & y_hat[:, j]) / np.sum(y_tst[:, j])
fpr[i, j] = (np.sum(np.logical_not(y_tst[:, j]) & y_hat[:, j])
/ np.sum(np.logical_not(y_tst[:, j])))
ppv[i, j] = np.sum(y_tst[:, j] & y_hat[:, j]) / np.sum(y_hat[:, j])
clfs.append(clf)
if verbose:
print('done')
return {'acc': acc, 'tpr': tpr, 'fpr': fpr, 'ppv': ppv, 'clfs': clfs}
def predict_articles(clf, vectorizer, df, n=100, seed=1029384756):
np.random.seed(seed)
pd.set_option('display.max_columns', 100)
pd.set_option('display.float_format', lambda x: '%.6f' % x)
random_subset = np.random.choice(np.arange(df.shape[0]),
size=n,
replace=False)
preds = clf.predict_proba(vectorizer.transform(
df.iloc[random_subset, 3].values
))
preds = pd.DataFrame(preds)
preds.columns = df.loc[:, 'OEMC':'TASR'].columns
for i, rand_i in enumerate(random_subset):
s = 'Article ID: ' + str(df.index[rand_i])
s += '\n' + df.iloc[rand_i, 3]
s += '\n Predicted Tags: '
s += str(preds.iloc[i, :].index[preds.iloc[i, :] > 0.5].values)
s += '\n' + str(preds.iloc[i, :])
s += '\n'
filename = 'test-tag-' + str(df.index[rand_i]) + '.txt'
with open(filename, 'w', encoding='utf-8') as f:
f.write(s)
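# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): a minimal example of
# the interface `benchmark` expects. Any factory returning an object with
# `fit(X, Y)` and `predict_proba(X)` (an N x L array of probabilities) will
# do; the toy classifier and random data below are illustrative only.
if __name__ == '__main__':
    class _MeanRateClassifier(object):
        """Toy classifier that predicts each tag's training frequency."""
        def fit(self, X, Y):
            # per-tag positive rate observed in the training fold
            self.rates_ = Y.mean(axis=0)
            return self
        def predict_proba(self, X):
            # repeat the stored rates for every test sample
            return np.tile(self.rates_, (X.shape[0], 1))
    rng = np.random.RandomState(0)
    X_demo = rng.rand(200, 6)        # 200 samples, 6 features
    Y_demo = rng.rand(200, 3) > 0.3  # 3 binary tags, boolean dtype
    results = benchmark(_MeanRateClassifier, X_demo, Y_demo, k=4, verbose=True)
    print('mean accuracy over folds:', results['acc'].mean())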
|
mit
|
thiemom/docker-flask-sklearn
|
app/app.py
|
1
|
2558
|
from flask import Flask, request, render_template, jsonify
import numpy as np
from sklearn import linear_model
from sklearn.externals import joblib
from sklearn import datasets
import bokeh
import bokeh.plotting as plt
from bokeh.embed import components
# toy data
data = datasets.load_boston()
data_id = 'Boston Housing Dataset'
target_name = 'Price'
# select one feature
feature_index = 0
feature_name = data.feature_names[feature_index]
X = data.data
y = data.target
# fit model
model = linear_model.LinearRegression()
model.fit(X, y)
# save model
model_filename = 'model.pkl'
joblib.dump(model, model_filename)
app = Flask(__name__)
@app.route('/')
def index():
return "Hello, from a Machine Learning Web Tool"
@app.route('/predict', methods=['GET'])
def predict():
loaded_model = joblib.load(model_filename)
Xp = np.empty((1, X.shape[1]))
for i, feat in enumerate(data.feature_names):
        Xp[0, i] = request.args.get(feat, default=X.mean(axis=0)[i], type=float)
yp = loaded_model.predict(Xp)
return jsonify(
data=dict(zip(data.feature_names, Xp.T.tolist())),
prediction={target_name: yp.tolist()})
@app.route('/data')
def show_data():
return jsonify(
data=dict(zip(data.feature_names, X.tolist())),
prediction={target_name: y.tolist()})
@app.route('/chart')
def chart():
loaded_model = joblib.load(model_filename)
plot = scatter(X, y,
model=loaded_model,
title=data_id,
xlabel=feature_name,
ylabel=target_name)
script, div = components(plot)
head = '''
<link href="http://cdn.pydata.org/bokeh/release/bokeh-{}.min.css" rel="stylesheet" type="text/css">
<script src="http://cdn.pydata.org/bokeh/release/bokeh-{}.min.js"></script>
'''.format(bokeh.__version__, bokeh.__version__)
return render_template('chart.html',
page_title='Basic Machine Learning',
chart_title=str(loaded_model).split('(')[0],
chart_head=head,
chart_div=div,
chart_script=script)
def scatter(X, y, model=None, title=None, xlabel=None, ylabel=None):
'''bokeh plot'''
p = plt.figure(title=title,
x_axis_label=xlabel,
y_axis_label=ylabel)
p.circle(X[:,feature_index], y,
fill_color='blue',
fill_alpha=0.8,
size=8)
    if model is not None:
N = 100
Xp = np.empty((N, X.shape[1]))
Xp[:,:] = X.mean(axis=0)
Xp[:,feature_index] = np.linspace(X[:,feature_index].min(),
X[:,feature_index].max(), N)
yp = model.predict(Xp)
p.line(Xp[:,feature_index], yp,
line_color='red',
line_width=2)
return p
if __name__ == '__main__':
app.run(port=5000, host='0.0.0.0', debug=False)
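# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original app): with the server running,
# /predict accepts one query parameter per Boston feature name and falls back
# to the feature's training mean for anything omitted. Illustrative only, and
# assumes the `requests` package is available:
#   import requests
#   r = requests.get('http://localhost:5000/predict', params={'RM': 6.5, 'CRIM': 0.1})
#   print(r.json()['prediction']['Price'])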
|
mit
|
johndamen/pyeasyplot
|
easyplot/gui/settings.py
|
1
|
8168
|
from PyQt4 import QtGui, QtCore
from . import basewidgets as bw
from .utils import clear_layout
from functools import partial
from matplotlib import pyplot as plt
from collections import OrderedDict, ChainMap
import numpy as np
class SettingsWidget(QtGui.QWidget):
def __init__(self, parent=None):
super().__init__(parent=parent)
self.build()
def build(self):
pass
class FigureSettings(SettingsWidget):
changed = QtCore.pyqtSignal()
current_axes_changed = QtCore.pyqtSignal(tuple)
def __init__(self, figure_manager, parent=None):
self.figure_manager = figure_manager
super().__init__(parent=parent)
def build(self):
self.layout = QtGui.QFormLayout(self)
self.axnum_field = bw.Int(self.figure_manager.ax_count())
self.axnum_field.value_changed.connect(self.set_ax_count)
self.layout.addRow('axes', self.axnum_field)
self.axrownum_field = bw.Int(self.figure_manager.axrow_count())
self.axrownum_field.value_changed.connect(self.set_axrow_count)
self.layout.addRow('rows', self.axrownum_field)
self.ax_pos_layout = QtGui.QVBoxLayout()
self.ax_pos_layout.setContentsMargins(10, 0, 0, 0)
self.layout.addRow(self.ax_pos_layout)
self.fill_ax_positions()
self.style_dd = bw.Dropdown(plt.style.available, default_index=plt.style.available.index('ggplot'))
self.style_dd.value_changed.connect(self.set_style)
self.layout.addRow('style', self.style_dd)
def fill_ax_positions(self):
self.axfields = []
current_axes = self.figure_manager.gca()
for i, a in enumerate(self.figure_manager.axes):
fs = AxPosField(i, a.position, selected=a is current_axes)
self.axfields.append(fs)
fs.toggled.connect(partial(self.set_current_axes, i))
fs.value_changed.connect(partial(self.edit_axes, i))
self.ax_pos_layout.addWidget(fs)
def set_current_axes(self, i):
prev = self.figure_manager.gca()
if self.axfields[i].is_selected():
self.figure_manager.current_index = i
current_index = self.figure_manager.current_index
for i, f in enumerate(self.axfields):
if i == current_index:
f.set_selected(True)
else:
f.set_selected(False)
new = self.figure_manager.gca()
self.current_axes_changed.emit((prev, new))
def edit_axes(self, i, pos):
w, h = pos[2:]
if w == 0 or h == 0:
return
self.figure_manager.axes[i].set_position(pos)
self.reload_ax_positions()
self.changed.emit()
def set_ax_count(self, v):
if v is None:
return
elif v < 1:
QtGui.QMessageBox.warning(self, 'invalid number of axes', 'at least 1 axes required')
return
elif v > 20:
QtGui.QMessageBox.warning(self, 'invalid number of axes', 'number of axes larger than 20 not allowed')
return
self.figure_manager.set_ax_count(v, reset=False)
self.reload_ax_positions()
self.changed.emit()
def set_axrow_count(self, v):
if v is None:
return
elif v < 1:
QtGui.QMessageBox.warning(self, 'invalid number of rows', 'at least 1 row required')
return
elif v > 10:
QtGui.QMessageBox.warning(self, 'invalid number of rows', 'number of rows larger than 10 not allowed')
return
self.figure_manager.set_axrow_count(v)
self.reload_ax_positions()
self.changed.emit()
def set_style(self, s):
self.figure_manager.set_style(s)
self.changed.emit()
def reload_ax_positions(self):
clear_layout(self.ax_pos_layout)
self.fill_ax_positions()
class AxesSettings(SettingsWidget):
changed = QtCore.pyqtSignal(dict)
def __init__(self, parent=None):
super().__init__(parent=parent)
def get_defaults(self):
return dict(title=self.ax.get_title(),
xlabel=self.ax.get_xlabel(),
ylabel=self.ax.get_ylabel(),
xlim=(None, None),
ylim=(None, None),
aspect=None)
def build(self):
self.fields = OrderedDict()
self.layout = QtGui.QFormLayout(self)
defaults = self.get_defaults()
self.fields['title'] = bw.Text(defaults['title'])
self.fields['xlabel'] = bw.Text(defaults['xlabel'])
self.fields['ylabel'] = bw.Text(defaults['ylabel'])
self.fields['xlim'] = bw.MinMax(defaults['xlim'])
self.fields['ylim'] = bw.MinMax(defaults['ylim'])
for k, v in self.fields.items():
v.value_changed.connect(self.change)
self.layout.addRow(k, v)
self.aspect_field = bw.Checkbox(defaults['aspect'] == 'equal')
self.aspect_field.value_changed.connect(self.change)
self.layout.addRow('aspect equal', self.aspect_field)
def change(self, *args):
self.changed.emit(self.kwargs)
@property
def kwargs(self):
data = {k: v.value() for k, v in self.fields.items()}
if self.aspect_field.value():
data['aspect'] = 'equal'
return data
def set_kwargs(self, reset=False, **kwargs):
kwargs = ChainMap(kwargs, self.get_defaults())
for k, v in kwargs.items():
if k in self.fields:
self.fields[k].set_value(v)
if k == 'aspect':
self.aspect_field.set_value(v == 'equal')
@property
def ax(self):
return plt.gca()
class LayerSettings(SettingsWidget):
pass
class PlotStack(SettingsWidget):
changed = QtCore.pyqtSignal(dict)
def __init__(self):
super().__init__()
def build(self):
self.layout = QtGui.QVBoxLayout(self)
self.stack = QtGui.QStackedWidget()
self.layout.addWidget(self.stack)
w = PlotSettings()
self.widgets = dict(default=w)
w.changed.connect(self.changed.emit)
self.stack.addWidget(self.widgets['default'])
def for_dataset(self, d):
name = d.__class__.__name__
if name not in self.widgets:
from .plotsettings import get_by_dataset
self.widgets[name] = w = get_by_dataset(d)()
w.changed.connect(self.changed.emit)
self.stack.addWidget(w)
self.stack.setCurrentWidget(self.widgets[name])
@property
def fields(self):
return self.stack.currentWidget().fields
class PlotSettings(SettingsWidget):
changed = QtCore.pyqtSignal(dict)
def __init__(self, parent=None):
super().__init__(parent=parent)
def build(self):
self.layout = QtGui.QFormLayout(self)
self.fields = OrderedDict()
def change(self, *args):
data = dict()
for k, field in self.fields.items():
v = field.value()
if v is None:
continue
data[k] = v
self.changed.emit(data)
class LegendSettings(SettingsWidget):
pass
class ColorbarSettings(SettingsWidget):
pass
class AxPosField(QtGui.QWidget):
value_changed = QtCore.pyqtSignal(object)
toggled = QtCore.pyqtSignal(bool)
def __init__(self, i, pos, selected=False):
super().__init__()
self.layout = QtGui.QHBoxLayout(self)
self.layout.setContentsMargins(0, 0, 0, 0)
self.layout.setSpacing(3)
self.label = bw.ToggleLabel(str(i), selected=selected)
self.label.setFixedWidth(30)
self.label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
self.label.toggled.connect(self.toggled)
self.layout.addWidget(self.label)
self.fields = bw.Fieldset(bw.Float, val=pos, fmt='{:.2f}', field_width=40, onchange=False)
self.fields.value_changed.connect(self.value_changed.emit)
self.layout.addWidget(self.fields)
def set_selected(self, b):
self.label.set_selected(b)
def is_selected(self):
return self.label.selected
|
gpl-3.0
|
assad2012/ggplot
|
ggplot/scales/scale_colour_gradient.py
|
12
|
2017
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .scale import scale
from copy import deepcopy
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap, rgb2hex, ColorConverter
def colors_at_breaks(cmap, breaks=[0, 0.25, 0.5, 0.75, 1.]):
return [rgb2hex(cmap(bb)[:3]) for bb in breaks]
class scale_colour_gradient(scale):
"""
Specify a two- or three-point gradient.
Parameters
----------
name : Name of an existing gradient scheme
limits : list of the upper and lower bounds of the gradient
low : colour at the lower bound of the gradient
mid : colour at the middle of the gradient
    high : colour at the upper bound of the gradient
Examples
--------
>>> from ggplot import *
    >>> diamonds_premium = diamonds[diamonds.cut=='Premium']
    >>> gg = ggplot(diamonds_premium, aes(x='depth', y='carat', colour='price')) + \\
... geom_point()
>>> print(gg + scale_colour_gradient(low='red', mid='white', high='blue', limits=[4000,6000]) + \\
... ggtitle('With red-blue gradient'))
>>> print(gg + ggtitle('With standard gradient'))
"""
VALID_SCALES = ['name', 'limits', 'low', 'mid', 'high']
def __radd__(self, gg):
gg = deepcopy(gg)
if self.name:
gg.color_label = self.name
if not (self.limits is None):
gg.color_limits = self.limits
color_spectrum = []
if self.low:
color_spectrum.append(self.low)
if self.mid:
color_spectrum.append(self.mid)
if self.high:
color_spectrum.append(self.high)
if self.low and self.high:
gradient2n = LinearSegmentedColormap.from_list('gradient2n', color_spectrum)
plt.cm.register_cmap(cmap=gradient2n)
# add them back to ggplot
gg.color_scale = colors_at_breaks(gradient2n)
gg.colormap = gradient2n
return gg
|
bsd-2-clause
|
phockett/ePSproc
|
epsproc/MFPAD.py
|
1
|
8592
|
# -*- coding: utf-8 -*-
r"""
ePSproc MFPAD functions
-----------------------
05/08/19 v1 Initial python version.
Based on original Matlab code ePS_MFPAD.m
Structure
---------
Calculate MFPAD on a grid from input ePS matrix elements.
Use fast functions, pre-calculate if possible.
Data in Xarray, use selection functions and multiplications based on relevant quantum numbers, other axes summed over.
Choices for functions...
* `Moble's spherical functions (quaternion based) <https://github.com/moble/spherical_functions>`_
Provides fast wignerD, 3j and Ylm functions, uses Numba.
Install with:
>>> conda install -c conda-forge spherical_functions
* `Scipy special.sph_harm <https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.sph_harm.html#scipy.special.sph_harm>`_
To Do
-----
* Propagate scale-factor to Mb.
* Benchmark on NO2 reference results.
* ~~Test over multiple E.~~
* Test over multiple R.
* More efficient computation? Use Xarray group by?
Formalism
---------
From `ePSproc: Post-processing suite for ePolyScat electron-molecule scattering calculations <https://www.authorea.com/users/71114/articles/122402/_show_article>`_.
.. math::
I_{\mu_{0}}(\theta_{\hat{k}},\phi_{\hat{k}},\theta_{\hat{n}},\phi_{\hat{n}})=\frac{4\pi^{2}E}{cg_{p_{i}}}\sum_{\mu_{i},\mu_{f}}|T_{\mu_{0}}^{p_{i}\mu_{i},p_{f}\mu_{f}}(\theta_{\hat{k}},\phi_{\hat{k}},\theta_{\hat{n}},\phi_{\hat{n}})|^{2}\label{eq:MFPAD}
T_{\mu_{0}}^{p_{i}\mu_{i},p_{f}\mu_{f}}(\theta_{\hat{k}},\phi_{\hat{k}},\theta_{\hat{n}},\phi_{\hat{n}})=\sum_{l,m,\mu}I_{l,m,\mu}^{p_{i}\mu_{i},p_{f}\mu_{f}}(E)Y_{lm}^{*}(\theta_{\hat{k}},\phi_{\hat{k}})D_{\mu,\mu_{0}}^{1}(R_{\hat{n}})\label{eq:TMF}
I_{l,m,\mu}^{p_{i}\mu_{i},p_{f}\mu_{f}}(E)=\langle\Psi_{i}^{p_{i},\mu_{i}}|\hat{d_{\mu}}|\Psi_{f}^{p_{f},\mu_{f}}\varphi_{klm}^{(-)}\rangle\label{eq:I}
In this formalism:
* :math:`I_{l,m,\mu}^{p_{i}\mu_{i},p_{f}\mu_{f}}(E)` is the radial part of the dipole matrix element, determined from the initial and final state electronic wavefunctions :math:`\Psi_{i}^{p_{i},\mu_{i}}` and :math:`\Psi_{f}^{p_{f},\mu_{f}}`, photoelectron wavefunction :math:`\varphi_{klm}^{(-)}` and dipole operator :math:`\hat{d_{\mu}}`. Here the wavefunctions are indexed by irreducible representation (i.e. symmetry) by the labels :math:`p_{i}` and :math:`p_{f}`, with components :math:`\mu_{i}` and :math:`\mu_{f}` respectively; :math:`l,m` are angular momentum components, :math:`\mu` is the projection of the polarization into the MF (from a value :math:`\mu_{0}` in the LF). Each energy and irreducible representation corresponds to a calculation in ePolyScat.
* :math:`T_{\mu_{0}}^{p_{i}\mu_{i},p_{f}\mu_{f}}(\theta_{\hat{k}},\phi_{\hat{k}},\theta_{\hat{n}},\phi_{\hat{n}})` is the full matrix element (expanded in polar coordinates) in the MF, where :math:`\hat{k}` denotes the direction of the photoelectron :math:`\mathbf{k}`-vector, and :math:`\hat{n}` the direction of the polarization vector :math:`\mathbf{n}` of the ionizing light. Note that the summation over components :math:`\{l,m,\mu\}` is coherent, and hence phase sensitive.
* :math:`Y_{lm}^{*}(\theta_{\hat{k}},\phi_{\hat{k}})` is a spherical harmonic.
* :math:`D_{\mu,\mu_{0}}^{1}(R_{\hat{n}})` is a Wigner rotation matrix element, with a set of Euler angles :math:`R_{\hat{n}}=(\phi_{\hat{n}},\theta_{\hat{n}},\chi_{\hat{n}})`, which rotates/projects the polarization into the MF.
* :math:`I_{\mu_{0}}(\theta_{\hat{k}},\phi_{\hat{k}},\theta_{\hat{n}},\phi_{\hat{n}})` is the final (observable) MFPAD, for a polarization :math:`\mu_{0}` and summed over all symmetry components of the initial and final states, :math:`\mu_{i}` and :math:`\mu_{f}`. Note that this sum can be expressed as an incoherent summation, since these components are (by definition) orthogonal.
* :math:`g_{p_{i}}` is the degeneracy of the state :math:`p_{i}`.
"""
# Imports
import numpy as np
import pandas as pd
import xarray as xr
# Special functions
# from scipy.special import sph_harm
import spherical_functions as sf
import quaternion
# Package fns.
# TODO: tidy this up!
from epsproc.util import matEleSelector
from epsproc.sphCalc import sphCalc
def mfpad(dataIn, thres = 1e-2, inds = {'Type':'L','it':1}, res = 50, R = None, p = 0):
"""
Parameters
----------
dataIn : Xarray
Contains set(s) of matrix elements to use, as output by epsproc.readMatEle().
thres : float, optional, default 1e-2
Threshold value for matrix elements to use in calculation.
    inds : dictionary, optional.
        Used for sub-selection of matrix elements from Xarrays.
        Default set for length gauge, single it component only, inds = {'Type':'L','it':1}.
res : int, optional, default 50
Resolution for output (theta,phi) grids.
R : list of Euler angles or quaternions, optional.
Define LF > MF polarization geometry/rotations.
For default case (R = None), 3 geometries are calculated, corresponding to z-pol, x-pol and y-pol cases.
Defined by Euler angles (p,t,c) = [0 0 0] for z-pol, [0 pi/2 0] for x-pol, [pi/2 pi/2 0] for y-pol.
p : int, optional.
Defines LF polarization state, p = -1...1, default p = 0 (linearly pol light along z-axis).
TODO: add summation over p for multiple pol states in LF.
Returns
-------
Ta
Xarray (theta, phi, E, Sym) of MFPADs, summed over (l,m)
Tlm
Xarray (theta, phi, E, Sym, lm) of MFPAD components, expanded over all (l,m)
"""
# Define reduced data from selection over all data
    daRed = matEleSelector(dataIn, thres = thres, inds = inds)
# Generate spherical harmonics
Lmax = daRed.l.max()
YlmX = sphCalc(Lmax, res = res)
# Reindex to match data (should happen automagically, but not always!)
# YlmXre = YlmX.reindex_like(daRed)
# Set rotation angles for LF > MF
if R is None:
# Set (x,y,z) projection terms only
# Nangs = 10
# pRot = np.linspace(0,180,Nangs)
# tRot = np.linspace(0,90,Nangs)
# cRot = np.linspace(0,180,Nangs)
# eAngs = np.array([pRot, tRot, cRot,])*np.pi/180
# Convert to quaternions
# R = quaternion.from_euler_angles(pRot*np.pi/180, tRot*np.pi/180, cRot*np.pi/180)
        # Euler angles for rotation of LF->MF, set as [0 0 0] for z-pol, [0 pi/2 0] for x-pol, [pi/2 pi/2 0] for y-pol
pRot = [0, 0, np.pi/2]
tRot = [0, np.pi/2, np.pi/2]
cRot = [0, 0, 0]
eAngs = np.array([pRot, tRot, cRot]) # List form to use later
Euler = pd.MultiIndex.from_arrays(eAngs, names = ['P','T','C'])
# Convert to quaternions
R = quaternion.from_euler_angles(pRot, tRot, cRot)
#**************** Calculate MFPADs
Tlm = []
Ta = []
# Loop over pol geoms R
for n, Rcalc in enumerate(R):
T = []
# Loop over mu terms and multiply
for mu in np.arange(-1,2):
# Set by element replacement (preserves whole structure)
# daTemp = daRed.copy() # Set explicit copy for rotation.
# daTemp.loc[{'mu':mu}].values = daTemp.loc[{'mu':mu}].values * sf.Wigner_D_element(Rcalc, 1, mu, 0).conj()
# Issues with reindexing to extra coords at the moment, so reindex and multiply for specific mu only
# daTemp = daTemp.sel({'mu':mu})
# YlmXre = YlmX.reindex_like(daTemp)
# T.append(YlmXre.conj() * daTemp) # Output full (l,m,mu) expansion
# Set by looping and selection
daTemp = daRed.sel({'mu':mu}) * sf.Wigner_D_element(Rcalc, 1, mu, 0).conj()
YlmXre = YlmX.reindex_like(daTemp)
T.append(YlmXre.conj() * daTemp) # Output full (l,m,mu) expansion
# Concat & sum over symmetries
Ts = xr.combine_nested([T[0], T[1], T[2]], concat_dim=['LM'])
# Add dims - currently set for Euler angles only.
        # Can't seem to add multiindex as a single element, so set dummy coord here and replace below.
Ts = Ts.expand_dims({'Euler':[n]}) # Set as index
# Ts = Ts.expand_dims({'p':[eAngs[0,n]], 't':[eAngs[1,n]], 'c':[eAngs[2,n]]})
Tlm.append(Ts)
Ta.append(Ts.sum(dim = 'LM'))
TlmX = xr.combine_nested(Tlm, concat_dim=['Euler'])
TaX = xr.combine_nested(Ta, concat_dim=['Euler'])
# Assign Euler angles to dummy dim
TlmX = TlmX.assign_coords(Euler = Euler)
TaX = TaX.assign_coords(Euler = Euler)
return TaX, TlmX # , Ta, Tlm # For debug also return lists
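# ---------------------------------------------------------------------------
# Hedged usage sketch (comments only, not executed): the file name below is
# illustrative, and epsproc.readMatEle is assumed to return Xarray matrix
# elements of the form described in the mfpad() docstring above.
#   from epsproc import readMatEle
#   dataSets = readMatEle('no2_demo_ePS.out')  # hypothetical ePS output file; call signature assumed
#   TaX, TlmX = mfpad(dataSets[0], res=50)     # default z/x/y polarization geometries
#   TaX.isel(Euler=0)                          # MFPAD for the first polarization geometry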
|
gpl-3.0
|
csdms-contrib/gisknickfinder
|
GISKnickFinder.py
|
1
|
6099
|
#Name: GISKnickFinder.
#This python code can be used to find knickpoints and extract information about streams, it utilizes built-in functions of ArcGIS.
#Copyright (C) 2012 Francis Rengers
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; version 2
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import arcpy, math, numpy as np
from arcpy import env
from arcpy.sa import *
import matplotlib.cm as cm
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import ArcFuncs
reload(ArcFuncs)
env.workspace = "C:/GIS/Folder"
arcpy.CheckOutExtension("Spatial")
arcpy.env.overwriteOutput = 1
#########################################################
#Input
Watershed = "Path to DEM Watershed (this must be larger than the size of your watershed shapefile"
WatershedMask = env.workspace+"Watershed.shp"
Streamtops=env.workspace+"StreamTop.shp" #This is a point file for the top of each of the streams that you are interested in
SpatialR="C:/Program Files (x86)/ArcGIS/Coordinate Systems/Projected Coordinate Systems/UTM/NAD 1983/NAD 1983 UTM Zone 13N.prj" #This is your projection (I left mine in here just so you could see what one looks like)
CurvThres=-15 #This is the curvature value that highlights your knickpoint. If this value is too high you will have more knickpoints than actually exist. If it is too low then you won't find all of the knickpoints. So you should play around with it.
FlowAccThres=5000 #This is the drainage area threshold. If it is very small, you will highlight knickpoints that are not in your channel. If it is too high then you will miss knickpoints in your channel.
##############################################
#Output
Curvpro=env.workspace+"/curvpropy"
Rastpoly=env.workspace+"/Rastpoly.shp"
Dissolvepoly=env.workspace+"/Dissolvpoly.shp"
HCpnts=env.workspace+"/HCpnts.shp"
FlowdirArc=env.workspace+ "/FlowdirArc"
Costpath = env.workspace+"/costtohcs.shp"
Fillgrid=env.workspace+"/FillWS"
FlowAcc=env.workspace+"/FlowAccArc"
dxRaster=env.workspace+"/dxRas"
HClocations=env.workspace +"/HCLocations"
ncostpath=env.workspace+"/outcost"
HCraster=env.workspace+"/HCraster"
DAreaatHC=env.workspace+"/DAreaatHC"
StrPathPnts=env.workspace+"/StrPathPnts"
indstrcostpath=env.workspace+"/strpath"
try:
########################################################################
#ARCGIS OPERATIONS
#First I'll clip the watershed, fill the DEM, create the flow direction, and flow accumulation grids
Watershedstuff=ArcFuncs.WatShedfunc(Watershed, WatershedMask, Fillgrid, FlowdirArc, FlowAcc)
#Next I take the flow direction grid and recode it to reflect distance
#dxgrid=ArcFuncs.dxfunc(Newflowdir, dxRaster)
dxgrid=ArcFuncs.dxfunc(Raster(FlowdirArc), dxRaster)
#In this section I will obtain the locations of the headcuts and I will simplify them to make sure that I just have one point
#at each headcut
#Allhcs=ArcFuncs.findhcs(outFill, Curvpro, outFlowAccumulation, HClocations, Rastpoly, Dissolvepoly, HCpnts)
Allhcs=ArcFuncs.findhcs(Raster(Fillgrid), Curvpro, CurvThres, FlowAccThres, Raster(FlowAcc), HClocations, Rastpoly, Dissolvepoly, HCpnts)
#Now I will try to calculate the least cost path to all of the headcuts.
#Make a search cursor on the shapefile with all of the headcut locations
#LCostPth=ArcFuncs.leastcostfunc(HCs, SpatialR, Fillgrid, Newflowdir, ncostpath)
CountLCostPth=ArcFuncs.leastcostfunc(HCpnts, SpatialR, Fillgrid, Raster(FlowdirArc), ncostpath)
#Count=np.savetxt('Count.txt',LCostPth)
#I need to calculate the least cost path to the tops of all of the streams too. I will use this to
#check the K and m values that I obtain.
CountStTop=ArcFuncs.leastcostfunc(Streamtops, SpatialR, Fillgrid, Raster(FlowdirArc),indstrcostpath)
######################################################################
#PYTHON OPERATIONS
#Now I create 3 python lists, one for the least cost path
#this is an array of all ones, the second array is the drainage area corresponding
#to each cell in the path, and the third array is the dx array showing the distance to the next cell.
#First I do this just for the headcuts
PathArrays=ArcFuncs.hcpathvectors2(FlowAcc, dxRaster, ncostpath, CountLCostPth, Fillgrid)
areaarray=PathArrays[0]
dxarray=PathArrays[1]
for i in range(0,len(areaarray)):
savearea=np.asarray(areaarray[i],dtype='float')
savedx=np.asarray(dxarray[i],dtype='float')
np.savetxt(env.workspace+'/AreaArray'+str(i)+'.txt',savearea)
np.savetxt(env.workspace+'/dxArray'+str(i)+'.txt',savedx)
#Next I do this just for the stream paths
StrPathArrays=ArcFuncs.hcpathvectors2(FlowAcc, dxRaster, indstrcostpath, CountStTop, Fillgrid)
strareaarray=StrPathArrays[0]
strdxarray=StrPathArrays[1]
strelevarray=StrPathArrays[2]
for i in range(0,len(strareaarray)):
savestrarea=np.asarray(strareaarray[i],dtype='float')
savestrdx=np.asarray(strdxarray[i],dtype='float')
savestrelev=np.asarray(strelevarray[i],dtype='float')
np.savetxt(env.workspace+'/StrAreaArray'+str(i)+'.txt',savestrarea)
np.savetxt(env.workspace+'/StrdxArray'+str(i)+'.txt',savestrdx)
np.savetxt(env.workspace+'/StrElevArray'+str(i)+'.txt',savestrelev)
#Now I want to grab the X, Y coordinates of each cell in the raster.
StrXY=ArcFuncs.getxyofstr(CountStTop, indstrcostpath, StrPathPnts)
for i in range(0,len(StrXY)):
savestrXY=np.asarray(StrXY[i],dtype='float')
np.savetxt(env.workspace+'/StrXY'+str(i)+'.txt',savestrXY)
print "It worked!"
except:
print "Wah Wah"
#print arcpy.GetMessage()
|
gpl-2.0
|
kagayakidan/scikit-learn
|
examples/mixture/plot_gmm_sin.py
|
248
|
2747
|
"""
=================================
Gaussian Mixture Model Sine Curve
=================================
This example highlights the advantages of the Dirichlet Process:
complexity control and dealing with sparse data. The dataset is formed
by 100 points loosely spaced following a noisy sine curve. The fit by
the GMM class, using the expectation-maximization algorithm to fit a
mixture of 10 Gaussian components, finds too-small components and very
little structure. The fits by the Dirichlet process, however, show
that the model can either learn a global structure for the data (small
alpha) or easily interpolate to finding relevant local structure
(large alpha), never falling into the problems shown by the GMM class.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
from sklearn.externals.six.moves import xrange
# Number of samples per component
n_samples = 100
# Generate random sample following a sine curve
np.random.seed(0)
X = np.zeros((n_samples, 2))
step = 4 * np.pi / n_samples
for i in xrange(X.shape[0]):
x = i * step - 6
X[i, 0] = x + np.random.normal(0, 0.1)
X[i, 1] = 3 * (np.sin(x) + np.random.normal(0, .2))
color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])
for i, (clf, title) in enumerate([
(mixture.GMM(n_components=10, covariance_type='full', n_iter=100),
"Expectation-maximization"),
(mixture.DPGMM(n_components=10, covariance_type='full', alpha=0.01,
n_iter=100),
"Dirichlet Process,alpha=0.01"),
(mixture.DPGMM(n_components=10, covariance_type='diag', alpha=100.,
n_iter=100),
"Dirichlet Process,alpha=100.")]):
clf.fit(X)
splot = plt.subplot(3, 1, 1 + i)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(
clf.means_, clf._get_covars(), color_iter)):
v, w = linalg.eigh(covar)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-6, 4 * np.pi - 6)
plt.ylim(-5, 5)
plt.title(title)
plt.xticks(())
plt.yticks(())
plt.show()
|
bsd-3-clause
|
bendalab/thunderfish
|
docs/pulseplots.py
|
3
|
40213
|
"""
## Plot and save key steps in pulses.py for visualizing the algorithm.
"""
import pickle, glob
import numpy as np
from scipy import stats
from matplotlib import rcParams, gridspec, ticker
import matplotlib.pyplot as plt
try:
from matplotlib.colors import colorConverter as cc
except ImportError:
import matplotlib.colors as cc
try:
from matplotlib.colors import to_hex
except ImportError:
from matplotlib.colors import rgb2hex as to_hex
from matplotlib.patches import ConnectionPatch, Rectangle
from matplotlib.lines import Line2D
import warnings
def warn(*args,**kwargs):
"""
Ignore all warnings.
"""
pass
warnings.warn=warn
# plotting parameters and colors
rcParams['font.family'] = 'monospace'
cmap = plt.get_cmap("Dark2")
c_g = cmap(0)
c_o = cmap(1)
c_grey = cmap(7)
cmap_pts = [cmap(2),cmap(3)]
def darker(color, saturation):
"""Make a color darker.
From bendalab/plottools package.
Parameters
----------
color: dict or matplotlib color spec
A matplotlib color (hex string, name color string, rgb tuple)
or a dictionary with an 'color' or 'facecolor' key.
saturation: float
The smaller the saturation, the darker the returned color.
A saturation of 0 returns black.
A saturation of 1 leaves the color untouched.
A saturation of 2 returns white.
Returns
-------
color: string or dictionary
The darker color as a hexadecimal RGB string (e.g. '#rrggbb').
If `color` is a dictionary, a copy of the dictionary is returned
with the value of 'color' or 'facecolor' set to the darker color.
"""
try:
c = color['color']
cd = dict(**color)
cd['color'] = darker(c, saturation)
return cd
except (KeyError, TypeError):
try:
c = color['facecolor']
cd = dict(**color)
cd['facecolor'] = darker(c, saturation)
return cd
except (KeyError, TypeError):
if saturation > 2:
                saturation = 2
if saturation > 1:
return lighter(color, 2.0-saturation)
if saturation < 0:
saturation = 0
r, g, b = cc.to_rgb(color)
rd = r*saturation
gd = g*saturation
bd = b*saturation
return to_hex((rd, gd, bd)).upper()
def lighter(color, lightness):
"""Make a color lighter.
From bendalab/plottools package.
Parameters
----------
color: dict or matplotlib color spec
A matplotlib color (hex string, name color string, rgb tuple)
or a dictionary with an 'color' or 'facecolor' key.
lightness: float
The smaller the lightness, the lighter the returned color.
A lightness of 0 returns white.
A lightness of 1 leaves the color untouched.
A lightness of 2 returns black.
Returns
-------
color: string or dict
The lighter color as a hexadecimal RGB string (e.g. '#rrggbb').
If `color` is a dictionary, a copy of the dictionary is returned
with the value of 'color' or 'facecolor' set to the lighter color.
"""
try:
c = color['color']
cd = dict(**color)
cd['color'] = lighter(c, lightness)
return cd
except (KeyError, TypeError):
try:
c = color['facecolor']
cd = dict(**color)
cd['facecolor'] = lighter(c, lightness)
return cd
except (KeyError, TypeError):
if lightness > 2:
lightness = 2
if lightness > 1:
return darker(color, 2.0-lightness)
if lightness < 0:
lightness = 0
r, g, b = cc.to_rgb(color)
rl = r + (1.0-lightness)*(1.0 - r)
gl = g + (1.0-lightness)*(1.0 - g)
bl = b + (1.0-lightness)*(1.0 - b)
return to_hex((rl, gl, bl)).upper()
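# Hedged illustration of the two color helpers above (not executed here;
# values assume matplotlib's standard hex conversion):
#   >>> darker('#00ff00', 0.4)
#   '#006600'
#   >>> lighter('#00ff00', 0.4)
#   '#99FF99'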
def xscalebar(ax, x, y, width, wunit=None, wformat=None, ha='left', va='bottom',
lw=None, color=None, capsize=None, clw=None, **kwargs):
"""Horizontal scale bar with label.
From bendalab/plottools package.
Parameters
----------
ax: matplotlib axes
Axes where to draw the scale bar.
x: float
x-coordinate where to draw the scale bar in relative units of the axes.
y: float
y-coordinate where to draw the scale bar in relative units of the axes.
width: float
Length of the scale bar in units of the data's x-values.
wunit: string or None
Optional unit of the data's x-values.
wformat: string or None
Optional format string for formatting the label of the scale bar
or simply a string used for labeling the scale bar.
ha: 'left', 'right', or 'center'
Scale bar aligned left, right, or centered to (x, y)
va: 'top' or 'bottom'
Label of the scale bar either above or below the scale bar.
lw: int, float, None
Line width of the scale bar.
color: matplotlib color
Color of the scalebar.
capsize: float or None
        If larger than zero draw cap lines at the ends of the bar.
The length of the lines is given in points (same unit as linewidth).
clw: int, float, None
Line width of the cap lines.
kwargs: key-word arguments
Passed on to `ax.text()` used to print the scale bar label.
"""
ax.autoscale(False)
# ax dimensions:
pixelx = np.abs(np.diff(ax.get_window_extent().get_points()[:,0]))[0]
pixely = np.abs(np.diff(ax.get_window_extent().get_points()[:,1]))[0]
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
unitx = xmax - xmin
unity = ymax - ymin
dxu = np.abs(unitx)/pixelx
dyu = np.abs(unity)/pixely
# transform x, y from relative units to axis units:
x = xmin + x*unitx
y = ymin + y*unity
# bar length:
if wformat is None:
wformat = '%.0f'
if width < 1.0:
wformat = '%.1f'
try:
ls = wformat % width
width = float(ls)
except TypeError:
ls = wformat
# bar:
if ha == 'left':
x0 = x
x1 = x+width
elif ha == 'right':
x0 = x-width
x1 = x
else:
x0 = x-0.5*width
x1 = x+0.5*width
# line width:
if lw is None:
lw = 2
# color:
if color is None:
color = 'k'
# scalebar:
lh = ax.plot([x0, x1], [y, y], '-', color=color, lw=lw,
solid_capstyle='butt', clip_on=False)
# get y position of line in figure pixel coordinates:
ly = np.array(lh[0].get_window_extent(ax.get_figure().canvas.get_renderer()))[0,1]
# caps:
if capsize is None:
capsize = 0
if clw is None:
clw = 0.5
if capsize > 0.0:
dy = capsize*dyu
ax.plot([x0, x0], [y-dy, y+dy], '-', color=color, lw=clw,
solid_capstyle='butt', clip_on=False)
ax.plot([x1, x1], [y-dy, y+dy], '-', color=color, lw=clw,
solid_capstyle='butt', clip_on=False)
# label:
if wunit:
ls += u'\u2009%s' % wunit
if va == 'top':
th = ax.text(0.5*(x0+x1), y, ls, clip_on=False,
ha='center', va='bottom', **kwargs)
# get y coordinate of text bottom in figure pixel coordinates:
ty = np.array(th.get_window_extent(ax.get_figure().canvas.get_renderer()))[0,1]
dty = ly+0.5*lw + 2.0 - ty
else:
th = ax.text(0.5*(x0+x1), y, ls, clip_on=False,
ha='center', va='top', **kwargs)
# get y coordinate of text bottom in figure pixel coordinates:
ty = np.array(th.get_window_extent(ax.get_figure().canvas.get_renderer()))[1,1]
dty = ly-0.5*lw - 2.0 - ty
th.set_position((0.5*(x0+x1), y+dyu*dty))
return x0, x1, y
def yscalebar(ax, x, y, height, hunit=None, hformat=None, ha='left', va='bottom',
lw=None, color=None, capsize=None, clw=None, **kwargs):
"""Vertical scale bar with label.
From bendalab/plottools package.
Parameters
----------
ax: matplotlib axes
Axes where to draw the scale bar.
x: float
x-coordinate where to draw the scale bar in relative units of the axes.
y: float
y-coordinate where to draw the scale bar in relative units of the axes.
height: float
Length of the scale bar in units of the data's y-values.
hunit: string
Unit of the data's y-values.
hformat: string or None
Optional format string for formatting the label of the scale bar
or simply a string used for labeling the scale bar.
ha: 'left' or 'right'
Label of the scale bar either to the left or to the right
of the scale bar.
va: 'top', 'bottom', or 'center'
Scale bar aligned above, below, or centered on (x, y).
lw: int, float, None
Line width of the scale bar.
color: matplotlib color
Color of the scalebar.
capsize: float or None
        If larger than zero draw cap lines at the ends of the bar.
The length of the lines is given in points (same unit as linewidth).
clw: int, float
Line width of the cap lines.
kwargs: key-word arguments
Passed on to `ax.text()` used to print the scale bar label.
"""
ax.autoscale(False)
# ax dimensions:
pixelx = np.abs(np.diff(ax.get_window_extent().get_points()[:,0]))[0]
pixely = np.abs(np.diff(ax.get_window_extent().get_points()[:,1]))[0]
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
unitx = xmax - xmin
unity = ymax - ymin
dxu = np.abs(unitx)/pixelx
dyu = np.abs(unity)/pixely
# transform x, y from relative units to axis units:
x = xmin + x*unitx
y = ymin + y*unity
# bar length:
if hformat is None:
hformat = '%.0f'
if height < 1.0:
hformat = '%.1f'
try:
ls = hformat % height
        height = float(ls)
except TypeError:
ls = hformat
# bar:
if va == 'bottom':
y0 = y
y1 = y+height
elif va == 'top':
y0 = y-height
y1 = y
else:
y0 = y-0.5*height
y1 = y+0.5*height
# line width:
if lw is None:
lw = 2
# color:
if color is None:
color = 'k'
# scalebar:
lh = ax.plot([x, x], [y0, y1], '-', color=color, lw=lw,
solid_capstyle='butt', clip_on=False)
# get x position of line in figure pixel coordinates:
lx = np.array(lh[0].get_window_extent(ax.get_figure().canvas.get_renderer()))[0,0]
# caps:
if capsize is None:
capsize = 0
if clw is None:
clw = 0.5
if capsize > 0.0:
dx = capsize*dxu
ax.plot([x-dx, x+dx], [y0, y0], '-', color=color, lw=clw, solid_capstyle='butt',
clip_on=False)
ax.plot([x-dx, x+dx], [y1, y1], '-', color=color, lw=clw, solid_capstyle='butt',
clip_on=False)
# label:
if hunit:
ls += u'\u2009%s' % hunit
if ha == 'right':
th = ax.text(x, 0.5*(y0+y1), ls, clip_on=False, rotation=90.0,
ha='left', va='center', **kwargs)
# get x coordinate of text bottom in figure pixel coordinates:
tx = np.array(th.get_window_extent(ax.get_figure().canvas.get_renderer()))[0,0]
dtx = lx+0.5*lw + 2.0 - tx
else:
th = ax.text(x, 0.5*(y0+y1), ls, clip_on=False, rotation=90.0,
ha='right', va='center', **kwargs)
# get x coordinate of text bottom in figure pixel coordinates:
tx = np.array(th.get_window_extent(ax.get_figure().canvas.get_renderer()))[1,0]
dtx = lx-0.5*lw - 1.0 - tx
th.set_position((x+dxu*dtx, 0.5*(y0+y1)))
return x, y0, y1
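# Hedged usage sketch for the two scale bar helpers above (comments only;
# the figure set-up is illustrative):
#   fig, ax = plt.subplots()
#   t = np.linspace(0, 2, 200)
#   ax.plot(t, np.sin(2*np.pi*t))
#   xscalebar(ax, 0.9, 0.05, 0.5, wunit='s', ha='right', capsize=2)
#   yscalebar(ax, 0.05, 0.9, 1.0, hunit='mV', va='top', capsize=2)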
def arrowed_spines(ax, ms=10):
""" Create an arrowed spine on the y-axis of a plot.
Parameters
----------
    ax : matplotlib figure axis
        Axis on which the arrow should be plotted.
    ms : float, optional
        Marker size of the arrow head. Defaults to 10.
"""
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
ax.scatter([xmin],[ymax],s=ms,marker='^', clip_on=False,color='k')
ax.set_xlim([xmin,xmax])
ax.set_ylim([ymin,ymax])
def loghist(ax,x,bmin,bmax,n,c,orientation='vertical',label=''):
""" Plot histogram with logarithmic scale.
Parameters
----------
ax : matplotlib axis
Axis to plot the histogram on.
x : numpy array
Input data for histogram.
bmin : float
Minimum value for the histogram bins.
bmax : float
Maximum value for the histogram bins.
n : int
Number of bins.
c : matplotlib color
Color of histogram.
orientation : string (optional)
Histogram orientation.
Defaults to 'vertical'.
label : string (optional)
Label for x.
Defaults to '' (no label).
Returns
-------
n : array
The values of the histogram bins.
bins : array
The edges of the bins.
patches : BarContainer
Container of individual artists used to create the histogram.
"""
return ax.hist(x,bins=np.exp(np.linspace(np.log(bmin),np.log(bmax),n)),color=c,orientation=orientation,label=label)
def plot_eod_properties():
""" Plot 3 pulse-type EODs, one artefact and one wave snippet to visualize raw data.
Plot is saved in img/eod_properties.png.
"""
# create figure and grid
fig=plt.figure(figsize=(6,3))
gs = gridspec.GridSpec(2,4)
# define data files
np_files = ['data/pulse_eod_1','data/pulse_eod_2','data/pulse_eod_3','data/artefact_1','data/wave_eod_1']
# go through each data file and plot each snippet on a new axis.
for i, np_file in enumerate(np_files):
# plot the first snippet on a bigger axis
if i==0:
ax=fig.add_subplot(gs[:,:2])
elif i<3:
ax=fig.add_subplot(gs[i-1,2])
else:
ax=fig.add_subplot(gs[i-3,3])
[x,y] = np.load(np_file+'.npy')
ax.plot(x,y,c=cmap(i))
# plot the lines and annotation to show EOD features on the first snippet.
if i==0:
ax.plot([x[np.argmin(y)],x[np.argmax(y)]],[np.min(y),np.max(y)],linestyle='--',marker='o',c='k')
ax.plot([x[np.argmin(y)],x[np.argmax(y)]],[np.min(y),np.min(y)],linestyle='--',c='k')
ax.plot([x[np.argmax(y)],x[np.argmax(y)]],[np.min(y),np.max(y)],linestyle='--',c='k')
ax.annotate('w',[0.5*(x[np.argmin(y)]+x[np.argmax(y)]),np.min(y)],xytext=(0, -12), textcoords='offset points',fontsize=14,ha='center')
ax.annotate('h',[x[np.argmax(y)],0.5*(np.min(y)+np.max(y))],xytext=(3, 0), textcoords='offset points',fontsize=14,ha='left')
ax.annotate('s',[0.5*(x[np.argmin(y)]+x[np.argmax(y)]),0.5*(np.min(y)+np.max(y))],xytext=(-12, 0), textcoords='offset points',fontsize=14)
h = np.max(y)-np.min(y)
w = np.abs(x[np.argmax(y)]-x[np.argmin(y)])
s = h/w
# annotate the EOD height, width and slope for each snippet.
if i==0:
ax.text(0.1, -0.04,u"h = $%.2f$" "\n" u"w = $%.2f\u2009ms$" "\n" u"s = $%.2f\u2009ms^{-1}$"%(h,w,s),
horizontalalignment='left',
verticalalignment='top',
transform = ax.transAxes,
bbox={'linewidth':0,'facecolor':'None'})
else:
ax.text(0.1, -0.1,r"h = $%.2f$" "\n" u"w = $%.2f\u2009ms$" "\n" u"s = $%.2f\u2009ms^{-1}$"%(h,w,s),
horizontalalignment='left',
verticalalignment='top',
transform = ax.transAxes,
bbox={'linewidth':0,'facecolor':'None'})
ax.axis('off')
plt.tight_layout()
plt.savefig('img/EOD_properties.png')
plt.show()
def plot_peak_detection():
""" Plot 2 pulse-type EOD snippet and all consecutive peak detection steps.
Plot is saved in img/peak_detection.png.
"""
# load variables to plot
np_files = ['data/peakdata_1','data/peakdata_2']
# create figure and grid
fig = plt.figure(figsize=(6,3))
gs = gridspec.GridSpec(2,4)
# go through each data file
for i,np_file in enumerate(np_files):
# load all peak data from the zipped numpy files
with np.load(np_file+'.npz') as npd:
data = npd['data']
p1 = npd['p1']
p2 = npd['p2']
p3 = npd['p3']
p4 = npd['p4']
# plot peak detection step nr 1
ax = fig.add_subplot(gs[i,0])
ax.axis('off')
if i==0:
ax.set_title('1.')
ax.plot(data[0],data[1],c=cmap(7),alpha=0.5)
ax.plot(p1[0],p1[1],'o', c=cmap(0), alpha=0.75)
ax.plot(p1[2],p1[3],'o', c=cmap(1), alpha=0.75)
# plot peak detection step nr 2
ax = fig.add_subplot(gs[i,1])
ax.axis('off')
if i==0:
ax.set_title('2.')
ax.plot(data[0],data[1],c=cmap(7),alpha=0.5)
ax.plot(p2[0],p2[1],'o', c=cmap(0), alpha=0.75)
ax.plot(p2[2],p2[3],'o', c=cmap(1), alpha=0.75)
ax.plot(np.vstack((p2[0],p2[2])),np.vstack((p2[1],p2[3])),linestyle='--',c='k')
# plot peak detection step nr 3
ax = fig.add_subplot(gs[i,2])
ax.axis('off')
if i==0:
ax.set_title('3.')
ax.plot(data[0],data[1],c=cmap(7),alpha=0.5)
ax.plot(p3[0],p3[1],'o', c=cmap(0), alpha=0.75)
ax.plot(p3[2],p3[3],'o', c=cmap(1), alpha=0.75)
ax.plot(np.vstack((p3[0],p3[2])),np.vstack((p3[1],p3[3])),linestyle='--',c='k')
# plot peak detection step nr 4
ax = fig.add_subplot(gs[i,3])
ax.axis('off')
if i==0:
ax.set_title('4.')
ax.plot(data[0],data[1],c=cmap(7),alpha=0.5)
ax.plot(p4[0],p4[1],'o', c=cmap(0), alpha=0.75)
ax.plot(p4[2],p4[3],'o', c=cmap(1), alpha=0.75)
ax.plot(np.vstack((p4[0],p4[2])),np.vstack((p4[1],p4[3])),linestyle='--',c='k')
plt.tight_layout()
plt.savefig('img/peak_detection.png')
plt.show()
def plot_clustering():
""" Plot all clustering steps for one wild recording.
Plot is saved in img/clustering.png.
"""
with np.load('data/clustering.npz',allow_pickle=True) as pd:
# extract all variables from the dictionary:
samplerate = pd['samplerate']
eod_widths = pd['EOD_widths']
eod_hights = pd['EOD_heights']
eod_shapes = pd['EOD_shapes'] #shapes, features, labels
disc_masks = pd['discarding_masks']
merge_masks = pd['merge_masks']
    # create figure + inverted figure transform (transFigure) for drawing connector lines.
fig = plt.figure(figsize=(8,5))
transFigure = fig.transFigure.inverted()
# set up the figure layout
outer = gridspec.GridSpec(1,5,width_ratios=[1,1,2,1,2],left=0.05,right=0.95)
# set titles for each clustering step
titles = ['1. Widths','2. Heights','3. Shape','4. Pulse EODs','5. Merge']
for i, title in enumerate(titles):
title_ax = gridspec.GridSpecFromSubplotSpec(1,1,subplot_spec = outer[i])
ax = fig.add_subplot(title_ax[0])
ax.text(0,110,title,ha='center',va='bottom',clip_on=False)
ax.set_xlim([-100,100])
ax.set_ylim([-100,100])
ax.axis('off')
# compute sizes for each axis
w_size = 1
h_size = len(eod_hights)
shape_size = np.sum([len(sl) for sl in eod_shapes[0]])
    # count required axes sizes for the last two plot columns.
disc_size = 0
merge_size= 0
for shapelabel, dmasks, mmasks in zip(eod_shapes[2],disc_masks,merge_masks):
for sl, dm, mm in zip(shapelabel,dmasks,mmasks):
uld1 = np.unique((sl[0]+1)*np.invert(dm[0]))
uld2 = np.unique((sl[1]+1)*np.invert(dm[1]))
disc_size = disc_size+len(uld1[uld1>0])+len(uld2[uld2>0])
uld1 = np.unique((sl[0]+1)*mm[0])
uld2 = np.unique((sl[1]+1)*mm[1])
merge_size = merge_size+len(uld1[uld1>0])+len(uld2[uld2>0])
# set counters to keep track of the plot axes
disc_block = 0
merge_block = 0
shape_count = 0
# create all axes
width_hist_ax = gridspec.GridSpecFromSubplotSpec(w_size,1,subplot_spec = outer[0])
hight_hist_ax = gridspec.GridSpecFromSubplotSpec(h_size,1,subplot_spec = outer[1])
shape_ax = gridspec.GridSpecFromSubplotSpec(shape_size,1, subplot_spec = outer[2])
shape_windows = [gridspec.GridSpecFromSubplotSpec(2,2, hspace=0.0, wspace=0.0, subplot_spec = shape_ax[i]) for i in range(shape_size)]
EOD_delete_ax = gridspec.GridSpecFromSubplotSpec(disc_size,1,subplot_spec=outer[3])
EOD_merge_ax = gridspec.GridSpecFromSubplotSpec(merge_size,1,subplot_spec=outer[4])
# plot width labels histogram
ax1 = fig.add_subplot(width_hist_ax[0])
# set axes features.
ax1.set_xscale('log')
ax1.spines['top'].set_visible(False)
ax1.spines['right'].set_visible(False)
ax1.spines['bottom'].set_visible(False)
ax1.axes.xaxis.set_visible(False)
ax1.set_yticklabels([])
# indices for plot colors (dark to light)
colidxsw = -np.linspace(-1.25, -0.5, h_size)
for i, (wl, colw, uhl, eod_h, eod_h_labs, w_snip, w_feat, w_lab, w_dm, w_mm) in enumerate(zip(eod_widths[0], colidxsw, eod_hights[0], eod_hights[1], eod_hights[2], eod_shapes[0], eod_shapes[1], eod_shapes[2], disc_masks, merge_masks)):
# plot width hist
hw, _, _ = ax1.hist(eod_widths[1][eod_widths[2]==wl], bins=np.linspace(np.min(eod_widths[1]),np.max(eod_widths[1]),100),color=lighter(c_o,colw),orientation='horizontal')
        # set arrow when the last hist is plotted so the size of the axes is known.
if i == h_size-1:
arrowed_spines(ax1,ms=20)
        # determine total size of the height histograms now.
my,b = np.histogram(eod_h,bins=np.exp(np.linspace(np.min(np.log(eod_h)),np.max(np.log(eod_h)),100)))
maxy = np.max(my)
        # set axes features for height hist.
ax2 = fig.add_subplot(hight_hist_ax[h_size-i-1])
ax2.set_xscale('log')
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
ax2.spines['bottom'].set_visible(False)
ax2.set_xlim([0.9,maxy])
ax2.axes.xaxis.set_visible(False)
ax2.set_yscale('log')
ax2.yaxis.set_major_formatter(ticker.NullFormatter())
ax2.yaxis.set_minor_formatter(ticker.NullFormatter())
# define colors for plots
colidxsh = -np.linspace(-1.25,-0.5,len(uhl))
for n, (hl, hcol, snippets, features, labels, dmasks, mmasks) in enumerate(zip(uhl, colidxsh, w_snip, w_feat, w_lab, w_dm, w_mm)):
hh,_,_=loghist(ax2,eod_h[eod_h_labs==hl],np.min(eod_h),np.max(eod_h),100,lighter(c_g,hcol),orientation='horizontal')
# set arrow spines only on last plot
if n==len(uhl)-1:
arrowed_spines(ax2,ms=10)
# plot line from the width histogram to the height histogram.
if n==0:
coord1 = transFigure.transform(ax1.transData.transform([np.median(hw[hw!=0]),np.median(eod_widths[1][eod_widths[2]==wl])]))
coord2 = transFigure.transform(ax2.transData.transform([0.9,np.mean(eod_h)]))
line = Line2D((coord1[0],coord2[0]),(coord1[1],coord2[1]),
transform=fig.transFigure,color='grey',linewidth=0.5)
fig.lines.append(line)
# compute sizes of the eod_discarding and merge steps
s1 = np.unique((labels[0]+1)*(~dmasks[0]))
s2 = np.unique((labels[1]+1)*(~dmasks[1]))
disc_block = disc_block + len(s1[s1>0]) + len(s2[s2>0])
s1 = np.unique((labels[0]+1)*(mmasks[0]))
s2 = np.unique((labels[1]+1)*(mmasks[1]))
merge_block = merge_block + len(s1[s1>0]) + len(s2[s2>0])
axs = []
disc_count = 0
merge_count = 0
# now plot the clusters for peak and trough centerings
for pt, cmap_pt in zip([0,1],cmap_pts):
ax3 = fig.add_subplot(shape_windows[shape_size-1-shape_count][pt,0])
ax4 = fig.add_subplot(shape_windows[shape_size-1-shape_count][pt,1])
# remove axes
ax3.axes.xaxis.set_visible(False)
ax4.axes.yaxis.set_visible(False)
ax3.axes.yaxis.set_visible(False)
ax4.axes.xaxis.set_visible(False)
# set color indices
colidxss = -np.linspace(-1.25,-0.5,len(np.unique(labels[pt][labels[pt]>=0])))
j=0
for c in np.unique(labels[pt]):
if c<0:
# plot noise features + snippets
ax3.plot(features[pt][labels[pt]==c,0],features[pt][labels[pt]==c,1],'.',color='lightgrey',label='-1',rasterized=True)
ax4.plot(snippets[pt][labels[pt]==c].T,linewidth=0.1,color='lightgrey',label='-1',rasterized=True)
else:
# plot cluster features and snippets
ax3.plot(features[pt][labels[pt]==c,0],features[pt][labels[pt]==c,1],'.',color=lighter(cmap_pt,colidxss[j]),label=c,rasterized=True)
ax4.plot(snippets[pt][labels[pt]==c].T,linewidth=0.1,color=lighter(cmap_pt,colidxss[j]),label=c,rasterized=True)
                        # check whether the current cluster is an EOD; if so, plot it.
if np.sum(dmasks[pt][labels[pt]==c]) == 0:
ax = fig.add_subplot(EOD_delete_ax[disc_size-disc_block+disc_count])
ax.axis('off')
# plot mean EOD snippet
ax.plot(np.mean(snippets[pt][labels[pt]==c],axis=0),color=lighter(cmap_pt,colidxss[j]))
disc_count = disc_count + 1
# match colors and draw line..
coord1 = transFigure.transform(ax4.transData.transform([ax4.get_xlim()[1], ax4.get_ylim()[0] + 0.5*(ax4.get_ylim()[1]-ax4.get_ylim()[0])]))
coord2 = transFigure.transform(ax.transData.transform([ax.get_xlim()[0],ax.get_ylim()[0] + 0.5*(ax.get_ylim()[1]-ax.get_ylim()[0])]))
line = Line2D((coord1[0],coord2[0]),(coord1[1],coord2[1]),
transform=fig.transFigure,color='grey',linewidth=0.5)
fig.lines.append(line)
axs.append(ax)
# check if the current EOD survives the merge step
# if so, plot it.
if np.sum(mmasks[pt,labels[pt]==c])>0:
ax = fig.add_subplot(EOD_merge_ax[merge_size-merge_block+merge_count])
ax.axis('off')
ax.plot(np.mean(snippets[pt][labels[pt]==c],axis=0),color=lighter(cmap_pt,colidxss[j]))
merge_count = merge_count + 1
j=j+1
if pt==0:
                    # draw a line from the height cluster to the EOD shape clusters.
coord1 = transFigure.transform(ax2.transData.transform([np.median(hh[hh!=0]),np.median(eod_h[eod_h_labs==hl])]))
coord2 = transFigure.transform(ax3.transData.transform([ax3.get_xlim()[0],ax3.get_ylim()[0]]))
line = Line2D((coord1[0],coord2[0]),(coord1[1],coord2[1]),
transform=fig.transFigure,color='grey',linewidth=0.5)
fig.lines.append(line)
shape_count = shape_count + 1
if len(axs)>0:
# plot lines that indicate the merged clusters.
coord1 = transFigure.transform(axs[0].transData.transform([axs[0].get_xlim()[1]+0.1*(axs[0].get_xlim()[1]-axs[0].get_xlim()[0]), axs[0].get_ylim()[1]-0.25*(axs[0].get_ylim()[1]-axs[0].get_ylim()[0])]))
coord2 = transFigure.transform(axs[-1].transData.transform([axs[-1].get_xlim()[1]+0.1*(axs[-1].get_xlim()[1]-axs[-1].get_xlim()[0]), axs[-1].get_ylim()[0]+0.25*(axs[-1].get_ylim()[1]-axs[-1].get_ylim()[0])]))
line = Line2D((coord1[0],coord2[0]),(coord1[1],coord2[1]),
transform=fig.transFigure,color='grey',linewidth=1)
fig.lines.append(line)
plt.savefig('img/clustering.png')
plt.show()
def plot_bgm(mode,ccol):
""" Plot a BGM clustering step either on EOD width or height.
Plot is saved in img/*mode*_clusters.png.
Parameters
----------
mode : string
Which cluster step to plot. Options are 'width' and 'height'.
ccol : matplotlib color
Color to use for histogram.
"""
if mode == 'width':
with np.load('data/BGM_width.npz',allow_pickle=True) as pd:
# extract all variables from the dictionary:
x = pd['x']
means = pd['BGM'][1]
variances = pd['BGM'][2]
weights = pd['BGM'][0] #shapes, features, labels
use_log = pd['use_log']
labels = pd['labels']
xlab = pd['xlab']
elif mode == 'height':
with np.load('data/BGM_height.npz',allow_pickle=True) as pd:
# extract all variables from the dictionary:
x = pd['x']
means = pd['BGM'][1]
variances = pd['BGM'][2]
weights = pd['BGM'][0] #shapes, features, labels
use_log = pd['use_log']
labels = pd['labels']
xlab = pd['xlab']
else:
print('define a mode (width or height)')
return 0
# get the transform that was used as BGM input
if use_log:
x_transform = stats.zscore(np.log(x))
xplot = np.exp(np.linspace(np.log(np.min(x)),np.log(np.max(x)),1000))
else:
x_transform = stats.zscore(x)
xplot = np.linspace(np.min(x),np.max(x),1000)
# compute the x values and gaussians
x2 = np.linspace(np.min(x_transform),np.max(x_transform),1000)
gaussians = []
gmax = 0
for i, (w,m,std) in enumerate(zip(weights, means, variances)):
gaus = np.sqrt(w*stats.norm.pdf(x2,m,np.sqrt(std)))
gaussians.append(gaus)
gmax = max(np.max(gaus),gmax)
# compute classes defined by gaussian intersections
classes = np.argmax(np.vstack(gaussians),axis=0)
# find the minimum of any gaussian that is within its class
gmin = 100
for i,c in enumerate(np.unique(classes)):
gmin=min(gmin,np.min(gaussians[c][classes==c]))
# set up the figure
fig, ax1 = plt.subplots(figsize=(5,3.5))
fig_ysize = 4
ax2 = ax1.twinx()
ax1.spines['top'].set_visible(False)
ax2.spines['top'].set_visible(False)
ax1.set_xlabel('x [a.u.]')
ax1.set_ylabel('#')
ax2.set_ylabel('Likelihood')
ax2.set_yscale('log')
ax1.set_yscale('log')
if use_log:
ax1.set_xscale('log')
ax1.set_xlabel(xlab)
# define colors for plotting gaussians
colidxs = -np.linspace(-1.25,-0.5,len(np.unique(classes)))
# plot the gaussians
for i,c in enumerate(np.unique(classes)):
ax2.plot(xplot,gaussians[c],c=lighter(c_grey,colidxs[i]),linewidth=2,label=r'$N(\mu_%i,\sigma_%i)$'%(c,c))
# plot intersection lines
ax2.vlines(xplot[1:][np.diff(classes)!=0],0,gmax/gmin,color='k',linewidth=2,linestyle='--')
ax2.set_ylim([gmin,np.max(np.vstack(gaussians))*1.1])
# plot data distributions and classes
colidxs = -np.linspace(-1.25,-0.5,len(np.unique(labels)))
for i,l in enumerate(np.unique(labels)):
if use_log:
h,binn,_=loghist(ax1,x[labels==l],np.min(x),np.max(x),100,lighter(ccol,colidxs[i]),label=r'$x_%i$'%l)
else:
h,binn,_=ax1.hist(x[labels==l],bins=np.linspace(np.min(x),np.max(x),100),color=lighter(ccol,colidxs[i]),label=r'$x_%i$'%l)
# add legends and plot.
ax2.legend(loc='lower left',frameon=False,bbox_to_anchor=(-0.05,1.2),ncol=len(np.unique(classes)))
ax1.legend(loc='upper left',frameon=False,bbox_to_anchor=(-0.05,1.2),ncol=len(np.unique(labels)))
plt.subplots_adjust(top=0.7)
plt.tight_layout()
plt.savefig('img/%s_clusters.png'%mode)
plt.show()
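# Illustrative sketch (not part of the original pipeline): the dashed class
# boundaries drawn by plot_bgm follow from taking the argmax over the weighted
# Gaussian components. The sqrt-scaling mirrors the code above; the toy
# weights/means/variances below are made-up numbers.
def bgm_class_boundaries_sketch():
    import numpy as np
    from scipy import stats
    x = np.linspace(-3, 3, 1000)
    weights, means, variances = [0.4, 0.6], [-1.0, 1.0], [0.5, 0.8]
    gaussians = [np.sqrt(w * stats.norm.pdf(x, m, np.sqrt(v)))
                 for w, m, v in zip(weights, means, variances)]
    classes = np.argmax(np.vstack(gaussians), axis=0)
    # a class boundary lies wherever the winning component changes
    return x[1:][np.diff(classes) != 0]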
def plot_feature_extraction():
""" Plot clustering step on EOD shape.
    Plot is saved in img/shape_clusters.png.
"""
with np.load('data/feature_extraction.npz',allow_pickle=True) as pd:
# extract all variables from the dictionary:
raw_snippets = pd['raw_snippets']
normalized_snippets = pd['snippets']
features = pd['features']
labels = pd['clusters']
dt = 1/pd['samplerate']
ccol = cmap_pts[1]
# set up the figure layout
fig = plt.figure(figsize=(((2+0.2)*3),3))
outer = gridspec.GridSpec(1,2,wspace=0.2,hspace=0)
x = np.arange(-dt*1000*raw_snippets.shape[1]/2,dt*1000*raw_snippets.shape[1]/2,dt*1000)
snip_ax = gridspec.GridSpecFromSubplotSpec(2,1,subplot_spec = outer[0],hspace=0.35)
pc_ax = gridspec.GridSpecFromSubplotSpec(features.shape[1]-1,features.shape[1]-1,subplot_spec = outer[1],hspace=0,wspace=0)
# 3 plots: raw snippets, normalized, pcs.
ax_raw_snip = fig.add_subplot(snip_ax[0])
ax_normalized_snip = fig.add_subplot(snip_ax[1])
colidxs = -np.linspace(-1.25,-0.5,len(np.unique(labels[labels>=0])))
j=0
for c in np.unique(labels):
if c<0:
color='lightgrey'
else:
color = lighter(ccol,colidxs[j])
j=j+1
ax_raw_snip.plot(x,raw_snippets[labels==c].T,color=color,label='-1',rasterized=True,alpha=0.25)
ax_normalized_snip.plot(x,normalized_snippets[labels==c].T,color=color,alpha=0.25)
ax_raw_snip.spines['top'].set_visible(False)
ax_raw_snip.spines['right'].set_visible(False)
ax_raw_snip.get_xaxis().set_ticklabels([])
ax_raw_snip.set_title('Raw snippets')
ax_raw_snip.set_ylabel('Amplitude [a.u.]')
ax_normalized_snip.spines['top'].set_visible(False)
ax_normalized_snip.spines['right'].set_visible(False)
ax_normalized_snip.set_title('Normalized snippets')
ax_normalized_snip.set_ylabel('Amplitude [a.u.]')
ax_normalized_snip.set_xlabel('Time [ms]')
ax_raw_snip.axis('off')
ax_normalized_snip.axis('off')
ax_overlay = fig.add_subplot(pc_ax[:,:])
ax_overlay.set_title('Features')
ax_overlay.axis('off')
for n in range(features.shape[1]):
for m in range(n):
ax = fig.add_subplot(pc_ax[n-1,m])
ax.scatter(features[labels==c,m],features[labels==c,n],marker='.',color=color,alpha=0.25)
ax.set_xlim([np.min(features),np.max(features)])
ax.set_ylim([np.min(features),np.max(features)])
ax.get_xaxis().set_ticklabels([])
ax.get_yaxis().set_ticklabels([])
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
if m==0:
ax.set_ylabel('PC %i'%(n+1))
if n==features.shape[1]-1:
ax.set_xlabel('PC %i'%(m+1))
ax = fig.add_subplot(pc_ax[0,features.shape[1]-2])
ax.set_xlim([np.min(features),np.max(features)])
ax.set_ylim([np.min(features),np.max(features)])
size = max(1,int(np.ceil(-np.log10(np.max(features)-np.min(features)))))
wbar = np.floor((np.max(features)-np.min(features))*10**size)/10**size
    # note: the scalebar could be made smaller than the full feature range (e.g. a fixed fraction of it).
xscalebar(ax,0,0,wbar,wformat='%%.%if'%size)
yscalebar(ax,0,0,wbar,hformat='%%.%if'%size)
ax.axis('off')
plt.savefig('img/shape_clusters.png')
plt.show()
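# Illustrative sketch mirroring the scalebar-width computation in
# plot_feature_extraction above (the arguments are arbitrary feature limits):
def scalebar_width_sketch(vmin, vmax):
    import numpy as np
    rng = vmax - vmin
    ndec = max(1, int(np.ceil(-np.log10(rng))))  # decimals needed for a short label
    return np.floor(rng * 10**ndec) / 10**ndec   # range rounded down to those decimals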
def plot_moving_fish():
""" Plot moving fish detection step.
Plot is saved in img/moving_fish.png.
"""
fig = plt.figure(figsize=(7,3))
with np.load('data/moving_fish.npz',allow_pickle=True) as pd:
# extract all variables from the dictionary:
ws = pd['w']
dts = pd['dt']
clusterss = pd['clusters']
ts = pd['t']
fishcounts = pd['fishcount']
T = pd['T']
ignore_stepss = pd['ignore_steps']
# create gridspec
outer = gridspec.GridSpec(len(ws),1)
for i, (w, dt, clusters, t, fishcount, ignore_steps) in enumerate(zip(ws, dts, clusterss, ts, fishcounts, ignore_stepss)):
gs = gridspec.GridSpecFromSubplotSpec(2,1,subplot_spec = outer[i])
# axis for clusters
ax1 = fig.add_subplot(gs[0])
# axis for fishcount
ax2 = fig.add_subplot(gs[1])
# plot clusters as eventplot
for cnum,c in enumerate(np.unique(clusters[clusters>=0])):
ax1.eventplot(t[clusters==c],lineoffsets=cnum,linelengths=0.5,color=cmap(i))
cnum = cnum + 1
# Plot the sliding window
rect=Rectangle((0,-0.5),dt,cnum,linewidth=1,linestyle='--',edgecolor='k',facecolor='none',clip_on=False)
ax1.add_patch(rect)
ax1.arrow(dt+0.1,-0.5, 0.5,0,head_width=0.1, head_length=0.1,facecolor='k',edgecolor='k')
# plot parameters
ax1.set_title(r'$\tilde{w}_%i = %.3f ms$'%(i,1000*w))
ax1.set_ylabel('cluster #')
ax1.set_yticks(range(0,cnum))
ax1.set_xlabel('time')
ax1.set_xlim([0,T])
ax1.axes.xaxis.set_visible(False)
ax1.spines['bottom'].set_visible(False)
ax1.spines['top'].set_visible(False)
ax1.spines['right'].set_visible(False)
ax1.spines['left'].set_visible(False)
# plot for fishcount
x = fishcount[0]
y = fishcount[1]
ax2 = fig.add_subplot(gs[1])
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
ax2.spines['bottom'].set_visible(False)
ax2.axes.xaxis.set_visible(False)
yplot = np.copy(y)
ax2.plot(x+dt/2,yplot,linestyle='-',marker='.',c=cmap(i),alpha=0.25)
yplot[ignore_steps.astype(bool)] = np.NaN
ax2.plot(x+dt/2,yplot,linestyle='-',marker='.',c=cmap(i))
ax2.set_ylabel('Fish count')
ax2.set_yticks(range(int(np.min(y)),1+int(np.max(y))))
ax2.set_xlim([0,T])
if i < len(ws)-1:
ax2.axes.xaxis.set_visible(False)
else:
ax2.axes.xaxis.set_visible(False)
xscalebar(ax2,1,0,1,wunit='s',ha='right')
con = ConnectionPatch([0,-0.5], [dt/2,y[0]], "data", "data",
axesA=ax1, axesB=ax2,color='k')
ax2.add_artist(con)
con = ConnectionPatch([dt,-0.5], [dt/2,y[0]], "data", "data",
axesA=ax1, axesB=ax2,color='k')
ax2.add_artist(con)
plt.xlim([0,T])
plt.savefig('img/moving_fish.png')
plt.show()
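# Illustrative sketch of the masking idea used for the fish-count trace above:
# samples flagged in ignore_steps are set to NaN so the opaque line is
# interrupted there, while the faint full trace is drawn separately.
def mask_ignored_sketch(y, ignore_steps):
    import numpy as np
    yplot = np.array(y, dtype=float, copy=True)
    yplot[np.asarray(ignore_steps, dtype=bool)] = np.nan
    return yplot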
def plot_eod_deletion():
""" Plot one pulse-type EOD, one artefact, one wave-type EOD, one sidepeak,
and the feature extraction steps for filtering out pulse-type EODs.
    Plot is saved in img/EOD_assessment.png.
"""
fig = plt.figure()
gs = gridspec.GridSpec(5,4,figure=fig,height_ratios=[0.25,1,1,1,1])
ax = fig.add_subplot(gs[0,0])
ax.text(0,0,r'$\overline{EOD}$',ha='center',va='center')
ax.set_xlim([-100,100])
ax.set_ylim([-100,100])
ax.axis('off')
ax = fig.add_subplot(gs[0,1])
ax.text(0,0,r'$\mathcal{F}\{\overline{EOD}\}$',ha='center',va='center')
ax.set_xlim([-100,100])
ax.set_ylim([-100,100])
ax.axis('off')
ax = fig.add_subplot(gs[0,2])
ax.text(0,0,r'$\dfrac{EOD_{width}}{ISI}$',ha='center',va='center')
ax.set_xlim([-100,100])
ax.set_ylim([-100,100])
ax.axis('off')
ax = fig.add_subplot(gs[0,3])
ax.text(0,0,r'$\overline{EOD}$ & peaks',ha='center',va='center')
ax.set_xlim([-100,100])
ax.set_ylim([-100,100])
ax.axis('off')
fnames = ['good_eod_ad','artefact_ad','wave_eod_ad','sidepeak_ad']
for i, fname in enumerate(fnames):
with np.load('data/%s.npz'%fname,allow_pickle=True) as pd:
# extract all variables from the dictionary:
samplerates = pd['samplerates']
values = pd['values']
mean_eod = values[0]
fft = values[1]
isis = values[2]
ex_mean_eod = values[3][0]
pk = values[3][1][0]
tr = values[3][1][1]
hpk = values[3][2]
samplerate = samplerates[0]
samplerate_og = samplerates[1]
# plot original snippet
ax = fig.add_subplot(gs[i+1,0])
ax.plot(1000*np.arange(0,len(mean_eod)/samplerate,1/samplerate),mean_eod,c=cmap(7))
xscalebar(ax,1,0,max(1,int(250*len(mean_eod)/samplerate)),ha='right',wunit='ms')
ax.axis('off')
# plot fft
ax = fig.add_subplot(gs[i+1,1])
x = np.linspace(0,samplerate_og/2,len(fft))
ax.plot(x,np.abs(fft),c=cmap(0))
ax.fill_between(x,np.abs(fft),color=cmap(0),alpha=0.25)
ax.fill_between(x[:int(len(fft)/2)],np.abs(fft[:int(len(fft)/2)]),color=cmap(0),alpha=0.5)
ax.get_yaxis().set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.set_xticks([0,samplerate_og/4,samplerate_og/2])
if i==3:
ax.set_xticklabels([0,r'$\dfrac{f_{Nyq}}{2}$',r'$f_{Nyq}$'])
else:
ax.set_xticklabels(["","",""])
# plot ISI distribution
ax = fig.add_subplot(gs[i+1,2])
vals,_,_=ax.hist(isis,bins=np.linspace(0,1,100),color=cmap(1))
ax.plot(np.median(isis),np.max(vals)*1.2,marker='v',color=cmap(1))
ax.set_xlim([-0.02,1.02])
ax.set_xticks([0,1])
if i<3:
ax.set_xticklabels(["",""])
ax.axes.get_yaxis().set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
# plot extended snippet
ax = fig.add_subplot(gs[i+1,3])
ax.plot(1000*np.arange(0,len(ex_mean_eod)/samplerate,1/samplerate),ex_mean_eod,c=cmap(7))
xscalebar(ax,1,0,max(1,int(250*len(ex_mean_eod)/samplerate)),ha='right',wunit='ms')
ax.axis('off')
# plot midline
ax.vlines(1000*len(ex_mean_eod)/samplerate/2,np.min(ex_mean_eod),np.max(ex_mean_eod),linestyle='--',color='k')
# plot detected peaks and troughs in snippet
ax.plot(1000*pk/samplerate, ex_mean_eod[pk],'o',c='k')
ax.plot(1000*tr/samplerate, ex_mean_eod[tr],'o',c='k')
            # mark the biggest peak-trough combination.
ax.plot((1000*np.array([hpk])).T/samplerate,ex_mean_eod[hpk],linestyle='--',linewidth=2, marker='o',c=cmap(2),clip_on=False)
ax.axis('off')
plt.tight_layout()
plt.savefig('img/EOD_assessment.png')
plt.show()
plot_eod_properties()
plot_peak_detection()
plot_clustering()
plot_bgm('width',c_o)
plot_bgm('height',c_g)
plot_feature_extraction()
plot_moving_fish()
plot_eod_deletion()
|
gpl-3.0
|
rustychris/stompy
|
test/dev_crash.py
|
1
|
2913
|
import os
import time
import logging
import matplotlib.pyplot as plt
import numpy as np
import pdb
from scipy import optimize as opt
from stompy.spatial import field
from stompy import utils
from stompy.grid import (unstructured_grid, exact_delaunay, front)
import logging
logging.basicConfig(level=logging.INFO)
from stompy.spatial.linestring_utils import upsample_linearring,resample_linearring
from stompy.spatial import field,constrained_delaunay,wkb2shp
## Curve -
def hex_curve():
hexagon = np.array( [[0,11],
[10,0],
[30,0],
[40,9],
[30,20],
[10,20]] )
return front.Curve(hexagon)
def test_basic_setup():
boundary=hex_curve()
af=front.AdvancingTriangles()
scale=field.ConstantField(3)
af.add_curve(boundary)
af.set_edge_scale(scale)
# create boundary edges based on scale and curves:
af.initialize_boundaries()
return af
# when resample nodes on a sliding boundary, want to calculate the available
# span, and if it's small, start distributing the nodes evenly.
# where small is defined by local_scale * max_span_factor
def test_resample():
af=test_basic_setup()
a=0
b=af.grid.node_to_nodes(a)[0]
he=af.grid.nodes_to_halfedge(a,b)
anchor=he.node_rev()
n=he.node_fwd()
n2=he.rev().node_rev()
# Fails here, in grid.modify_node
af.resample(n=n,anchor=anchor,scale=25,direction=1)
af.resample(n=n2,anchor=anchor,scale=25,direction=-1)
test_resample()
# during modify_node(n=9)
# 9 comes in as node b in call to line_is_free
# vertex handle gives it as 22.5,0.0, which is the new location
# lw from line_walk is bad.
# after_add_node() just inserts the new point into the DT.
# - could be related to premature garbage collection of points?
# nope.
# - related to init_face? has to be there for proper functioning
# - or failure to remove the original vertex before creating the new one?
# no, that seems to be taken care of.
# does a line free call work before modifying the node?
# nope. So maybe something else in the early part of before_modify_node
# invalidates the state?
# it's the second time through the loop that fails?
# 10--9 crashes, even when it's the first in the loop
# even if we could drop init_face, it segfaults without it.
# segfaults when performing the line walk on a deep copy of DT.
# the test it is attempting is along an existing finite edge.
# happens whether the edge is constrained or not.
# Possible next steps:
# 1. could remove the node, insert in the new spot, maybe do a locate first?
# and for any nodes which are now DT neighbors clearly we can skip the
# line_is_free.
# 2. hand-write the line_is_free stuff, ala live_dt.
# 3. Abstract out the line_is_free stuff in live_dt, and both that and this
# can use it.
|
mit
|
AlfredNeverKog/BrainCarya
|
src/my/kadenze/lesson1/GaussianTF.py
|
1
|
1827
|
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
sess = tf.InteractiveSession()  # open an interactive session so tensor.eval() works below
sigma = 3.0
mean = 0
def gaus(x):
    y = (1 / (sigma * tf.sqrt(2.0 * 3.14))) * tf.exp(-(tf.pow(x - mean, 2.0)) / (2 * tf.pow(sigma, 2.0)))  # unary minus instead of the removed tf.neg
return y
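# The same Gaussian in plain NumPy, for reference (illustrative only; uses
# np.pi instead of the 3.14 approximation above):
def gaus_np(x, mean=0.0, sigma=3.0):
    return (1.0 / (sigma * np.sqrt(2.0 * np.pi))) * np.exp(-((x - mean) ** 2) / (2.0 * sigma ** 2))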
def geus2d():
x = tf.linspace(-5.0,5.0,3)
y = gaus(x)
plt.plot(x.eval(), y.eval())
plt.show()
def gaus3d():
x = tf.linspace(-5.0, 5.0, 150)
y = gaus(x)
size = x.get_shape().as_list()[0]
gaus2d = tf.matmul(tf.reshape(y, [size, 1]), tf.reshape(y, [1, size]))
plt.imshow(gaus2d.eval())
plt.show()
def animation():
from matplotlib import animation
import random
fig = plt.figure()
ax = plt.axes()
line = ax.imshow([[]])
def animate(size):
global mean
        print(size, mean)  # Python 3 print call (was Python 2 style "print size, mean")
size = 300
mean += ((random.random() / 5) * (-1.0 if random.random() > .5 else 1.0))
x = tf.linspace(-5.0, 5.0, size + 1)
        y = (1 / (sigma * tf.sqrt(2.0 * 3.14))) * tf.exp(-(tf.pow(x - mean, 2.0)) / (2 * tf.pow(sigma, 2.0)))  # unary minus instead of the removed tf.neg
size = x.get_shape().as_list()[0]
gaus2d = tf.matmul(tf.reshape(y, [size, 1]), tf.reshape(y, [1, size]))
val = gaus2d.eval()
return ax.imshow(val),
"""
animate quality
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=60, interval=1, blit=True)
anim.save('gausian_quality.mp4', fps=3, extra_args=['-vcodec', 'libx264'])
plt.show()
sigma = 1.0
mean = 0.0
"""
"""
animate(5)
anim = animation.FuncAnimation(fig, animate,
frames=20, interval=1, blit=True)
anim.save('gausian_move.mp4', fps=5, extra_args=['-vcodec', 'libx264'])
plt.show()
"""
gaus3d()
|
mit
|
ManyBodyPhysics/LectureNotesPhysics
|
Programs/Chapter10-programs/python/imsrg_pairing/py27/plot_imsrg_flow.py
|
2
|
4799
|
#!/usr/bin/env python
#------------------------------------------------------------------------------
# plot_imsrg_flow.py
#
# author: H. Hergert
# version: 1.0.1
# date: Jul 6, 2020
#
# tested with Python v2.7 and v3.7
#
#------------------------------------------------------------------------------
from sys import argv
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
from matplotlib.colors import SymLogNorm, Normalize
from mpl_toolkits.axes_grid1 import AxesGrid, make_axes_locatable
import numpy as np
from numpy import array, dot, diag, reshape
#------------------------------------------------------------------------------
# plot helpers
#------------------------------------------------------------------------------
# format tick labels using LaTeX-like math fonts
def myLabels(x, pos):
return '$%s$'%x
def myLogLabels(x, pos):
return '$10^{%d}$'%(np.log10(x))
# save these settings for use in both following plots
def myPlotSettings(ax):
ax.minorticks_on()
ax.tick_params(axis='both',which='major',width=1.5,length=8)
ax.tick_params(axis='both',which='minor',width=1.5,length=5)
ax.tick_params(axis='both',width=2,length=10,labelsize=20)
for s in ['left', 'right', 'top', 'bottom']:
ax.spines[s].set_linewidth(2)
return
#------------------------------------------------------------------------------
# plot flow
#------------------------------------------------------------------------------
def plot_energies(data, exact, filename):
# diagonals vs. eigenvalues on absolute scale
fig, ax = plt.subplots()
plt.semilogx([1.0e-8,1.0e-4,1.0,100], [exact,exact,exact,exact], linewidth=2,
color='black', linestyle='dashed', dashes=(10,5))
plt.semilogx(data[:,0], data[:,1], color='blue', marker='o', markersize=9, label='$E$')
plt.semilogx(data[:,0], data[:,1]+data[:,2], color='red', marker='s', markersize=9, label='$+\Delta E^{(2)}$')
plt.semilogx(data[:,0], data[:,1]+data[:,2]+data[:,3], color='green', marker='D', markersize=9,label='$+\Delta E^{(3)}$')
myPlotSettings(ax)
ax.xaxis.set_major_formatter(FuncFormatter(myLogLabels))
ax.yaxis.set_major_formatter(FuncFormatter(myLabels))
ax.set_xlim([0.00006,13])
ymin,ymax=ax.get_ylim()
ax.set_ylim(ymin-0.005,ymax+0.005)
plt.xlabel('$s$', fontsize=20)
plt.ylabel('$E\,\mathrm{[a.u.]}$', fontsize=20)
# plt.legend(bbox_to_anchor=(0.35, 0.05), loc=3, borderaxespad=0.5)
plt.legend(loc=1, borderaxespad=0.5)
plt.savefig("%s.pdf"%(filename), bbox_inches="tight", pad_inches=0.05)
plt.show()
plt.close()
return
def plot_norms_loglog(data, filename):
# diagonals vs. eigenvalues on absolute scale
fig, ax = plt.subplots()
plt.loglog(data[:,0], data[:,6], basex=10, color='blue', marker='o', markersize=9, label='$||\eta||$')
plt.loglog(data[:,0], data[:,8], basex=10, color='red', marker='s', markersize=9, label='$||\Gamma_{od}||$')
myPlotSettings(ax)
ax.xaxis.set_major_formatter(FuncFormatter(myLogLabels))
ax.yaxis.set_major_formatter(FuncFormatter(myLogLabels))
plt.xlabel('$s$', fontsize=20)
plt.ylabel('$||\eta||, ||\Gamma_{od}||\, [\mathrm{a.u.}]$', fontsize=20)
plt.legend(bbox_to_anchor=(0.05, 0.05), loc=3, borderaxespad=0.5)
plt.savefig("%s.norms.pdf"%(filename.rsplit(".",1)[0]), bbox_inches="tight", pad_inches=0.05)
plt.show()
plt.close()
return
def plot_norms_semilog(data, filename):
# diagonals vs. eigenvalues on absolute scale
fig, ax = plt.subplots()
plt.semilogy(data[:,0], data[:,6], basey=10, color='blue', marker='o', markersize=9, label='$||\eta||$')
plt.semilogy(data[:,0], data[:,8], basey=10, color='red', marker='s', markersize=9, label='$||\Gamma_{od}||$')
myPlotSettings(ax)
ax.xaxis.set_major_formatter(FuncFormatter(myLabels))
ax.yaxis.set_major_formatter(FuncFormatter(myLogLabels))
plt.xlabel('$s$', fontsize=20)
plt.ylabel('$||\eta||, ||\Gamma_{od}||\, [\mathrm{a.u.}]$', fontsize=20)
plt.legend(bbox_to_anchor=(0.05, 0.05), loc=3, borderaxespad=0.5)
plt.savefig("%s.norms.semilog.pdf"%(filename.rsplit(".",1)[0]), bbox_inches="tight", pad_inches=0.05)
plt.show()
plt.close()
return
#------------------------------------------------------------------------------
# main
#------------------------------------------------------------------------------
def main():
filename = argv[1]
    exact = float(argv[2])  # reference energy passed on the command line
# read data from file
data = np.loadtxt(filename, skiprows=2)
plot_energies(data, exact, filename)
plot_norms_loglog(data,filename)
plot_norms_semilog(data,filename)
return
#------------------------------------------------------------------------------
# make executable
#------------------------------------------------------------------------------
if __name__ == "__main__":
main()
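# Example invocation (illustrative; the flow file is whatever the IM-SRG run
# wrote, read with np.loadtxt(skiprows=2) above, and the second argument is
# the exact/reference ground-state energy):
#   python plot_imsrg_flow.py <flow_data_file> <exact_energy>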
|
cc0-1.0
|
datapythonista/pandas
|
pandas/tests/dtypes/test_generic.py
|
6
|
4327
|
from warnings import catch_warnings
import numpy as np
import pytest
from pandas.core.dtypes import generic as gt
import pandas as pd
import pandas._testing as tm
class TestABCClasses:
tuples = [[1, 2, 2], ["red", "blue", "red"]]
multi_index = pd.MultiIndex.from_arrays(tuples, names=("number", "color"))
datetime_index = pd.to_datetime(["2000/1/1", "2010/1/1"])
timedelta_index = pd.to_timedelta(np.arange(5), unit="s")
period_index = pd.period_range("2000/1/1", "2010/1/1/", freq="M")
categorical = pd.Categorical([1, 2, 3], categories=[2, 3, 1])
categorical_df = pd.DataFrame({"values": [1, 2, 3]}, index=categorical)
df = pd.DataFrame({"names": ["a", "b", "c"]}, index=multi_index)
sparse_array = pd.arrays.SparseArray(np.random.randn(10))
datetime_array = pd.core.arrays.DatetimeArray(datetime_index)
timedelta_array = pd.core.arrays.TimedeltaArray(timedelta_index)
abc_pairs = [
("ABCInt64Index", pd.Int64Index([1, 2, 3])),
("ABCUInt64Index", pd.UInt64Index([1, 2, 3])),
("ABCFloat64Index", pd.Float64Index([1, 2, 3])),
("ABCMultiIndex", multi_index),
("ABCDatetimeIndex", datetime_index),
("ABCRangeIndex", pd.RangeIndex(3)),
("ABCTimedeltaIndex", timedelta_index),
("ABCIntervalIndex", pd.interval_range(start=0, end=3)),
("ABCPeriodArray", pd.arrays.PeriodArray([2000, 2001, 2002], freq="D")),
("ABCPandasArray", pd.arrays.PandasArray(np.array([0, 1, 2]))),
("ABCPeriodIndex", period_index),
("ABCCategoricalIndex", categorical_df.index),
("ABCSeries", pd.Series([1, 2, 3])),
("ABCDataFrame", df),
("ABCCategorical", categorical),
("ABCDatetimeArray", datetime_array),
("ABCTimedeltaArray", timedelta_array),
]
@pytest.mark.parametrize("abctype1, inst", abc_pairs)
@pytest.mark.parametrize("abctype2, _", abc_pairs)
def test_abc_pairs(self, abctype1, abctype2, inst, _):
# GH 38588
if abctype1 == abctype2:
assert isinstance(inst, getattr(gt, abctype2))
else:
assert not isinstance(inst, getattr(gt, abctype2))
abc_subclasses = {
"ABCIndex": [
abctype
for abctype, _ in abc_pairs
if "Index" in abctype and abctype != "ABCIndex"
],
"ABCNDFrame": ["ABCSeries", "ABCDataFrame"],
"ABCExtensionArray": [
"ABCCategorical",
"ABCDatetimeArray",
"ABCPeriodArray",
"ABCTimedeltaArray",
],
}
@pytest.mark.parametrize("parent, subs", abc_subclasses.items())
@pytest.mark.parametrize("abctype, inst", abc_pairs)
def test_abc_hierarchy(self, parent, subs, abctype, inst):
# GH 38588
if abctype in subs:
assert isinstance(inst, getattr(gt, parent))
else:
assert not isinstance(inst, getattr(gt, parent))
@pytest.mark.parametrize("abctype", [e for e in gt.__dict__ if e.startswith("ABC")])
def test_abc_coverage(self, abctype):
# GH 38588
assert (
abctype in (e for e, _ in self.abc_pairs) or abctype in self.abc_subclasses
)
def test_setattr_warnings():
# GH7175 - GOTCHA: You can't use dot notation to add a column...
d = {
"one": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"two": pd.Series([1.0, 2.0, 3.0, 4.0], index=["a", "b", "c", "d"]),
}
df = pd.DataFrame(d)
with catch_warnings(record=True) as w:
# successfully add new column
# this should not raise a warning
df["three"] = df.two + 1
assert len(w) == 0
assert df.three.sum() > df.two.sum()
with catch_warnings(record=True) as w:
# successfully modify column in place
# this should not raise a warning
df.one += 1
assert len(w) == 0
assert df.one.iloc[0] == 2
with catch_warnings(record=True) as w:
# successfully add an attribute to a series
# this should not raise a warning
df.two.not_an_index = [1, 2]
assert len(w) == 0
with tm.assert_produces_warning(UserWarning):
# warn when setting column to nonexistent name
df.four = df.two + 2
assert df.four.sum() > df.two.sum()
|
bsd-3-clause
|
jpn--/larch
|
larch/data_services/h5/h5pod/generic.py
|
1
|
31510
|
import os
from pathlib import Path
import tables as tb
import numpy
import pandas
import logging
from ....util import Dict
from ....util.aster import asterize
from ....util.text_manip import truncate_path_for_display
from ... import _reserved_names_
from ...pod import Pod
from ...general import _sqz_same, selector_len_for
from .... import warning
class IncompatibleShape(ValueError):
pass
class NoKnownShape(ValueError):
pass
class CArray(tb.CArray):
@property
def DICTIONARY(self):
return Dict( self._v_attrs.DICTIONARY )
@DICTIONARY.setter
def DICTIONARY(self, x):
self._v_attrs.DICTIONARY = dict(x)
@property
def DESCRIPTION(self):
return self._v_attrs.TITLE
@DESCRIPTION.setter
def DESCRIPTION(self, x):
self._v_attrs.TITLE = str(x)
@property
def TITLE(self):
return self._v_attrs.TITLE
@TITLE.setter
def TITLE(self, x):
self._v_attrs.TITLE = str(x)
def __repr__(self):
r = super().__repr__()
try:
d = self.DICTIONARY
except:
return r
r += "\n dictionary := {\n "
r += repr(d).replace("\n","\n ")
r += "\n }"
return r
def uniques(self, slicer=None, counts=False):
if isinstance(slicer, (bool, int)) and counts is False:
counts = bool(slicer)
slicer = None
if slicer is None:
slicer = slice(None)
action = self[slicer]
len_action = len(action)
try:
action = action[~numpy.isnan(action)]
except TypeError:
num_nan = 0
else:
num_nan = len_action - len(action)
if counts:
x = numpy.unique(action, return_counts=counts)
try:
d = self.DICTIONARY
except AttributeError:
y = pandas.Series(x[1], x[0])
else:
y = pandas.Series(x[1], [(d[j] if j in d else j) for j in x[0]])
if num_nan:
y[numpy.nan] = num_nan
return y
if num_nan:
numpy.append(action, numpy.nan)
return numpy.unique(action)
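# Illustrative sketch (assumption: `node` is a CArray obtained from an H5Pod
# opened in a writable mode; the names and values below are made up):
def _carray_usage_sketch(node):
    node.TITLE = 'household income bracket'            # stored in the node's TITLE attr
    node.DICTIONARY = {1: 'low', 2: 'mid', 3: 'high'}  # stored in the node's attrs
    counts = node.uniques(counts=True)  # pandas Series of value -> count, labelled via DICTIONARY
    return node.uniques(), counts       # unique (non-NaN) values, plus the counts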
class H5Pod(Pod):
def __init__(self, filename=None, mode='a', groupnode=None, *,
h5f=None, inmemory=False, temp=False,
complevel=1, complib='zlib',
do_nothing=False,
ident=None,
shape=None,
):
super().__init__(ident=ident)
if do_nothing:
return
if isinstance(filename, H5Pod):
            # Copy / re-class constructor
x = filename
self._groupnode = x._groupnode
self._h5f_own = False
return
if isinstance(filename, tb.group.Group) and groupnode is None:
# Called with just a group node, use it
groupnode = filename
filename = None
if isinstance(filename, (str,Path)):
filename = os.fspath(filename)
if groupnode is None:
groupnode = "/"
if filename is None and mode=='a' and groupnode is None:
groupnode = '/' # default constructor for temp obj
if isinstance(groupnode, tb.group.Group):
# Use the existing info from this group node, ignore all other inputs
self._groupnode = groupnode
filename = groupnode._v_file.filename
mode = groupnode._v_file.mode
self._h5f_own = False
elif isinstance(groupnode, str):
# apply expanduser to filename to allow for home-folder based filenames
if isinstance(filename,str):
filename = os.path.expanduser(filename)
if filename is None:
temp = True
from ....util.temporaryfile import TemporaryFile
self._TemporaryFile = TemporaryFile(suffix='.h5d')
filename = self._TemporaryFile.name
if h5f is not None:
self._h5f_own = False
self._groupnode = self._h5f.get_node(groupnode)
else:
kwd = {}
if inmemory or temp:
kwd['driver']="H5FD_CORE"
if temp:
kwd['driver_core_backing_store']=0
if complevel is not None:
kwd['filters']=tb.Filters(complib=complib, complevel=complevel)
self._h5f_obj = tb.open_file(filename, mode, **kwd)
self._h5f_own = True
try:
self._groupnode = self._h5f_obj.get_node(groupnode)
except tb.NoSuchNodeError:
if isinstance(groupnode, str):
self._groupnode = self._h5f_obj._get_or_create_path(groupnode, True)
else:
raise ValueError('must give groupnode as `str` or `tables.group.Group`')
self._recoverable = (filename, self._groupnode._v_pathname)
if shape is not None:
self.shape = shape
@property
def _h5f(self):
return self._groupnode._v_file
def __repr__(self):
from ....util.text_manip import max_len
s = f"<larch.{self.__class__.__name__}>"
try:
s += f"\n | file: {truncate_path_for_display(self.filename)}"
if self._groupnode._v_pathname != "/":
s += f"\n | node: {self._groupnode._v_pathname}"
try:
shape = self.shape
except NoKnownShape:
shape = None
else:
s += f"\n | shape: {shape}"
try:
metashape = self.metashape
except (NoKnownShape, AttributeError):
pass
else:
if metashape != shape:
s += f"\n | metashape: {metashape}"
if len(self._groupnode._v_children):
s += "\n | data:"
just = max_len(self._groupnode._v_children.keys())
for i in sorted(self._groupnode._v_children.keys()):
try:
node_dtype = self._groupnode._v_children[i].dtype
except tb.NoSuchNodeError:
node_dtype = "<no dtype>"
s += "\n | {0:{2}s} ({1})".format(i, node_dtype, just)
else:
s += "\n | data: <empty>"
except (tb.ClosedNodeError, tb.ClosedFileError):
s += f"\n | <file is closed>"
s += f"\n | file: {truncate_path_for_display(self._recoverable[0])}"
s += f"\n | node: {self._recoverable[1]}"
return s
def __xml__(self, no_data=False, descriptions=True, dictionaries=False):
from xmle import Elem
x = Elem('div')
t = x.elem('table', style="margin-top:1px;")
t.elem('caption', text=f"<larch.{self.__class__.__name__}>", style="caption-side:top;text-align:left;font-family:Roboto;font-weight:700;font-style:normal;font-size:100%;padding:0px;")
#
try:
ident = self.ident
except AttributeError:
pass
else:
tr = t.elem('tr')
tr.elem('th', text='ident')
tr.elem('td', text=ident)
#
try:
filename = self.filename
except AttributeError:
pass
else:
tr = t.elem('tr')
tr.elem('th', text='file')
tr.elem('td', text=truncate_path_for_display(filename))
#
try:
filemode = self.filemode
except AttributeError:
pass
else:
tr = t.elem('tr')
tr.elem('th', text='mode')
tr.elem('td', text=truncate_path_for_display(self.filemode))
#
try:
v_pathname = self._groupnode._v_pathname
except AttributeError:
pass
else:
if self._groupnode._v_pathname != "/":
tr = t.elem('tr')
tr.elem('th', text='node')
tr.elem('td', text=self._groupnode._v_pathname)
#
try:
str_shape = str(self.shape)
except NoKnownShape:
pass
else:
tr = t.elem('tr')
tr.elem('th', text='shape')
tr.elem('td', text=str_shape)
#
try:
str_shape = str(self.metashape)
except (NoKnownShape, AttributeError):
pass
else:
tr = t.elem('tr')
tr.elem('th', text='metashape')
tr.elem('td', text=str_shape)
#
try:
str_durable_mask = f"0x{self.durable_mask:X}"
except (AttributeError):
pass
else:
if str_durable_mask!='0x0':
tr = t.elem('tr')
tr.elem('th', text='durable_mask')
tr.elem('td', text=str_durable_mask)
#
if not no_data:
if len(self._groupnode._v_children):
tr = t.elem('tr')
tr.elem('th', text='data', style='vertical-align:top;')
td = tr.elem('td')
t1 = td.elem('table', cls='dictionary')
t1head = t1.elem('thead')
t1headr = t1head.elem('tr')
t1headr.elem('th', text='name')
t1headr.elem('th', text='dtype')
if descriptions:
t1headr.elem('th', text='description')
any_sources = 0
for i in sorted(self._groupnode._v_children.keys()):
try:
node_dtype = self._groupnode._v_children[i].dtype
except (tb.NoSuchNodeError, AttributeError):
node_dtype = "<no dtype>"
if i not in _reserved_names_:
tr1 = t1.elem('tr')
tr1.elem('td', text=i)
tr1.elem('td', text=node_dtype)
if descriptions:
try:
title = self._groupnode._v_children[i]._v_attrs['TITLE']
except:
title = ""
else:
tr1.elem('td', text=title)
try:
orig_source = self._groupnode._v_children[i]._v_attrs['ORIGINAL_SOURCE']
except:
pass
else:
tr1.elem('td', text=orig_source)
any_sources += 1
if any_sources:
t1headr.elem('th', text='source')
else:
tr = t.elem('tr')
tr.elem('th', text='data', style='vertical-align:top;')
tr.elem('td', text='<empty>')
return x
def _repr_html_(self):
return self.__xml__().tostring()
def change_mode(self, mode, **kwarg):
"""Change the file mode of the underlying HDF5 file.
Can be used to change from read-only to read-write.
"""
if mode == self.filemode:
return
if mode == 'w':
raise TypeError("cannot change_mode to w, close the file and delete it")
filename = self.filename
groupnode_path = self._groupnode._v_pathname
self.close()
self.__init__(filename, mode, groupnode=groupnode_path, **kwarg)
return self
def reopen(self, mode='r', **kwarg):
"""Reopen the underlying HDF5 file.
Can be used to change from read-only to read-write or to reopen a file that was closed.
"""
if mode == self.filemode:
return
if mode == 'w':
raise TypeError("cannot change_mode to w, close the file and delete it")
filename = self.filename
groupnode_path = self.groupnode_path
try:
self.close()
except tb.ClosedNodeError:
pass
self.__init__(filename, mode, groupnode=groupnode_path, **kwarg)
return self
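    # e.g. (illustrative): pod.change_mode('a') upgrades a read-only pod to
    # read-write, and pod.reopen('r') reopens a pod whose file was closed.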
def names(self):
return [i for i in self._groupnode._v_children.keys() if i not in _reserved_names_]
def rename_vars(self, *rename_vars):
"""
Rename variables according to patterns.
Parameters
----------
rename_vars : 2-tuples
A sequence of 2-tuples, giving (pattern, replacement) that will be fed to re.sub.
For example, give ('^','prefix_') to add prefix to all variable names, or
('^from_this_name$','to_this_name') to change an exact name from one thing to another.
"""
import re
for pattern, replacement in rename_vars:
q = [(re.sub(pattern, replacement, k),k) for k in self.names()]
for _to,_from in q:
if _to != _from:
self._groupnode._v_children[_from]._f_rename(_to)
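    # e.g. (illustrative): pod.rename_vars(('^', 'hh_')) prefixes every variable
    # name with "hh_", and pod.rename_vars(('^old_name$', 'new_name')) renames one.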
def reshape(self, *shape):
if len(shape)==0:
raise ValueError('no shape given')
if len(shape)==1 and isinstance(shape[0], tuple):
shape = shape[0]
if isinstance(shape, int):
shape = (shape,)
if not isinstance(shape, tuple):
raise TypeError('reshape must be int or tuple')
if len(shape)==1 and shape[0] == -1:
shape = (int(numpy.product(self.shape)), )
elif len(shape)==2:
if shape[0] == -1 and shape[1] > 0:
shape = (int(numpy.product(self.shape) / shape[1]), shape[1])
if shape[1] == -1 and shape[0] > 0:
shape = (shape[0], int(numpy.product(self.shape) / shape[0]))
if numpy.product(shape) != numpy.product(self.shape):
raise ValueError(f'incompatible reshape {shape} for current shape {self.shape}')
#print("reshape to", shape)
for k in self._groupnode._v_children.keys():
#print("reshape",k,shape)
self._groupnode._v_children[k].shape = shape
self.shape = shape
def __dir__(self):
x = super().__dir__()
x.extend(self.names())
return x
@property
def filename(self):
try:
return self._groupnode._v_file.filename
except (tb.ClosedNodeError, tb.ClosedFileError) as err:
try:
return self._last_closed_filename
except AttributeError:
raise err
@property
def filemode(self):
try:
return self._groupnode._v_file.mode
except (tb.ClosedNodeError, tb.ClosedFileError) as err:
return None
@property
def groupnode_path(self):
try:
return self._groupnode._v_pathname
except (tb.ClosedNodeError, tb.ClosedFileError) as err:
try:
return self._last_closed_groupnode_path
except AttributeError:
raise err
@property
def n_cases(self):
return self.shape[0]
@property
def shape(self):
"""The shape of the pod.
"""
if 'SHAPE' in self._groupnode._v_attrs:
return tuple(self._groupnode._v_attrs['SHAPE'][:])
if len(self.names()):
for v in self._groupnode._v_children.values():
try:
found_shape = v.shape
except:
pass
else:
try:
self.shape = found_shape
except:
pass
return tuple(found_shape)
raise NoKnownShape()
@shape.setter
def shape(self, x):
# if self._groupnode._v_nchildren > 0:
# raise ValueError('this pod has shape {!s} but you want to set {!s}'.format(self.shape, x))
# if self._groupnode._v_nchildren == 0:
self._groupnode._v_attrs.SHAPE = numpy.asarray(x, dtype=int)
@property
def metashape(self):
"""The actual shape of the data underlying the pod, often same as shape."""
return self.shape
def add_expression(self, name, expression, *, overwrite=False, title=None, dictionary=None, dtype=None):
arr = self[expression]
if dtype is not None:
arr = arr.astype(dtype)
try:
dtype_str = "("+dtype.__name__+")"
except:
dtype_str = ""
original_source = f'={dtype_str} {expression}'
else:
original_source = f'= {expression}'
if overwrite=='ignore':
overwrite = False
if_exists = 'ignore'
else:
if_exists = 'raise'
try:
self.add_array(name, arr, overwrite=overwrite, title=title, dictionary=dictionary,
original_source=original_source, rel_original_source=False)
except tb.exceptions.NodeError:
if if_exists=='ignore':
pass
else:
raise
def add_array(self, name, arr, *, overwrite=False, original_source=None, rel_original_source=True,
title=None, dictionary=None, fix_name_problems=True):
"""Create a new variable in the H5Pod.
Creating a new variable in the data might be convenient in some instances.
If you create an array externally, you can add it to the file easily with
this command.
Parameters
----------
name : str
The name of the new variable.
arr : ndarray
An array to add as the new variable. Must have the correct shape.
overwrite : bool
Should the variable be overwritten if it already exists, default to False.
original_source : str
Optionally, give the file name or other description of the source of the data in this array.
rel_original_source : bool
If true, change the absolute path of the original_source to a relative path viz this file.
title : str, optional
A descriptive title for the variable, typically a short phrase but an
arbitrary length description is allowed.
dictionary : dict, optional
A data dictionary explaining some or all of the values in this field.
Even for otherwise self-explanatory numerical values, the dictionary
may give useful information about particular out of range values.
Raises
------
tables.exceptions.NodeError
If a variable of the same name already exists.
"""
if name in _reserved_names_:
raise ValueError(f'{name} is a reserved name')
if '/' in name and fix_name_problems:
import warnings
warnings.warn(f'the ``/`` character is not allowed in variable names ({name})\n'
f'changing it to ``|``')
name = name.replace('/','|')
try:
existing_shape = tuple(self.metashape)
except NoKnownShape:
pass
else:
if existing_shape != arr.shape:
# maybe just has extra size-1 dims, check for that...
arr = arr.squeeze()
if self.podtype == 'idcs':
if existing_shape[:-1] != arr.shape:
raise IncompatibleShape(
"new array must have shape {!s} but the array given has shape {!s}".format(self.shape, arr.shape))
else:
if existing_shape != arr.shape:
raise IncompatibleShape(
"new array must have shape {!s} but the array given has shape {!s}".format(self.shape, arr.shape))
if overwrite:
self.delete_array(name)
try:
h5var = self._h5f.create_carray(self._groupnode, name, obj=arr)
except ValueError as valerr:
if "unknown type" in str(valerr) or "unknown kind" in str(valerr): # changed for pytables 3.3
try:
tb_atom = tb.Atom.from_dtype(arr.dtype)
except ValueError:
log = logging.getLogger('H5')
try:
maxlen = int(len(max(arr.astype(str), key=len)))
except ValueError:
import datetime
if 0: # isinstance(arr[0], datetime.time):
log.warning(f" column {name} is datetime.time, converting to Time32")
tb_atom = tb.atom.Time32Atom()
# convert_datetime_time_to_epoch_seconds = lambda tm: tm.hour*3600+ tm.minute*60 + tm.second
def convert_datetime_time_to_epoch_seconds(tm):
try:
return tm.hour * 3600 + tm.minute * 60 + tm.second
except:
if numpy.isnan(tm):
return 0
else:
raise
arr = arr.apply(convert_datetime_time_to_epoch_seconds).astype(numpy.int32).values
else:
# import __main__
# __main__.err_df = df
raise
else:
maxlen = max(maxlen, 8)
if arr.dtype != object:
log.warning(f"cannot create column {name} as dtype {arr.dtype}, converting to S{maxlen:d}")
arr = arr.astype('S{}'.format(maxlen))
tb_atom = tb.Atom.from_dtype(arr.dtype)
h5var = self._h5f.create_carray(self._groupnode, name, tb_atom, shape=arr.shape)
h5var[:] = arr
else:
raise
if rel_original_source and original_source and original_source[0] != '=':
basedir = os.path.dirname(self.filename)
original_source = os.path.relpath(original_source, start=basedir)
if original_source is not None:
h5var._v_attrs.ORIGINAL_SOURCE = original_source
if title is not None:
h5var._v_attrs.TITLE = title
if dictionary is not None:
h5var._v_attrs.DICTIONARY = dictionary
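    # e.g. (illustrative, names made up): pod.add_array('income', arr,
    #     title='household income', dictionary={-1: 'missing'}, overwrite=True)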
def add_blank(self, name, shape=None, dtype=numpy.float64, **kwargs):
"""Create a new variable in the H5Pod.
Creating a new variable in the data might be convenient in some instances.
If you create an array externally, you can add it to the file easily with
this command.
Parameters
----------
name : str
The name of the new variable.
dtype : dtype
The dtype of the empty array to add as the new variable.
shape : tuple
The shape of the empty array to add. Must be compatible with existing
shape, if any.
Other keyword parameters are passed through to `add_array`.
Raises
------
tables.exceptions.NodeError
If a variable of the same name already exists.
NoKnownShape
If shape is not given and not already known from the file.
"""
if name in _reserved_names_:
raise ValueError(f'{name} is a reserved name')
try:
existing_shape = tuple(self.metashape)
except NoKnownShape:
if shape is None:
raise
else:
if shape is None:
shape = existing_shape
if existing_shape != tuple(shape):
raise IncompatibleShape(
"new array must have shape {!s} but the array given has shape {!s}".format(self.shape, shape))
return self.add_array(name, numpy.zeros(shape, dtype=dtype), **kwargs)
def delete_array(self, name, recursive=True):
"""Delete an existing variable.
Parameters
----------
name : str
The name of the data node to remove.
recursive : bool
If the data node is a group, recursively remove all sub-nodes.
"""
if name in _reserved_names_:
raise ValueError(f'{name} is a reserved name')
try:
self._h5f.remove_node(self._groupnode, name, recursive)
except tb.exceptions.NoSuchNodeError:
pass
def flush(self, *arg, **kwargs):
return self._h5f.flush(*arg, **kwargs)
def close(self, *arg, **kwargs):
try:
self._last_closed_filename = self.filename
self._last_closed_groupnode_path = self.groupnode_path
except:
pass
return self._h5f.close(*arg, **kwargs)
@property
def podtype(self):
return ''
def uri(self, mode=None):
from urllib.parse import urlunparse
q_dict = {}
if self.podtype:
q_dict['type'] = self.podtype
if mode:
q_dict['mode'] = mode
q = "&".join(f'{k}={v}' for k,v in q_dict.items())
return urlunparse(['file', '', self.filename, '', q, self._groupnode._v_pathname])
def _remake_command(self, cmd, selector=None, receiver=None):
from tokenize import tokenize, untokenize, NAME, OP, STRING
DOT = (OP, '.')
COLON = (OP, ':')
COMMA = (OP, ',')
OBRAC = (OP, '[')
CBRAC = (OP, ']')
OPAR = (OP, '(')
CPAR = (OP, ')')
from io import BytesIO
recommand = []
if receiver:
recommand += [(NAME, receiver), OBRAC, COLON, CBRAC, (OP, '='), ]
try:
cmd_encode = cmd.encode('utf-8')
except AttributeError:
cmd_encode = str(cmd).encode('utf-8')
dims = len(self.shape)
g = tokenize(BytesIO(cmd_encode).readline)
if selector is None:
screen_tokens = [COLON,]
else:
# try:
# slicer_encode = selector.encode('utf-8')
# except AttributeError:
# slicer_encode = str(selector).encode('utf-8')
# screen_tokens = [(toknum, tokval) for toknum, tokval, _, _, _ in tokenize(BytesIO(slicer_encode).readline)]
screen_tokens = [(NAME, 'selector'), ]
for toknum, tokval, _, _, _ in g:
if toknum == NAME and tokval in self._groupnode:
# replace NAME tokens
partial = [(NAME, 'self'), DOT, (NAME, '_groupnode'), DOT, (NAME, tokval), OBRAC, ]
partial += screen_tokens
if len(self._groupnode._v_children[tokval].shape)>1:
partial += [COMMA, COLON, ]
if len(self._groupnode._v_children[tokval].shape)>2:
partial += [COMMA, COLON, ]
if len(self._groupnode._v_children[tokval].shape)>3:
partial += [COMMA, COLON, ]
partial += [CBRAC,]
recommand.extend(partial)
else:
recommand.append((toknum, tokval))
# print("<recommand>")
# print(recommand)
# print("</recommand>")
ret = untokenize(recommand).decode('utf-8')
return asterize(ret, mode="exec" if receiver is not None else "eval"), ret
def _evaluate_single_item(self, cmd, selector=None, receiver=None):
j, j_plain = self._remake_command(cmd, selector=selector, receiver='receiver' if receiver is not None else None)
# important globals
from ....util.aster import inXd
from numpy import log, exp, log1p, absolute, fabs, sqrt, isnan, isfinite, logaddexp, fmin, fmax, nan_to_num, sin, cos, pi
from ....util.common_functions import piece, normalize, boolean
try:
if receiver is not None:
exec(j)
else:
return eval(j)
except Exception as exc:
args = exc.args
if not args:
arg0 = ''
else:
arg0 = args[0]
arg0 = arg0 + '\nwithin parsed command: "{!s}"'.format(cmd)
arg0 = arg0 + '\nwithin re-parsed command: "{!s}"'.format(j_plain)
if selector is not None:
arg0 = arg0 + '\nwith selector: "{!s}"'.format(selector)
if "max" in cmd:
arg0 = arg0 + '\n(note to get the maximum of arrays use "fmax" not "max")'.format(cmd)
if "min" in cmd:
arg0 = arg0 + '\n(note to get the minimum of arrays use "fmin" not "min")'.format(cmd)
if isinstance(exc, NameError):
badname = str(exc).split("'")[1]
goodnames = dir()
from ....util.text_manip import case_insensitive_close_matches
did_you_mean_list = case_insensitive_close_matches(badname, goodnames, n=3, cutoff=0.1, excpt=None)
if len(did_you_mean_list) > 0:
arg0 = arg0 + '\n' + "did you mean {}?".format(
" or ".join("'{}'".format(s) for s in did_you_mean_list))
exc.args = (arg0,) + args[1:]
raise
def __contains__(self, item):
if item in self._groupnode:
return True
return False
def dtype_of(self, name):
"""dtype of raw data for a particular named data item."""
if name in self._groupnode._v_children:
return self._groupnode._v_children[name].dtype
raise KeyError(f"{name} not found")
def load_data_item(self, name, result, selector=None):
"""Load a slice of the pod arrays into an array in memory"""
# convert a single name string to a one item list
_sqz_same(result.shape, [selector_len_for(selector, self.shape[0]), *self.shape[1:]])
try:
result[:] = self._evaluate_single_item(name, selector)
except IndexError:
# https://github.com/PyTables/PyTables/issues/310
_temp = self._evaluate_single_item(name, None)
try:
result[:] = _temp[selector]
except Exception as err:
raise ValueError(f'_temp.shape={_temp.shape} selector.shape={selector.shape}') from err
return result
def load_meta_data_item(self, name, result, selector=None):
"""Load a slice of the pod arrays into an array in memory"""
if selector is not None:
import warnings
warnings.warn('selector not compatible for load_meta_data_item')
# convert a single name string to a one item list
_sqz_same(result.shape, self.metashape)
try:
result[:] = self._evaluate_single_item(name, selector)
except IndexError:
# https://github.com/PyTables/PyTables/issues/310
result[:] = self._evaluate_single_item(name, None)[selector]
return result
def get_data_dictionary(self, name):
"""dictionary of raw data for a particular named data item."""
if name in self._groupnode._v_children:
return self._groupnode._v_children[name].DICTIONARY
raise KeyError(f"{name} not found")
def __getitem__(self, item):
if isinstance(item, tuple) and len(item)>=2 and isinstance(item[-1], slice):
names, slice_ = item[:-1], item[-1]
else:
names = item
slice_ = None
# convert a single name string to a one item list
if isinstance(names, str):
names = [names,]
dtype = numpy.float64
result = numpy.zeros( [selector_len_for(slice_, self.shape[0]), *self.shape[1:], len(names)], dtype=dtype)
for i, cmd in enumerate(names):
result[...,i] = self._evaluate_single_item(cmd, slice_)
return result
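    # e.g. (illustrative, names made up): pod['age', 'log1p(income)'] returns a
    # float64 array of shape (n_cases, 2); pod['age', :1000] returns the first
    # 1000 cases of a single variable as an (n, 1) array.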
def _load_into(self, names, slc, result):
"""Load a slice of the pod arrays into an array in memory"""
# convert a single name string to a one item list
if isinstance(names, str):
names = [names, ]
_sqz_same(result.shape,[selector_len_for(slc, self.shape[0]), *self.shape[1:], len(names)])
for i, cmd in enumerate(names):
result[..., i] = self._evaluate_single_item(cmd, slc)
return result
def load_into(self, names, selector, result):
"""Load a slice of the pod arrays into an array in memory"""
# convert a single name string to a one item list
if isinstance(names, str):
names = [names, ]
_sqz_same(result.shape, [selector_len_for(selector, self.shape[0]), *self.shape[1:], len(names)])
for i, cmd in enumerate(names):
result[..., i] = self._evaluate_single_item(cmd, selector)
return result
def __getattr__(self, item):
if item in self._groupnode._v_children:
ret = self._groupnode._v_children[item]
if isinstance(ret, tb.CArray):
ret.__class__ = CArray
return ret
raise AttributeError(item)
class _dataframe_factory():
def __init__(self, obj):
self.obj = obj
def __getattr__(self, item):
return getattr(self.obj,item)
def __getitem__(self, item):
if len(self.obj.shape) > 1:
try:
metashape = self.obj.metashape
except AttributeError:
raise TypeError('dataframe access currently only compatible with 1d, use regular arrays for higher dimensions')
else:
if len(metashape) > 1:
raise TypeError('dataframe access currently only compatible with 1d, use regular arrays for higher dimensions')
if isinstance(item, tuple) and len(item)>=2 and isinstance(item[-1], slice):
names, slice_ = item[:-1], item[-1]
else:
names = item
slice_ = None
# convert a single name string to a one item list
if isinstance(names, str):
names = [names,]
result = pandas.DataFrame()
for i, cmd in enumerate(names):
j = self.obj._evaluate_single_item(cmd, selector=slice_)
try:
#result.loc[:,cmd] = j
result = result.assign(**{cmd:j})
except:
print()
print(f"An error in tacking {cmd} to result")
print(f"j.dtype is {j.dtype}")
print(f"j.shape is {j.shape}")
print(f"result.shape is {result.shape}")
print()
raise
return result
@property
def dataframe(self):
return self._dataframe_factory(self)
def astype(self, t:str):
from . import _pod_types
cls = _pod_types[t.lower()]
return cls(self)
def statistics_for(self, var, histogram=True, selector=None, **kwargs):
a = self.get_data_item(var)
if isinstance(selector, str):
selector = self.get_data_item(selector, None, dtype=bool)
if selector is not None:
a = a[selector]
from ....util.statistics import statistics_for_array
try:
dictionary = self._groupnode._v_children[var]._v_attrs.DICTIONARY
except:
dictionary = None
try:
descrip = self._groupnode._v_children[var]._v_attrs.TITLE
except:
descrip = None
result = statistics_for_array(a, histogram=histogram, varname=var, dictionary=dictionary, **kwargs)
if descrip is not None and descrip!="":
result.description = descrip
if dictionary is not None:
result.dictionary = Dict(dictionary)
return result
def statistics(self, vars=None, histogram=False, selector=None):
if vars is None:
vars = self.names()
from ....util import Dict
from ....util.arraytools import scalarize
import numpy.ma as ma
stats = pandas.DataFrame(
columns=[
'n',
'minimum',
'maximum',
'median',
'mean',
'stdev',
'nonzero_minimum',
'nonzero_maximum',
'nonzero_mean',
'nonzero_stdev',
'zeros',
'positives',
'negatives',
] + (['histogram'] if histogram else []),
index = vars,
)
for var in vars:
if selector is not None:
if isinstance(selector, slice):
a = self[var, selector]
else:
a = self[var][selector]
else:
a = self[var]
stats.loc[var,'n'] = scalarize(a.shape[0])
stats.loc[var,'minimum'] = scalarize(numpy.nanmin(a, axis=0))
stats.loc[var,'maximum'] = scalarize(numpy.nanmax(a, axis=0))
stats.loc[var,'median'] = scalarize(numpy.nanmedian(a, axis=0))
if histogram:
from ....util.histograms import sizable_histogram_figure, seems_like_discrete_data
try:
dictionary = self.get_data_dictionary(var)
except:
dictionary = None
stats.loc[var,'histogram'] = sizable_histogram_figure(
a,
title=None, xlabel=var, ylabel='Frequency',
discrete=seems_like_discrete_data(a, dictionary)
)
ax = ma.masked_array(a, mask=~numpy.isfinite(a))
stats.loc[var,'mean'] = scalarize(numpy.mean(ax, axis=0))
stats.loc[var,'stdev'] = scalarize(numpy.std(ax, axis=0))
stats.loc[var, 'zeros'] = scalarize(numpy.sum(numpy.logical_not(ax), axis=0))
stats.loc[var, 'positives'] = scalarize(numpy.sum(ax>0, axis=0))
stats.loc[var, 'negatives'] = scalarize(numpy.sum(ax<0, axis=0))
ax.mask |= (ax==0)
stats.loc[var,'nonzero_minimum'] = scalarize(numpy.min(ax, axis=0))
stats.loc[var,'nonzero_maximum'] = scalarize(numpy.max(ax, axis=0))
stats.loc[var,'nonzero_mean'] = scalarize(numpy.mean(ax, axis=0))
stats.loc[var,'nonzero_stdev'] = scalarize(numpy.std(ax, axis=0))
if histogram:
from ....util.dataframe import DataFrameViewer
return DataFrameViewer(stats)
return stats
def get_row(self, rownum, lookup=True):
result = Dict()
for i in self.names():
result[i] = self._groupnode._v_children[i][rownum]
if lookup:
try:
d = self._groupnode._v_children[i]._v_attrs.DICTIONARY
except (KeyError, AttributeError):
pass
else:
if result[i] in d:
result[i] = f"{result[i]} ({d[result[i]]})"
return result
@property
def vault(self):
try:
return self.__vault
except:
from ..h5util import get_or_create_group
from ..h5vault import H5Vault
v = get_or_create_group( self._h5f, self._groupnode, name='_VAULT_', title='', filters=None, createparents=False, skip_on_readonly=False )
self.__vault = H5Vault(v)
return self.__vault
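# Illustrative end-to-end sketch (assumptions: the file name, group node and
# variable names below are made up; only methods documented above are used).
def _h5pod_usage_sketch(filename='example.h5d'):
    pod = H5Pod(filename, mode='a', groupnode='/data', shape=(100,))
    pod.add_blank('income', dtype=numpy.float64)                   # zero-filled variable
    pod.add_array('age', numpy.arange(100), title='age in years', overwrite=True)
    pod.add_expression('log_age', 'log1p(age)')                    # derived variable
    names = pod.names()
    pod.close()
    return names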
|
gpl-3.0
|
setten/pymatgen
|
pymatgen/analysis/surface_analysis.py
|
3
|
21488
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import copy
import numpy as np
from scipy.stats import linregress
from matplotlib import cm
import itertools
import warnings
from pymatgen.core.structure import Structure, Composition
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.core.surface import Slab
from pymatgen.analysis.wulff import WulffShape
from pymatgen import MPRester
from pymatgen.analysis.phase_diagram import PhaseDiagram
from pymatgen import Element
from pymatgen.util.plotting import pretty_plot
__author__ = "Richard Tran"
__copyright__ = "Copyright 2014, The Materials Virtual Lab"
__version__ = "0.1"
__maintainer__ = "Richard Tran"
__email__ = "[email protected]"
__date__ = "8/24/17"
class SurfaceEnergyAnalyzer(object):
"""
A class used for analyzing the surface energies of a material of a given
material_id. By default, this will use entries calculated from the
Materials Project to obtain chemical potential and bulk energy. As a
result, the difference in VASP parameters between the user's entry
(vasprun_dict) and the parameters used by Materials Project may lead
to a rough estimate of the surface energy. For best results, it is
recommended that the user calculate all decomposition components first,
and insert the results into their own database as a pymatgen-db entry
and use those entries instead (custom_entries). In addition, this code
will only use one bulk entry to calculate surface energy. Ideally, to
get the most accurate surface energy, the user should compare their
slab energy to the energy of the oriented unit cell with both calculations
containing consistent k-points to avoid convergence problems as the
slab size is varied. See:
Sun, W.; Ceder, G. Efficient creation and convergence of surface slabs,
Surface Science, 2013, 617, 53–59, doi:10.1016/j.susc.2013.05.016.
and
Rogal, J., & Reuter, K. (2007). Ab Initio Atomistic Thermodynamics for
Surfaces : A Primer. Experiment, Modeling and Simulation of Gas-Surface
Interactions for Reactive Flows in Hypersonic Flights, 2–1 – 2–18.
.. attribute:: ref_element
All chemical potentials can be written in terms of the range of chemical
potential of this element which will be used to calculate surface energy.
.. attribute:: mprester
Materials project rester for querying entries from the materials project.
Requires user MAPIKEY.
.. attribute:: ucell_entry
Materials Project entry of the material of the slab.
.. attribute:: x
Reduced amount composition of decomposed compound A in the bulk.
.. attribute:: y
Reduced amount composition of ref_element in the bulk.
.. attribute:: gbulk
Gibbs free energy of the bulk per formula unit
.. attribute:: chempot_range
List of the min and max chemical potential of ref_element.
.. attribute:: e_of_element
Energy per atom of ground state ref_element, e.g. if ref_element=O,
then e_of_element=1/2*E_O2.
.. attribute:: vasprun_dict
Dictionary containing a list of Vaspruns for slab calculations as
items and the corresponding Miller index of the slab as the key
"""
def __init__(self, material_id, vasprun_dict, ref_element,
exclude_ids=[], custom_entries=[], mapi_key=None):
"""
Analyzes surface energies and Wulff shape of a particular
material using the chemical potential.
Args:
material_id (str): Materials Project material_id (a string,
e.g., mp-1234).
vasprun_dict (dict): Dictionary containing a list of Vaspruns
for slab calculations as items and the corresponding Miller
index of the slab as the key.
eg. vasprun_dict = {(1,1,1): [vasprun_111_1, vasprun_111_2,
vasprun_111_3], (1,1,0): [vasprun_111_1, vasprun_111_2], ...}
ref_element (str): element whose chemical potential is treated as the
independent variable, e.g. use Li if you want to show the stability
ranges of all Li-Co-O phases wrt u_Li
exclude_ids (list of material_ids): List of material_ids
to exclude when obtaining the decomposition components
to calculate the chemical potential
custom_entries (list of pymatgen-db type entries): List of
user specified pymatgen-db type entries to use in finding
decomposition components for the chemical potential
mapi_key (str): Materials Project API key for accessing the
MP database via MPRester
"""
self.ref_element = ref_element
self.mprester = MPRester(mapi_key) if mapi_key else MPRester()
self.ucell_entry = \
self.mprester.get_entry_by_material_id(material_id,
inc_structure=True,
property_data=
["formation_energy_per_atom"])
ucell = self.ucell_entry.structure
# Get x and y, the number of species in a formula unit of the bulk
reduced_comp = ucell.composition.reduced_composition.as_dict()
if len(reduced_comp.keys()) == 1:
x = y = reduced_comp[ucell[0].species_string]
else:
for el in reduced_comp.keys():
if self.ref_element == el:
y = reduced_comp[el]
else:
x = reduced_comp[el]
# Calculate Gibbs free energy of the bulk per unit formula
gbulk = self.ucell_entry.energy /\
(len([site for site in ucell
if site.species_string == self.ref_element]) / y)
entries = [entry for entry in
self.mprester.get_entries_in_chemsys(list(reduced_comp.keys()),
property_data=["e_above_hull",
"material_id"])
if entry.data["e_above_hull"] == 0 and
entry.data["material_id"] not in exclude_ids] \
if not custom_entries else custom_entries
pd = PhaseDiagram(entries)
chempot_ranges = pd.get_chempot_range_map([Element(self.ref_element)])
# If no chemical potential is found, we return u=0, e.g.
# for an elemental system, the relative u of Cu for Cu is 0
chempot_range = [chempot_ranges[entry] for entry in chempot_ranges.keys()
if entry.composition ==
self.ucell_entry.composition][0][0]._coords if \
chempot_ranges else [[0,0], [0,0]]
e_of_element = [entry.energy_per_atom for entry in
entries if str(entry.composition.reduced_composition)
== self.ref_element + "1"][0]
self.x = x
self.y = y
self.gbulk = gbulk
chempot_range = list(chempot_range)
self.chempot_range = sorted([chempot_range[0][0], chempot_range[1][0]])
self.e_of_element = e_of_element
self.vasprun_dict = vasprun_dict
def calculate_gamma(self, vasprun):
"""
Calculates the surface energy for a single slab.
Args:
vasprun (Vasprun): A Vasprun object
Returns (list): The surface energies evaluated at the minimum and
maximum chemical potential, i.e. at the two endpoints of
chempot_range.
"""
reduced_comp = self.ucell_entry.composition.reduced_composition.as_dict()
# Get the composition in the slab
slab = vasprun.final_structure
comp = slab.composition.as_dict()
if len(reduced_comp.keys()) == 1:
Ny = comp[self.ucell_entry.structure[0].species_string]
Nx = Ny
else:
for el in reduced_comp.keys():
if self.ref_element == el:
Ny = comp[el]
else:
Nx = comp[el]
# Calculate surface area
m = slab.lattice.matrix
A = np.linalg.norm(np.cross(m[0], m[1]))
# calculate the surface energy for the max and min chemical potential
return [(1 / (2 * A)) * (vasprun.final_energy - (Nx / self.x)
* self.gbulk - (Ny - (self.y / self.x) * Nx)
* (delu + self.e_of_element))
for delu in self.chempot_range]
def wulff_shape_from_chempot(self, chempot, symprec=1e-5):
"""
Method to get the Wulff shape at a specific chemical potential.
Args:
chempot (float): The chemical potential at which to evaluate the Wulff shape.
symprec (float): Symmetry precision passed on to WulffShape (default 1e-5).
"""
# Check if the user provided chemical potential is within the
# predetermined range of chemical potential. If not, raise a warning
if not max(self.chempot_range) >= chempot >= min(self.chempot_range):
warnings.warn("The provided chemical potential is outside the range "
"of chemical potential (%s to %s). The resulting Wulff "
"shape might not be reasonable." %(min(self.chempot_range),
max(self.chempot_range)))
latt = SpacegroupAnalyzer(self.ucell_entry.structure).\
get_conventional_standard_structure().lattice
miller_list = self.vasprun_dict.keys()
e_surf_list = []
for hkl in miller_list:
# At each possible configuration, we calculate surface energy as a
# function of u and take the lowest surface energy (corresponds to
# the most stable slab termination at that particular u)
surf_e_range_list = [self.calculate_gamma(vasprun)
for vasprun in self.vasprun_dict[hkl]]
e_list = []
for e_range in surf_e_range_list:
slope, intercept = self.get_slope_and_intercept(e_range)
e_list.append(slope * chempot + intercept)
e_surf_list.append(min(e_list))
return WulffShape(latt, miller_list, e_surf_list, symprec=symprec)
def wulff_shape_dict(self, symprec=1e-5, at_intersections=False):
"""
As the surface energy is a function of chemical potential, so too is the
Wulff shape. This method generates a dictionary of Wulff shapes at
certain chemical potentials where a facet goes through a transition.
Returns a dict, eg. {chempot1: WulffShape1, chempot2: WulffShape2}
Args:
symprec (float): Symmetry precision passed on to WulffShape, default is 1e-5.
at_intersections (bool): Whether to generate a Wulff shape for each
intersection of surface energy for a specific facet (eg. at the
point where a (111) stoichiometric surface energy plot intersects
with the (111) nonstoichiometric plot) or to just generate two
Wulff shapes, one at the min and max chemical potential.
"""
# First lets get the Wulff shape at the
# minimum and maximum chemical potential
wulff_dict = {self.chempot_range[0]: \
self.wulff_shape_from_chempot(self.chempot_range[0],
symprec=symprec),
self.chempot_range[1]: \
self.wulff_shape_from_chempot(self.chempot_range[1],
symprec=symprec)}
# Now we get the Wulff shape each time a facet changes its configuration
# (ie, adsorption coverage, stoichiometric to nonstoichiometric, etc)
if at_intersections:
# Get all values of chemical potential where an intersection occurs
u_at_intersection = [self.get_intersections(hkl)[0] for hkl in
self.vasprun_dict.keys()
if self.get_intersections(hkl)]
# Get a Wulff shape for each intersection. The change in the Wulff shape
# will vary if the rate of change in surface energy for any facet changes
for u in u_at_intersection:
wulff_dict[u] = self.wulff_shape_from_chempot(u, symprec=symprec)
return wulff_dict
def get_slope_and_intercept(self, surf_e_pair):
"""
Returns the slope and intercept of the surface
energy vs chemical potential line
Args:
surf_e_pair ([e_at_min_u, e_at_max_u]): The surface energy at the
minimum chemical potential and maximum chemical potential
"""
slope, intercept, r_value, p_value, std_err = \
linregress(self.chempot_range, surf_e_pair)
slope = 0 if str(slope) == 'nan' else slope
intercept = surf_e_pair[0] if str(intercept) == 'nan' else intercept
return slope, intercept
def get_intersections(self, miller_index):
"""
Returns all intersections for a specific facet. Useful for
finding when the configuration of a particular facet changes.
Args:
miller_index ((h, k, l)): Miller index of the facet we
are interested in
"""
# First lets calculate the range of surface
# energies for all terminations of a specific facet
all_se_ranges = [self.calculate_gamma(vasprun) for vasprun
in self.vasprun_dict[miller_index]]
if len(all_se_ranges) == 1:
return []
# Now get all possible intersection coordinates for each pair of lines
intersections = []
for pair_ranges in itertools.combinations(all_se_ranges, 2):
slope1, intercept1 = self.get_slope_and_intercept(pair_ranges[0])
slope2, intercept2 = self.get_slope_and_intercept(pair_ranges[1])
# Calculate the intersection coordinates
u = (intercept1-intercept2)/(slope2-slope1)
# if the intersection is beyond the chemical potential
# range or if the lines are parallel, we ignore it
if slope1-slope2 == 0 or u < min(self.chempot_range) \
or u > max(self.chempot_range):
continue
intersections.append([u, slope1 * u + intercept1])
return sorted(intersections, key=lambda ints: ints[0])
def area_frac_vs_chempot_plot(self, cmap=cm.jet, at_intersections=False,
increments=10):
"""
Plots the change in the area contribution of
each facet as a function of chemical potential.
Args:
cmap (cm): A matplotlib colormap object, defaults to jet.
at_intersections (bool): Whether to generate a Wulff shape for each
intersection of surface energy for a specific facet (eg. at the
point where a (111) stoichiometric surface energy plot intersects
with the (111) nonstoichiometric plot) or to just generate two
Wulff shapes, one at the min and max chemical potential.
increments (int): Number of data points between the min/max chemical
potential or between points of intersection. Defaults to 10 points.
"""
# Choose unique colors for each facet
f = [int(i) for i in np.linspace(0, 255, len(self.vasprun_dict.keys()))]
# Get all points of min/max chempot and intersections
chempot_intersections = []
chempot_intersections.extend(self.chempot_range)
for hkl in self.vasprun_dict.keys():
chempot_intersections.extend([ints[0] for ints in
self.get_intersections(hkl)])
chempot_intersections = sorted(chempot_intersections)
# Get all chempots
if at_intersections:
all_chempots = []
for i, intersection in enumerate(chempot_intersections):
if i < len(chempot_intersections)-1:
all_chempots.extend(np.linspace(intersection,
chempot_intersections[i+1],
increments))
else:
all_chempots = np.linspace(min(self.chempot_range),
max(self.chempot_range), increments)
# initialize a dictionary of lists of fractional areas for each hkl
hkl_area_dict = {}
for hkl in self.vasprun_dict.keys():
hkl_area_dict[hkl] = []
# Get plot points for each Miller index
for u in all_chempots:
wulffshape = self.wulff_shape_from_chempot(u)
for hkl in wulffshape.area_fraction_dict.keys():
hkl_area_dict[hkl].append(wulffshape.area_fraction_dict[hkl])
# Plot the area fraction vs chemical potential for each facet
plt = pretty_plot()
for i, hkl in enumerate(self.vasprun_dict.keys()):
# Ignore any facets that never show up on the
# Wulff shape regardless of chemical potential
if all([a == 0 for a in hkl_area_dict[hkl]]):
continue
else:
plt.plot(all_chempots, hkl_area_dict[hkl],
'--', color=cmap(f[i]), label=str(hkl))
# Make the figure look nice
plt.ylim([0,1])
plt.xlim(self.chempot_range)
plt.ylabel(r"Fractional area $A^{Wulff}_{hkl}/A^{Wulff}$")
plt.xlabel(r"Chemical potential $\Delta\mu_{%s}$ (eV)" %(self.ref_element))
plt.legend(bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0.)
return plt
def chempot_vs_gamma_plot(self, cmap=cm.jet, show_unstable_points=False):
"""
Plots the surface energy of all facets as a function of chemical potential.
Each facet will be associated with its own distinct colors. Dashed lines
will represent stoichiometries different from that of the mpid's compound.
Args:
cmap (cm): A matplotlib colormap object, defaults to jet.
show_unstable_points (bool): For each facet, there may be various
terminations or stoichiometries and the relative stability of
these different slabs may change with chemical potential. This
option will only plot the most stable surface energy for a
given chemical potential.
"""
plt = pretty_plot()
# Choose unique colors for each facet
f = [int(i) for i in np.linspace(0, 255, sum([len(vaspruns) for vaspruns in
self.vasprun_dict.values()]))]
i, already_labelled, colors = 0, [], []
for hkl in self.vasprun_dict.keys():
for vasprun in self.vasprun_dict[hkl]:
slab = vasprun.final_structure
# Generate a label for the type of slab
label = str(hkl)
# use dashed lines for slabs that are not stoichiometric
# wrt bulk. Label with formula if nonstoichiometric
if slab.composition.reduced_composition != \
self.ucell_entry.composition.reduced_composition:
mark = '--'
label += " %s" % (slab.composition.reduced_composition)
else:
mark = '-'
# label the chemical environment at the surface if different from the bulk.
# First get the surface sites, then get the reduced composition at the surface
# s = vasprun.final_structure
# ucell = SpacegroupAnalyzer(self.ucell_entry.structure).\
# get_conventional_standard_structure()
# slab = Slab(s.lattice, s.species, s.frac_coords, hkl, ucell, 0, None)
# surf_comp = slab.surface_composition()
#
# if surf_comp.reduced_composition != ucell.composition.reduced_composition:
# label += " %s" %(surf_comp.reduced_composition)
if label in already_labelled:
c = colors[already_labelled.index(label)]
label = None
else:
already_labelled.append(label)
c = cmap(f[i])
colors.append(c)
se_range = self.calculate_gamma(vasprun)
plt.plot(self.chempot_range, se_range, mark, color=c, label=label)
i += 1
# Make the figure look nice
axes = plt.gca()
ylim = axes.get_ylim()
plt.ylim(ylim)
plt.xlim(self.chempot_range)
plt.ylabel(r"Surface energy (eV/$\AA$)")
plt.xlabel(r"Chemical potential $\Delta\mu_{%s}$ (eV)" %(self.ref_element))
plt.legend(bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0.)
return plt
def broken_bond_vs_gamma(self):
return
|
mit
|
phdowling/scikit-learn
|
examples/linear_model/lasso_dense_vs_sparse_data.py
|
348
|
1862
|
"""
==============================
Lasso on dense and sparse data
==============================
We show that linear_model.Lasso provides the same results for dense and sparse
data and that in the case of sparse data the speed is improved.
"""
print(__doc__)
from time import time
from scipy import sparse
from scipy import linalg
from sklearn.datasets.samples_generator import make_regression
from sklearn.linear_model import Lasso
###############################################################################
# The two Lasso implementations on Dense data
print("--- Dense matrices")
X, y = make_regression(n_samples=200, n_features=5000, random_state=0)
X_sp = sparse.coo_matrix(X)
alpha = 1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
t0 = time()
sparse_lasso.fit(X_sp, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(X, y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
###############################################################################
# The two Lasso implementations on Sparse data
print("--- Sparse matrices")
Xs = X.copy()
Xs[Xs < 2.5] = 0.0
Xs = sparse.coo_matrix(Xs)
Xs = Xs.tocsc()
print("Matrix density : %s %%" % (Xs.nnz / float(X.size) * 100))
alpha = 0.1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
t0 = time()
sparse_lasso.fit(Xs, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(Xs.toarray(), y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
|
bsd-3-clause
|
lioritan/Thesis
|
small_datasets_maker.py
|
1
|
2307
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 03 18:31:46 2015
@author: liorf
"""
import cPickle
from random import sample
from numpy import *
from sklearn.datasets import fetch_20newsgroups
def make_subset(data, labels, cat_size, legal_cats=None):
'''
cat_size= desired size for each label
Note: this expects data only from labels given.
'''
# print len(data), len(labels)
new_data= []
new_labels= []
categories= frozenset(labels)
if legal_cats is not None:
categories= frozenset(legal_cats)
for cat in categories:
inds= nonzero(labels==cat)[0]  # indices of samples belonging to this category
sub_inds= sample(inds, cat_size)
for ind in sub_inds:
new_data.append(data[ind])
new_labels.append(labels[ind])
return array(new_data, dtype=object), array(new_labels)
#pick cat_size inds at random, then put them in...
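# Usage sketch (mirrors the commented-out blocks below; assumes scikit-learn's
# 20 newsgroups download is available):
#   newsgroups = fetch_20newsgroups(subset='all')
#   docs = array([s.lower().split(' ') for s in newsgroups.data], dtype=object)
#   data, labels = make_subset(docs, newsgroups.target, cat_size=100)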
if __name__=='__main__':
pass
#do for OHSUMED, OHSUMED titles only, 20NG
#50 train for each cat+50 test -> 100xnum_cats
#ohsumed datasets: needs more things (need to first filter out the categories!)
# with open('./problems/ohsumed_dataset_parsed.pkl', 'rb') as fptr:
# ((data, labels), (_,_))= cPickle.load(fptr)
# data,labels= array(data), array(labels)
# (data, labels)= make_subset(data, labels, 100, [1,4,6,8,10,12,14,20,21,23])
# with open('./problems/ohsumed_small_subset.pkl','wb') as fptt:
# cPickle.dump((data,labels), fptt, -1)
# print 'one'
# with open('./problems/ohsumed_titles_parsed_complete.pkl', 'rb') as fptr:
# (data, labels)= cPickle.load(fptr)
# data,labels= array(data), array(labels)
# (data, labels)= make_subset(data, labels, 100, [1, 4, 6, 8, 10, 13, 14, 17, 20, 23])
# with open('./problems/ohsumed_titles_only_small_subset.pkl','wb') as fptt:
# cPickle.dump((data,labels), fptt, -1)
# print 'two'
# newsgroups = fetch_20newsgroups(subset='all', remove=('headers', 'footers', 'quotes'))
# fixed_data = array([s.lower().replace('\n','').split(' ') for s in newsgroups.data])
# (data, labels)= make_subset(fixed_data, newsgroups.target, 100)
# with open('./problems/20NG_small_subset.pkl', 'wb') as fptr:
# cPickle.dump((data, labels), fptr, -1)
# print 'three'
|
gpl-2.0
|
sunzhxjs/JobGIS
|
lib/python2.7/site-packages/pandas/tseries/tests/test_holiday.py
|
9
|
14516
|
from datetime import datetime
import pandas.util.testing as tm
from pandas import compat
from pandas import DatetimeIndex
from pandas.tseries.holiday import (
USFederalHolidayCalendar, USMemorialDay, USThanksgivingDay,
nearest_workday, next_monday_or_tuesday, next_monday,
previous_friday, sunday_to_monday, Holiday, DateOffset,
MO, Timestamp, AbstractHolidayCalendar, get_calendar,
HolidayCalendarFactory, next_workday, previous_workday,
before_nearest_workday, EasterMonday, GoodFriday,
after_nearest_workday, weekend_to_monday, USLaborDay,
USColumbusDay, USMartinLutherKingJr, USPresidentsDay)
from pytz import utc
import nose
class TestCalendar(tm.TestCase):
def setUp(self):
self.holiday_list = [
datetime(2012, 1, 2),
datetime(2012, 1, 16),
datetime(2012, 2, 20),
datetime(2012, 5, 28),
datetime(2012, 7, 4),
datetime(2012, 9, 3),
datetime(2012, 10, 8),
datetime(2012, 11, 12),
datetime(2012, 11, 22),
datetime(2012, 12, 25)]
self.start_date = datetime(2012, 1, 1)
self.end_date = datetime(2012, 12, 31)
def test_calendar(self):
calendar = USFederalHolidayCalendar()
holidays = calendar.holidays(self.start_date,
self.end_date)
holidays_1 = calendar.holidays(
self.start_date.strftime('%Y-%m-%d'),
self.end_date.strftime('%Y-%m-%d'))
holidays_2 = calendar.holidays(
Timestamp(self.start_date),
Timestamp(self.end_date))
self.assertEqual(list(holidays.to_pydatetime()),
self.holiday_list)
self.assertEqual(list(holidays_1.to_pydatetime()),
self.holiday_list)
self.assertEqual(list(holidays_2.to_pydatetime()),
self.holiday_list)
def test_calendar_caching(self):
# Test for issue #9552
class TestCalendar(AbstractHolidayCalendar):
def __init__(self, name=None, rules=None):
super(TestCalendar, self).__init__(
name=name,
rules=rules
)
jan1 = TestCalendar(rules=[Holiday('jan1', year=2015, month=1, day=1)])
jan2 = TestCalendar(rules=[Holiday('jan2', year=2015, month=1, day=2)])
tm.assert_index_equal(
jan1.holidays(),
DatetimeIndex(['01-Jan-2015'])
)
tm.assert_index_equal(
jan2.holidays(),
DatetimeIndex(['02-Jan-2015'])
)
def test_calendar_observance_dates(self):
# Test for issue 11477
USFedCal = get_calendar('USFederalHolidayCalendar')
holidays0 = USFedCal.holidays(datetime(2015,7,3), datetime(2015,7,3)) # <-- same start and end dates
holidays1 = USFedCal.holidays(datetime(2015,7,3), datetime(2015,7,6)) # <-- different start and end dates
holidays2 = USFedCal.holidays(datetime(2015,7,3), datetime(2015,7,3)) # <-- same start and end dates
tm.assert_index_equal(holidays0, holidays1)
tm.assert_index_equal(holidays0, holidays2)
def test_rule_from_name(self):
USFedCal = get_calendar('USFederalHolidayCalendar')
self.assertEqual(USFedCal.rule_from_name('Thanksgiving'), USThanksgivingDay)
class TestHoliday(tm.TestCase):
def setUp(self):
self.start_date = datetime(2011, 1, 1)
self.end_date = datetime(2020, 12, 31)
def check_results(self, holiday, start, end, expected):
self.assertEqual(list(holiday.dates(start, end)), expected)
# Verify that timezone info is preserved.
self.assertEqual(
list(
holiday.dates(
utc.localize(Timestamp(start)),
utc.localize(Timestamp(end)),
)
),
[utc.localize(dt) for dt in expected],
)
def test_usmemorialday(self):
self.check_results(
holiday=USMemorialDay,
start=self.start_date,
end=self.end_date,
expected=[
datetime(2011, 5, 30),
datetime(2012, 5, 28),
datetime(2013, 5, 27),
datetime(2014, 5, 26),
datetime(2015, 5, 25),
datetime(2016, 5, 30),
datetime(2017, 5, 29),
datetime(2018, 5, 28),
datetime(2019, 5, 27),
datetime(2020, 5, 25),
],
)
def test_non_observed_holiday(self):
self.check_results(
Holiday('July 4th Eve', month=7, day=3),
start="2001-01-01",
end="2003-03-03",
expected=[
Timestamp('2001-07-03 00:00:00'),
Timestamp('2002-07-03 00:00:00')
]
)
self.check_results(
Holiday('July 4th Eve', month=7, day=3, days_of_week=(0, 1, 2, 3)),
start="2001-01-01",
end="2008-03-03",
expected=[
Timestamp('2001-07-03 00:00:00'),
Timestamp('2002-07-03 00:00:00'),
Timestamp('2003-07-03 00:00:00'),
Timestamp('2006-07-03 00:00:00'),
Timestamp('2007-07-03 00:00:00'),
]
)
def test_easter(self):
self.check_results(
EasterMonday,
start=self.start_date,
end=self.end_date,
expected=[
Timestamp('2011-04-25 00:00:00'),
Timestamp('2012-04-09 00:00:00'),
Timestamp('2013-04-01 00:00:00'),
Timestamp('2014-04-21 00:00:00'),
Timestamp('2015-04-06 00:00:00'),
Timestamp('2016-03-28 00:00:00'),
Timestamp('2017-04-17 00:00:00'),
Timestamp('2018-04-02 00:00:00'),
Timestamp('2019-04-22 00:00:00'),
Timestamp('2020-04-13 00:00:00'),
],
)
self.check_results(
GoodFriday,
start=self.start_date,
end=self.end_date,
expected=[
Timestamp('2011-04-22 00:00:00'),
Timestamp('2012-04-06 00:00:00'),
Timestamp('2013-03-29 00:00:00'),
Timestamp('2014-04-18 00:00:00'),
Timestamp('2015-04-03 00:00:00'),
Timestamp('2016-03-25 00:00:00'),
Timestamp('2017-04-14 00:00:00'),
Timestamp('2018-03-30 00:00:00'),
Timestamp('2019-04-19 00:00:00'),
Timestamp('2020-04-10 00:00:00'),
],
)
def test_usthanksgivingday(self):
self.check_results(
USThanksgivingDay,
start=self.start_date,
end=self.end_date,
expected=[
datetime(2011, 11, 24),
datetime(2012, 11, 22),
datetime(2013, 11, 28),
datetime(2014, 11, 27),
datetime(2015, 11, 26),
datetime(2016, 11, 24),
datetime(2017, 11, 23),
datetime(2018, 11, 22),
datetime(2019, 11, 28),
datetime(2020, 11, 26),
],
)
def test_holidays_within_dates(self):
# Fix holiday behavior found in #11477
# where holiday.dates returned dates outside start/end date
# or observed rules could not be applied as the holiday
# was not in the original date range (e.g., 7/4/2015 -> 7/3/2015)
start_date = datetime(2015, 7, 1)
end_date = datetime(2015, 7, 1)
calendar = get_calendar('USFederalHolidayCalendar')
new_years = calendar.rule_from_name('New Years Day')
july_4th = calendar.rule_from_name('July 4th')
veterans_day = calendar.rule_from_name('Veterans Day')
christmas = calendar.rule_from_name('Christmas')
# Holiday: (start/end date, holiday)
holidays = {USMemorialDay: ("2015-05-25", "2015-05-25"),
USLaborDay: ("2015-09-07", "2015-09-07"),
USColumbusDay: ("2015-10-12", "2015-10-12"),
USThanksgivingDay: ("2015-11-26", "2015-11-26"),
USMartinLutherKingJr: ("2015-01-19", "2015-01-19"),
USPresidentsDay: ("2015-02-16", "2015-02-16"),
GoodFriday: ("2015-04-03", "2015-04-03"),
EasterMonday: [("2015-04-06", "2015-04-06"),
("2015-04-05", [])],
new_years: [("2015-01-01", "2015-01-01"),
("2011-01-01", []),
("2010-12-31", "2010-12-31")],
july_4th: [("2015-07-03", "2015-07-03"),
("2015-07-04", [])],
veterans_day: [("2012-11-11", []),
("2012-11-12", "2012-11-12")],
christmas: [("2011-12-25", []),
("2011-12-26", "2011-12-26")]}
for rule, dates in compat.iteritems(holidays):
empty_dates = rule.dates(start_date, end_date)
self.assertEqual(empty_dates.tolist(), [])
if isinstance(dates, tuple):
dates = [dates]
for start, expected in dates:
if len(expected):
expected = [Timestamp(expected)]
self.check_results(rule, start, start, expected)
def test_argument_types(self):
holidays = USThanksgivingDay.dates(self.start_date,
self.end_date)
holidays_1 = USThanksgivingDay.dates(
self.start_date.strftime('%Y-%m-%d'),
self.end_date.strftime('%Y-%m-%d'))
holidays_2 = USThanksgivingDay.dates(
Timestamp(self.start_date),
Timestamp(self.end_date))
self.assert_index_equal(holidays, holidays_1)
self.assert_index_equal(holidays, holidays_2)
def test_special_holidays(self):
base_date = [datetime(2012, 5, 28)]
holiday_1 = Holiday('One-Time', year=2012, month=5, day=28)
holiday_2 = Holiday('Range', month=5, day=28,
start_date=datetime(2012, 1, 1),
end_date=datetime(2012, 12, 31),
offset=DateOffset(weekday=MO(1)))
self.assertEqual(base_date,
holiday_1.dates(self.start_date, self.end_date))
self.assertEqual(base_date,
holiday_2.dates(self.start_date, self.end_date))
def test_get_calendar(self):
class TestCalendar(AbstractHolidayCalendar):
rules = []
calendar = get_calendar('TestCalendar')
self.assertEqual(TestCalendar, calendar.__class__)
def test_factory(self):
class_1 = HolidayCalendarFactory('MemorialDay', AbstractHolidayCalendar,
USMemorialDay)
class_2 = HolidayCalendarFactory('Thanksgiving', AbstractHolidayCalendar,
USThanksgivingDay)
class_3 = HolidayCalendarFactory('Combined', class_1, class_2)
self.assertEqual(len(class_1.rules), 1)
self.assertEqual(len(class_2.rules), 1)
self.assertEqual(len(class_3.rules), 2)
class TestObservanceRules(tm.TestCase):
def setUp(self):
self.we = datetime(2014, 4, 9)
self.th = datetime(2014, 4, 10)
self.fr = datetime(2014, 4, 11)
self.sa = datetime(2014, 4, 12)
self.su = datetime(2014, 4, 13)
self.mo = datetime(2014, 4, 14)
self.tu = datetime(2014, 4, 15)
def test_next_monday(self):
self.assertEqual(next_monday(self.sa), self.mo)
self.assertEqual(next_monday(self.su), self.mo)
def test_next_monday_or_tuesday(self):
self.assertEqual(next_monday_or_tuesday(self.sa), self.mo)
self.assertEqual(next_monday_or_tuesday(self.su), self.tu)
self.assertEqual(next_monday_or_tuesday(self.mo), self.tu)
def test_previous_friday(self):
self.assertEqual(previous_friday(self.sa), self.fr)
self.assertEqual(previous_friday(self.su), self.fr)
def test_sunday_to_monday(self):
self.assertEqual(sunday_to_monday(self.su), self.mo)
def test_nearest_workday(self):
self.assertEqual(nearest_workday(self.sa), self.fr)
self.assertEqual(nearest_workday(self.su), self.mo)
self.assertEqual(nearest_workday(self.mo), self.mo)
def test_weekend_to_monday(self):
self.assertEqual(weekend_to_monday(self.sa), self.mo)
self.assertEqual(weekend_to_monday(self.su), self.mo)
self.assertEqual(weekend_to_monday(self.mo), self.mo)
def test_next_workday(self):
self.assertEqual(next_workday(self.sa), self.mo)
self.assertEqual(next_workday(self.su), self.mo)
self.assertEqual(next_workday(self.mo), self.tu)
def test_previous_workday(self):
self.assertEqual(previous_workday(self.sa), self.fr)
self.assertEqual(previous_workday(self.su), self.fr)
self.assertEqual(previous_workday(self.tu), self.mo)
def test_before_nearest_workday(self):
self.assertEqual(before_nearest_workday(self.sa), self.th)
self.assertEqual(before_nearest_workday(self.su), self.fr)
self.assertEqual(before_nearest_workday(self.tu), self.mo)
def test_after_nearest_workday(self):
self.assertEqual(after_nearest_workday(self.sa), self.mo)
self.assertEqual(after_nearest_workday(self.su), self.tu)
self.assertEqual(after_nearest_workday(self.fr), self.mo)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
|
mit
|
berkeley-stat159/project-gamma
|
code/linear_model.py
|
2
|
12538
|
"""
Script for linear modeling of fMRI data to identify noise regressors and to investigate activation clusters.
"""
import project_config
import numpy as np
import numpy.linalg as npl
import matplotlib.pyplot as plt
import nibabel as nib
import os
import pandas as pd
from utils import general_utils
from multiple_comparison import multiple_comp
from general_utils import form_cond_filepath, prepare_standard_data, prepare_mask
from stimuli_revised import events2neural_std
from conv import conv_target_non_target, conv_std
from gaussian_filter import spatial_smooth
from matplotlib import colors
from hypothesis import compute_t_values
def plot_first_four_pcs(U, Y, depth, output_filename):
Y_demeaned = Y - np.mean(Y, axis=1).reshape([-1, 1])
projections = U.T.dot(Y_demeaned)
projection_vols = np.zeros(data.shape)
projection_vols[in_brain_mask, :] = projections.T
fig = plt.figure()
for map_index, pc_index in ((221, 0),(222, 1),(223, 2),(224, 3)):
ax = fig.add_subplot(map_index)
ax.set_title("sub%s,z=%d,nth_pc=%s" % (subject_num, depth, pc_index))
ax.imshow(projection_vols[:,:,depth,pc_index], interpolation="nearest", cmap="gray")
plt.tight_layout()
plt.savefig(os.path.join(output_filename, "sub011_task001_first_four_pcs.png"), format='png', dpi=500)
def plot_target_betas_n_back(t_vols_n_back_beta_0, b_vols_smooth_n_back, in_brain_mask, brain_structure, nice_cmap, n_back):
beta_index = 0
plt.figure()
b_vols_smooth_n_back[~in_brain_mask] = np.nan
t_vols_n_back_beta_0[~in_brain_mask] = np.nan
min_val = np.nanmin(b_vols_smooth_n_back[...,(40,50,60),beta_index])
max_val = np.nanmax(b_vols_smooth_n_back[...,(40,50,60),beta_index])
for map_index, depth in (((3,2,1), 40),((3,2,3), 50),((3,2,5), 60)):
plt.subplot(*map_index)
plt.title("z=%d,%s" % (depth, n_back + "-back target,beta values"))
plt.imshow(brain_structure[...,depth], alpha=0.5)
plt.imshow(b_vols_smooth_n_back[...,depth,beta_index], cmap=nice_cmap, alpha=0.5, vmin=min_val, vmax=max_val)
plt.colorbar()
plt.tight_layout()
t_min_val = np.nanmin(t_vols_n_back_beta_0[...,(40,50,60)])
t_max_val = np.nanmax(t_vols_n_back_beta_0[...,(40,50,60)])
for map_index, depth in (((3,2,2), 40),((3,2,4), 50),((3,2,6), 60)):
plt.subplot(*map_index)
plt.title("z=%d,%s" % (depth, n_back + "-back target,t values"))
plt.imshow(brain_structure[...,depth], alpha=0.5)
plt.imshow(t_vols_n_back_beta_0[...,depth], cmap=nice_cmap, alpha=0.5, vmin=t_min_val, vmax=t_max_val)
plt.colorbar()
plt.tight_layout()
plt.savefig(os.path.join(output_filename, "sub011_target_betas_%s_back.png" % (n_back)), format='png', dpi=500)
def plot_nontarget_betas_n_back(t_vols_n_back_beta_1, b_vols_smooth_n_back, in_brain_mask, brain_structure, nice_cmap, n_back):
beta_index = 1
b_vols_smooth_n_back[~in_brain_mask] = np.nan
t_vols_n_back_beta_1[~in_brain_mask] = np.nan
min_val = np.nanmin(b_vols_smooth_n_back[...,(40,50,60),beta_index])
max_val = np.nanmax(b_vols_smooth_n_back[...,(40,50,60),beta_index])
plt.figure()
for map_index, depth in (((3,2,1), 40),((3,2,3), 50),((3,2,5), 60)):
plt.subplot(*map_index)
plt.title("z=%d,%s" % (depth, n_back + "-back nontarget,beta values"))
plt.imshow(brain_structure[...,depth], alpha=0.5)
plt.imshow(b_vols_smooth_n_back[...,depth,beta_index], cmap=nice_cmap, alpha=0.5, vmin=min_val, vmax=max_val)
plt.colorbar()
plt.tight_layout()
t_min_val = np.nanmin(t_vols_n_back_beta_1[...,(40,50,60)])
t_max_val = np.nanmax(t_vols_n_back_beta_1[...,(40,50,60)])
for map_index, depth in (((3,2,2), 40),((3,2,4), 50),((3,2,6), 60)):
plt.subplot(*map_index)
plt.title("z=%d,%s" % (depth, n_back + "-back nontarget,t values"))
plt.imshow(brain_structure[...,depth], alpha=0.5)
plt.imshow(t_vols_n_back_beta_1[...,depth], cmap=nice_cmap, alpha=0.5, vmin=t_min_val, vmax=t_max_val)
plt.colorbar()
plt.tight_layout()
plt.savefig(os.path.join(output_filename, "sub011_nontarget_betas_%s_back.png" % (n_back)), format='png', dpi=500)
def plot_noise_regressor_betas(b_vols_smooth, t_vols_beta_6_to_9, brain_structure, in_brain_mask, nice_cmap):
plt.figure()
plt.subplot(3,2,1)
plt.title("z=%d,%s" % (40, "linear drift,betas"))
b_vols_smooth[~in_brain_mask] = np.nan
plt.imshow(brain_structure[...,40], alpha=0.5)
plt.imshow(b_vols_smooth[...,40,6], cmap=nice_cmap, alpha=0.5)
plt.colorbar()
plt.tight_layout()
plt.subplot(3,2,3)
plt.title("z=%d,%s" % (40, "quadratic drift,betas"))
b_vols_smooth[~in_brain_mask] = np.nan
plt.imshow(brain_structure[...,40], alpha=0.5)
plt.imshow(b_vols_smooth[...,40,7], cmap=nice_cmap, alpha=0.5)
plt.colorbar()
plt.tight_layout()
plt.subplot(3,2,5)
plt.title("z=%d,%s" % (40, "second PC,betas"))
b_vols_smooth[~in_brain_mask] = np.nan
plt.imshow(brain_structure[...,40], alpha=0.5)
plt.imshow(b_vols_smooth[...,40,9], cmap=nice_cmap, alpha=0.5)
plt.colorbar()
plt.tight_layout()
t_vols_beta_6_to_9[0][~in_brain_mask] = np.nan
t_vols_beta_6_to_9[1][~in_brain_mask] = np.nan
t_vols_beta_6_to_9[3][~in_brain_mask] = np.nan
plt.subplot(3,2,2)
plt.title("z=%d,%s" % (40, "linear drift,t values"))
plt.imshow(brain_structure[...,40], alpha=0.5)
plt.imshow(t_vols_beta_6_to_9[0][...,40], cmap=nice_cmap, alpha=0.5)
plt.colorbar()
plt.tight_layout()
plt.subplot(3,2,4)
plt.title("z=%d,%s" % (40, "quadratic drift,t values"))
plt.imshow(brain_structure[...,40], alpha=0.5)
plt.imshow(t_vols_beta_6_to_9[1][...,40], cmap=nice_cmap, alpha=0.5)
plt.colorbar()
plt.tight_layout()
plt.subplot(3,2,6)
plt.title("z=%d,%s" % (40, "second PC,t values"))
plt.imshow(brain_structure[...,40], alpha=0.5)
plt.imshow(t_vols_beta_6_to_9[3][...,40], cmap=nice_cmap, alpha=0.5)
plt.colorbar()
plt.tight_layout()
plt.savefig(os.path.join(output_filename, "sub011_0_back_noise_regressors_betas_map.png"), format='png', dpi=500)
def single_subject_linear_model(standard_source_prefix, cond_filepath_prefix, subject_num, task_num, output_filename):
data = prepare_standard_data(subject_num, task_num, standard_source_prefix)
n_trs = data.shape[-1] + 5
cond_filename_003 = form_cond_filepath(subject_num, task_num, "003", cond_filepath_prefix)
cond_filename_005 = form_cond_filepath(subject_num, task_num, "005", cond_filepath_prefix)
cond_filename_001 = form_cond_filepath(subject_num, task_num, "001", cond_filepath_prefix)
cond_filename_004 = form_cond_filepath(subject_num, task_num, "004", cond_filepath_prefix)
cond_filename_007 = form_cond_filepath(subject_num, task_num, "007", cond_filepath_prefix)
target_convolved, nontarget_convolved, error_convolved = conv_target_non_target(n_trs, cond_filename_003, cond_filename_007, TR, tr_divs = 100.0)
target_convolved, nontarget_convolved, error_convolved = target_convolved[5:], nontarget_convolved[5:], error_convolved[5:]
block_regressor = events2neural_std(cond_filename_005, TR, n_trs)[5:]
block_start_cues = conv_std(n_trs, cond_filename_001, TR)[5:]
block_end_cues = conv_std(n_trs, cond_filename_004, TR)[5:]
linear_drift = np.linspace(-1, 1, n_trs)
qudratic_drift = linear_drift ** 2
qudratic_drift -= np.mean(qudratic_drift)
linear_drift = linear_drift[5:]
qudratic_drift = qudratic_drift[5:]
in_brain_mask, _ = prepare_mask(data, 5000)
pad_thickness = 2.0
sigma = 2.0
b_vols = spatial_smooth(data, in_brain_mask, pad_thickness, sigma, False)
in_brain_tcs = b_vols[in_brain_mask]
Y = in_brain_tcs.T
Y_demeaned = Y - np.mean(Y, axis=1).reshape([-1, 1])
unscaled_cov = Y_demeaned.dot(Y_demeaned.T)
U, S, V = npl.svd(unscaled_cov)
n_betas = 11
X = np.ones((n_trs - 5, n_betas))
X[:, 0] = target_convolved
X[:, 1] = nontarget_convolved
X[:, 2] = error_convolved
X[:, 3] = block_regressor
X[:, 4] = block_start_cues
X[:, 5] = block_end_cues
X[:, 6] = linear_drift
X[:, 7] = qudratic_drift
X[:, 8] = U[:,0]
X[:, 9] = U[:,1]
# 10th column is the intercept
# plot design matrix
plt.figure()
plt.imshow(X, aspect=0.1)
plt.savefig(os.path.join(output_filename, "sub%s_task%s_design_matrix.png" % (subject_num, task_num)), format='png', dpi=500)
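# The fit below is ordinary least squares on the GLM Y ~ X B for all voxels at
# once: npl.pinv(X).dot(Y) gives the minimum-norm least-squares estimate,
# i.e. B_hat = (X^T X)^+ X^T Y, with one column of B_hat per voxel.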
B = npl.pinv(X).dot(Y)
# test normality of residuals
residuals = Y.T - X.dot(B).T
alpha_test, bonferroni_test, hochberg_test, benjamini_test = [val * 1.0 / Y.shape[-1] for val in multiple_comp(residuals)]
normality_test_results = {"Alpha Test":alpha_test, "Bonferroni Procedure":bonferroni_test,"Hochberg Procedure":hochberg_test,"Benjamini-Hochberg Procedure":benjamini_test}
normality_test_pd = pd.DataFrame(normality_test_results, index=["Failure Rate"])
normality_test_pd.to_csv(os.path.join(output_filename, "sub%s_task%s_linear_model_normality_tests_failure_rates.csv" % (subject_num, task_num)))
rs_squared = []
for i in range(Y.shape[-1]):
r_squared = 1 - np.sum((Y[:,i] - X.dot(B[:,i]))**2) * 1.0 / np.sum((Y[:,i] - np.mean(Y[:,i])) ** 2)
rs_squared.append(r_squared)
np.savetxt(os.path.join(output_filename, "glm_mean_R_squared_" + ("0_back" if task_num == "001" else "2_back") + ".txt"), np.array([np.mean(rs_squared)]))
b_vols = np.zeros((data.shape[0:-1] + (n_betas,)))
b_vols[in_brain_mask, :] = B.T
# compute t values for target and nontarget betas
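# compute_t_values is project code; conceptually each t value is the usual GLM
# statistic t_j = beta_hat_j / SE(beta_hat_j), with
# SE(beta_hat_j) = sqrt(sigma_hat^2 * [(X^T X)^-1]_jj) estimated from the
# residuals of the fit above.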
t_test_target_beta = 0
t_values = compute_t_values(X, B, Y, t_test_target_beta)
t_vols_beta_0 = np.zeros((data.shape[0:-1]))
t_vols_beta_0[in_brain_mask] = t_values
t_test_target_beta = 1
t_values = compute_t_values(X, B, Y, t_test_target_beta)
t_vols_beta_1 = np.zeros((data.shape[0:-1]))
t_vols_beta_1[in_brain_mask] = t_values
# compute t values for noise regressor betas
t_values = compute_t_values(X, B, Y, 6)
t_vols_beta_6 = np.zeros((data.shape[0:-1]))
t_vols_beta_6[in_brain_mask] = t_values
t_values = compute_t_values(X, B, Y, 7)
t_vols_beta_7 = np.zeros((data.shape[0:-1]))
t_vols_beta_7[in_brain_mask] = t_values
t_values = compute_t_values(X, B, Y, 8)
t_vols_beta_8 = np.zeros((data.shape[0:-1]))
t_vols_beta_8[in_brain_mask] = t_values
t_values = compute_t_values(X, B, Y, 9)
t_vols_beta_9 = np.zeros((data.shape[0:-1]))
t_vols_beta_9[in_brain_mask] = t_values
t_vols_beta_6_to_9 = [t_vols_beta_6, t_vols_beta_7, t_vols_beta_8, t_vols_beta_9]
return b_vols, in_brain_mask, U, Y, data, t_vols_beta_0, t_vols_beta_1, t_vols_beta_6_to_9
if __name__ == "__main__":
# single subject, 0-back
data_dir_path = os.path.join(os.path.dirname(__file__), "..", "data")
standard_source_prefix = os.path.join(data_dir_path, "preprocessed")
cond_filepath_prefix = os.path.join(data_dir_path, "condition_files")
brain_structure_path = os.path.join(data_dir_path, "mni_icbm152_csf_tal_nlin_asym_09c_2mm.nii")
nice_cmap_values_path = os.path.join(data_dir_path, "actc.txt")
output_filename = os.path.join(os.path.dirname(__file__), "..", "results")
subject_num = "011"
task_num = "001"
TR = project_config.TR
plt.rcParams['image.cmap'] = 'gray'
plt.rcParams['image.interpolation'] = 'nearest'
brain_structure = nib.load(brain_structure_path).get_data()
nice_cmap_values = np.loadtxt(nice_cmap_values_path)
b_vols_smooth_0_back, in_brain_mask, U, Y, data, t_vols_0_back_beta_0, t_vols_0_back_beta_1, t_vols_beta_6_to_9 = single_subject_linear_model(standard_source_prefix, cond_filepath_prefix, subject_num, task_num, output_filename)
nice_cmap = colors.ListedColormap(nice_cmap_values, 'actc')
plot_noise_regressor_betas(b_vols_smooth_0_back, t_vols_beta_6_to_9, brain_structure, in_brain_mask, nice_cmap)
plot_target_betas_n_back(t_vols_0_back_beta_0, b_vols_smooth_0_back, in_brain_mask, brain_structure, nice_cmap, "0")
plot_nontarget_betas_n_back(t_vols_0_back_beta_1, b_vols_smooth_0_back, in_brain_mask, brain_structure, nice_cmap, "0")
# projection of first four
plot_first_four_pcs(U, Y, 40, output_filename)
# single subject, 0-back vs. 2-back
task_num = "003"
b_vols_smooth_2_back, in_brain_mask, U, Y, data, t_vols_2_back_beta_0, t_vols_2_back_beta_1, _ = single_subject_linear_model(standard_source_prefix, cond_filepath_prefix, subject_num, task_num, output_filename)
plot_target_betas_n_back(t_vols_2_back_beta_0, b_vols_smooth_2_back, in_brain_mask, brain_structure, nice_cmap, "2")
plot_nontarget_betas_n_back(t_vols_2_back_beta_1, b_vols_smooth_2_back, in_brain_mask, brain_structure, nice_cmap, "2")
|
bsd-3-clause
|
jaeilepp/mne-python
|
examples/decoding/plot_decoding_unsupervised_spatial_filter.py
|
3
|
2466
|
"""
==================================================================
Analysis of evoked response using ICA and PCA reduction techniques
==================================================================
This example computes PCA and ICA of evoked or epochs data. Then the
PCA / ICA components, a.k.a. spatial filters, are used to transform
the channel data to new sources / virtual channels. The output is
visualized on the average of all the epochs.
"""
# Authors: Jean-Remi King <[email protected]>
# Asish Panda <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.decoding import UnsupervisedSpatialFilter
from sklearn.decomposition import PCA, FastICA
print(__doc__)
# Preprocess data
data_path = sample.data_path()
# Load and filter data, set up epochs
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
tmin, tmax = -0.1, 0.3
event_id = dict(aud_l=1, aud_r=2, vis_l=3, vis_r=4)
raw = mne.io.read_raw_fif(raw_fname, preload=True)
raw.filter(1, 20, fir_design='firwin')
events = mne.read_events(event_fname)
picks = mne.pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False,
exclude='bads')
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=False,
picks=picks, baseline=None, preload=True,
verbose=False)
X = epochs.get_data()
##############################################################################
# Transform data with PCA computed on the average, i.e. evoked response
pca = UnsupervisedSpatialFilter(PCA(30), average=False)
pca_data = pca.fit_transform(X)
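# X has shape (n_epochs, n_channels, n_times); with average=False the PCA is
# fit on the channel data pooled across all epochs, and pca_data comes back as
# (n_epochs, 30, n_times), i.e. 30 virtual channels per epoch.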
ev = mne.EvokedArray(np.mean(pca_data, axis=0),
mne.create_info(30, epochs.info['sfreq'],
ch_types='eeg'), tmin=tmin)
ev.plot(show=False, window_title="PCA")
##############################################################################
# Transform data with ICA computed on the raw epochs (no averaging)
ica = UnsupervisedSpatialFilter(FastICA(30), average=False)
ica_data = ica.fit_transform(X)
ev1 = mne.EvokedArray(np.mean(ica_data, axis=0),
mne.create_info(30, epochs.info['sfreq'],
ch_types='eeg'), tmin=tmin)
ev1.plot(show=False, window_title='ICA')
plt.show()
|
bsd-3-clause
|
florian-f/sklearn
|
sklearn/neighbors/classification.py
|
2
|
11565
|
"""Nearest Neighbor Classification"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
#
# License: BSD, (C) INRIA, University of Amsterdam
import warnings
import numpy as np
from scipy import stats
from ..utils.extmath import weighted_mode
from .base import \
_check_weights, _get_weights, \
NeighborsBase, KNeighborsMixin,\
RadiusNeighborsMixin, SupervisedIntegerMixin
from ..base import ClassifierMixin
from ..utils import atleast2d_or_csr
class KNeighborsClassifier(NeighborsBase, KNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing the k-nearest neighbors vote.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`k_neighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`scipy.spatial.cKDtree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
p: integer, optional (default = 2)
Parameter for the Minkowski metric from
sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsClassifier
>>> neigh = KNeighborsClassifier(n_neighbors=3)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsClassifier(...)
>>> print(neigh.predict([[1.1]]))
[0]
>>> print(neigh.predict_proba([[0.9]]))
[[ 0.66666667 0.33333333]]
See also
--------
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
neighbors, neighbor `k+1` and `k`, have identical distances but
different labels, the results will depend on the ordering of the
training data.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5,
weights='uniform',
algorithm='auto', leaf_size=30, p=2, **kwargs):
if kwargs:
if 'warn_on_equidistant' in kwargs:
warnings.warn("The warn_on_equidistant parameter is "
"deprecated and will be removed in the future.",
DeprecationWarning,
stacklevel=2)
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, p=p)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X: array
A 2-D array representing the test points.
Returns
-------
labels: array
List of class labels (one for each data sample).
"""
X = atleast2d_or_csr(X)
neigh_dist, neigh_ind = self.kneighbors(X)
pred_labels = self._y[neigh_ind]
weights = _get_weights(neigh_dist, self.weights)
if weights is None:
mode, _ = stats.mode(pred_labels, axis=1)
else:
mode, _ = weighted_mode(pred_labels, weights, axis=1)
return self.classes_.take(mode.flatten().astype(np.int))
def predict_proba(self, X):
"""Return probability estimates for the test data X.
Parameters
----------
X: array, shape = (n_samples, n_features)
A 2-D array representing the test points.
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Probabilities of the samples for each class in the model,
where classes are ordered arithmetically.
"""
X = atleast2d_or_csr(X)
neigh_dist, neigh_ind = self.kneighbors(X)
pred_labels = self._y[neigh_ind]
weights = _get_weights(neigh_dist, self.weights)
if weights is None:
weights = np.ones_like(pred_labels)
probabilities = np.zeros((X.shape[0], self.classes_.size))
# fancy indexing: add each neighbor's weight to its own class column (a plain ':' index would not work here)
all_rows = np.arange(X.shape[0])
for i, idx in enumerate(pred_labels.T): # loop is O(n_neighbors)
probabilities[all_rows, idx] += weights[:, i]
# normalize 'votes' into real [0,1] probabilities
probabilities = (probabilities.T / probabilities.sum(axis=1)).T
return probabilities
class RadiusNeighborsClassifier(NeighborsBase, RadiusNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing a vote among neighbors within a given radius
Parameters
----------
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`scipy.spatial.cKDtree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
p: integer, optional (default = 2)
Parameter for the Minkowski metric from
sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
outlier_label: int, optional (default = None)
Label, which is given for outlier samples (samples with no
neighbors on given radius).
If set to None, ValueError is raised, when outlier is detected.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsClassifier
>>> neigh = RadiusNeighborsClassifier(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsClassifier(...)
>>> print(neigh.predict([[1.5]]))
[0]
See also
--------
KNeighborsClassifier
RadiusNeighborsRegressor
KNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30, p=2, outlier_label=None):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
p=p)
self.weights = _check_weights(weights)
self.outlier_label = outlier_label
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X: array
A 2-D array representing the test points.
Returns
-------
labels: array
List of class labels (one for each data sample).
"""
X = atleast2d_or_csr(X)
n_samples = X.shape[0]
neigh_dist, neigh_ind = self.radius_neighbors(X)
inliers = [i for i, nind in enumerate(neigh_ind) if len(nind) != 0]
outliers = [i for i, nind in enumerate(neigh_ind) if len(nind) == 0]
if self.outlier_label is not None:
neigh_dist[outliers] = 1e-6
elif outliers:
raise ValueError('No neighbors found for test samples %r, '
'you can try using larger radius, '
'give a label for outliers, '
'or consider removing them from your dataset.'
% outliers)
weights = _get_weights(neigh_dist, self.weights)
pred_labels = np.array([self._y[ind] for ind in neigh_ind],
dtype=object)
if weights is None:
mode = np.array([stats.mode(pl)[0] for pl in pred_labels[inliers]],
dtype=np.int)
else:
mode = np.array([weighted_mode(pl, w)[0]
for (pl, w) in zip(pred_labels[inliers], weights)],
dtype=np.int)
mode = mode.ravel().astype(np.int)
prediction = np.empty(n_samples, dtype=self.classes_.dtype)
prediction[inliers] = self.classes_.take(mode)
if outliers:
prediction[outliers] = self.outlier_label
return prediction
|
bsd-3-clause
|
apdjustino/DRCOG_Urbansim
|
src/drcog/models/hlcm_simulation.py
|
1
|
7580
|
import synthicity.urbansim.interaction as interaction
import pandas as pd, numpy as np, copy
from synthicity.utils import misc
from drcog.models import transition
def simulate(dset,year,depvar = 'building_id',alternatives=None,simulation_table = 'households',
output_names=None,agents_groupby = ['income_3_tenure',],transition_config=None,relocation_config=None):
output_csv, output_title, coeff_name, output_varname = output_names
if transition_config['Enabled']:
ct = dset.fetch(transition_config['control_totals_table'])
if 'persons' in ct.columns:
del ct['persons']
ct["total_number_of_households"] = (ct["total_number_of_households"]*transition_config['scaling_factor']).astype('int32')
hh = dset.fetch('households')
persons = dset.fetch('persons')
tran = transition.TabularTotalsTransition(ct, 'total_number_of_households')
model = transition.TransitionModel(tran)
#import pdb; pdb.set_trace()
new, added, new_linked = model.transition(
hh, year, linked_tables={'linked': (persons, 'household_id')})
new.loc[added,'building_id'] = -1
dset.d['households'] = new
dset.d['persons'] = new_linked['linked']
# new_hhlds = {"table": "dset.households","writetotmp": "households","model": "transitionmodel","first_year": 2010,"control_totals": "dset.%s"%transition_config['control_totals_table'],
# "geography_field": "building_id","amount_field": "total_number_of_households"}
# import synthicity.urbansim.transitionmodel as transitionmodel
# transitionmodel.simulate(dset,new_hhlds,year=year,show=True,subtract=True)
dset.households.index.name = 'household_id'
choosers = dset.fetch(simulation_table)
if relocation_config['Enabled']:
rate_table = dset.store[relocation_config['relocation_rates_table']].copy()
rate_field = "probability_of_relocating"
rate_table[rate_field] = rate_table[rate_field]*.01*relocation_config['scaling_factor']
movers = dset.relocation_rates(choosers,rate_table,rate_field)
choosers[depvar].ix[movers] = -1
movers = choosers[choosers[depvar]==-1]
print "Total new agents and movers = %d" % len(movers.index)
empty_units = dset.buildings[(dset.buildings.residential_units>0)].residential_units.sub(choosers.groupby('building_id').size(),fill_value=0)
empty_units = empty_units[empty_units>0].order(ascending=False)
alternatives = alternatives.ix[np.repeat(empty_units.index.values,empty_units.values.astype('int'))]
alts = alternatives
pdf = pd.DataFrame(index=alts.index)
segments = movers.groupby(agents_groupby)
for name, segment in segments:
segment = segment.head(1)
name = str(name)
tmp_outcsv, tmp_outtitle, tmp_coeffname = output_csv%name, output_title%name, coeff_name%name
ind_vars = dset.coeffs[(tmp_coeffname, 'fnames')][np.invert(dset.coeffs[(tmp_coeffname, 'fnames')].isnull().values)].values.tolist()
SAMPLE_SIZE = alts.index.size
numchoosers = segment.shape[0]
numalts = alts.shape[0]
sample = np.tile(alts.index.values,numchoosers)
alts_sample = alts
alts_sample.loc[:, 'join_index'] = np.repeat(segment.index.values,SAMPLE_SIZE) # corrected chained index error
alts_sample = pd.merge(alts_sample,segment,left_on='join_index',right_index=True,suffixes=('','_r'))
chosen = np.zeros((numchoosers,SAMPLE_SIZE))
chosen[:,0] = 1
sample, alternative_sample, est_params = sample, alts_sample, ('mnl',chosen)
##Interaction variables
interaction_vars = [(var, var.split('_x_')) for var in ind_vars if '_x_' in var]
for ivar in interaction_vars:
            # 'gt' / 'lt' suffixes mark threshold interactions; use elif so a
            # 'gt' term is not immediately overwritten by the product branch
            if ivar[1][0].endswith('gt'):
                alternative_sample[ivar[0]] = ((alternative_sample[ivar[1][0]])>alternative_sample[ivar[1][1]]).astype('int32')
            elif ivar[1][0].endswith('lt'):
                alternative_sample[ivar[0]] = ((alternative_sample[ivar[1][0]])<alternative_sample[ivar[1][1]]).astype('int32')
            else:
                alternative_sample[ivar[0]] = ((alternative_sample[ivar[1][0]])*alternative_sample[ivar[1][1]])
est_data = pd.DataFrame(index=alternative_sample.index)
for varname in ind_vars:
est_data[varname] = alternative_sample[varname]
est_data = est_data.fillna(0)
data = est_data
data = data.as_matrix()
coeff = dset.load_coeff(tmp_coeffname)
probs = interaction.mnl_simulate(data,coeff,numalts=SAMPLE_SIZE,returnprobs=1)
pdf['segment%s'%name] = pd.Series(probs.flatten(),index=alts.index)
new_homes = pd.Series(np.ones(len(movers.index))*-1,index=movers.index)
for name, segment in segments:
name_coeff = str(name)
name = str(name)
p=pdf['segment%s'%name].values
mask = np.zeros(len(alts.index),dtype='bool')
print "Assigning units to %d agents of segment %s" % (len(segment.index),name)
def choose(p,mask,alternatives,segment,new_homes,minsize=None):
p = copy.copy(p)
p[mask] = 0 # already chosen
            try:
                indexes = np.random.choice(len(alternatives.index),len(segment.index),replace=False,p=p/p.sum())
            except ValueError:
                # np.random.choice raises ValueError when there are fewer available
                # units than agents to place (or the probabilities cannot be normalised)
                print("WARNING: not enough options to fit agents, will result in unplaced agents")
                return mask,new_homes
new_homes.ix[segment.index] = alternatives.index.values[indexes]
mask[indexes] = 1
return mask,new_homes
mask,new_homes = choose(p,mask,alts,segment,new_homes)
    build_cnts = new_homes.value_counts()  # number of households placed in each building
    print("Assigned %d agents to %d locations with %d unplaced" % (new_homes.size,build_cnts.size,build_cnts.get(-1,0)))
table = dset.households # need to go back to the whole dataset
table[depvar].ix[new_homes.index] = new_homes.values.astype('int32')
dset.store_attr(output_varname,year,copy.deepcopy(table[depvar]))
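# Hedged aside: the core of the assignment loop above is a draw of vacant units
# without replacement, weighted by the simulated MNL probabilities. The toy
# function below is never called by the model and its names are illustrative
# only; it just shows that pattern in isolation.
def _toy_assignment_sketch():
    toy_probs = np.array([0.10, 0.45, 0.30, 0.15])   # per-unit choice probabilities
    toy_unit_ids = np.array([101, 102, 103, 104])    # hypothetical building ids
    picks = np.random.choice(len(toy_unit_ids), size=2, replace=False,
                             p=toy_probs / toy_probs.sum())
    return toy_unit_ids[picks]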
if __name__ == '__main__':
from drcog.models import dataset
from drcog.variables import variable_library
import os
import cProfile
dset = dataset.DRCOGDataset(os.path.join(misc.data_dir(),'drcog.h5'))
#Load estimated coefficients
coeff_store = pd.HDFStore(os.path.join(misc.data_dir(),'coeffs.h5'))
dset.coeffs = coeff_store.coeffs.copy()
coeff_store.close()
coeff_store = pd.HDFStore(os.path.join(misc.data_dir(),'coeffs_res.h5'))
dset.coeffs_res = coeff_store.coeffs_res.copy()
coeff_store.close()
variable_library.calculate_variables(dset)
alternatives = dset.buildings[(dset.buildings.residential_units>0)]
sim_year = 2011
fnc = "simulate(dset, year=sim_year,depvar = 'building_id',alternatives=alternatives,simulation_table = 'households',output_names = ('drcog-coeff-hlcm-%s.csv','DRCOG HOUSEHOLD LOCATION CHOICE MODELS (%s)','hh_location_%s','household_building_ids')," +\
"agents_groupby= ['income_3_tenure',],transition_config = {'Enabled':True,'control_totals_table':'annual_household_control_totals','scaling_factor':1.0}," +\
"relocation_config = {'Enabled':True,'relocation_rates_table':'annual_household_relocation_rates','scaling_factor':1.0},)"
cProfile.run(fnc, 'c:/users/jmartinez/documents/projects/urbansim/cprofile/hlcm')
|
agpl-3.0
|
bsipocz/statsmodels
|
statsmodels/examples/ex_kernel_regression_sigtest.py
|
34
|
3177
|
# -*- coding: utf-8 -*-
"""Kernel Regression and Significance Test
Warning: SLOW, 11 minutes on my computer
Created on Thu Jan 03 20:20:47 2013
Author: Josef Perktold
results - this version
----------------------
>>> exec(open('ex_kernel_regression_censored1.py').read())
bw
[ 0.3987821 0.50933458]
[0.39878209999999997, 0.50933457999999998]
sig_test - default
Not Significant
pvalue
0.11
test statistic 0.000434305313291
bootstrap critical values
[ 0.00043875 0.00046808 0.0005064 0.00054151]
sig_test - pivot=True, nboot=200, nested_res=50
pvalue
0.01
test statistic 6.17877171579
bootstrap critical values
[ 5.5658345 5.74761076 5.87386858 6.46012041]
times: 8.34599995613 20.6909999847 666.373999834
"""
from __future__ import print_function
import time
import numpy as np
import statsmodels.nonparametric.api as nparam
import statsmodels.nonparametric.kernel_regression as smkr
if __name__ == '__main__':
t0 = time.time()
#example from test file
nobs = 200
np.random.seed(1234)
C1 = np.random.normal(size=(nobs, ))
C2 = np.random.normal(2, 1, size=(nobs, ))
noise = np.random.normal(size=(nobs, ))
Y = 0.3 +1.2 * C1 - 0.9 * C2 + noise
#self.write2file('RegData.csv', (Y, C1, C2))
#CODE TO PRODUCE BANDWIDTH ESTIMATION IN R
#library(np)
#data <- read.csv('RegData.csv', header=FALSE)
#bw <- npregbw(formula=data$V1 ~ data$V2 + data$V3,
# bwmethod='cv.aic', regtype='lc')
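    # The KernelReg call below mirrors the npregbw call sketched in R above:
    # local-constant regression ('lc') with AIC-driven bandwidth selection.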
model = nparam.KernelReg(endog=[Y], exog=[C1, C2],
reg_type='lc', var_type='cc', bw='aic')
mean, marg = model.fit()
#R_bw = [0.4017893, 0.4943397] # Bandwidth obtained in R
bw_expected = [0.3987821, 0.50933458]
#npt.assert_allclose(model.bw, bw_expected, rtol=1e-3)
print('bw')
print(model.bw)
print(bw_expected)
print('\nsig_test - default')
print(model.sig_test([1], nboot=100))
t1 = time.time()
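    # Rerun the significance test through the TestRegCoefC class directly so the
    # bootstrapped null distribution (t_dist) can be inspected below.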
res0 = smkr.TestRegCoefC(model, [1])
print('pvalue')
print((res0.t_dist >= res0.test_stat).mean())
print('test statistic', res0.test_stat)
print('bootstrap critical values')
probs = np.array([0.9, 0.95, 0.975, 0.99])
bsort0 = np.sort(res0.t_dist)
nrep0 = len(bsort0)
print(bsort0[(probs * nrep0).astype(int)])
t2 = time.time()
print('\nsig_test - pivot=True, nboot=200, nested_res=50')
res1 = smkr.TestRegCoefC(model, [1], pivot=True, nboot=200, nested_res=50)
print('pvalue')
print((res1.t_dist >= res1.test_stat).mean())
print('test statistic', res1.test_stat)
print('bootstrap critical values')
probs = np.array([0.9, 0.95, 0.975, 0.99])
bsort1 = np.sort(res1.t_dist)
nrep1 = len(bsort1)
print(bsort1[(probs * nrep1).astype(int)])
t3 = time.time()
print('times:', t1-t0, t2-t1, t3-t2)
# import matplotlib.pyplot as plt
# fig = plt.figure()
# ax = fig.add_subplot(1,1,1)
# ax.plot(x, y, 'o', alpha=0.5)
# ax.plot(x, y_cens, 'o', alpha=0.5)
# ax.plot(x, y_true, lw=2, label='DGP mean')
# ax.plot(x, sm_mean, lw=2, label='model 0 mean')
# ax.plot(x, mean2, lw=2, label='model 2 mean')
# ax.legend()
#
# plt.show()
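# Hedged aside: the p-value and critical values printed above follow the usual
# bootstrap recipe; the helper below (not used by the example, name is
# illustrative) packages that recipe for reuse.
def _bootstrap_summary(test_stat, t_dist, probs=(0.9, 0.95, 0.975, 0.99)):
    """Return (p-value, bootstrap critical values) from a bootstrapped null distribution."""
    t_dist = np.sort(np.asarray(t_dist))
    pvalue = (t_dist >= test_stat).mean()
    crit = t_dist[(np.asarray(probs) * len(t_dist)).astype(int)]
    return pvalue, crit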
|
bsd-3-clause
|