repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
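Each row below pairs a repository name, file path, copy count, file size, the full file content, and its license. As a minimal, hedged sketch of how such a dump could be queried (the JSON-lines file name is a placeholder; the dump's real storage format is not stated here, and the column names are simply taken from the header above):

```python
import pandas as pd

# Hypothetical export of the table above; only the column names come from the header.
df = pd.read_json("code_dump.jsonl", lines=True)

# The header stores `size` as a string, so cast it before sorting numerically.
df["size"] = df["size"].astype(int)

# e.g. keep BSD-licensed Python files and list the smallest ones first
py_bsd = df[(df["license"] == "bsd-3-clause") & df["path"].str.endswith(".py")]
print(py_bsd.sort_values("size")[["repo_name", "path", "size"]].head())
```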
cython-testbed/pandas | pandas/tests/frame/test_operators.py | 1 | 36793 | # -*- coding: utf-8 -*-
from __future__ import print_function
from collections import deque
from datetime import datetime
from decimal import Decimal
import operator
import pytest
from numpy import nan
import numpy as np
from pandas.compat import range
from pandas import compat
from pandas import (DataFrame, Series, MultiIndex, Timestamp,
date_range)
import pandas.core.common as com
import pandas as pd
from pandas.util.testing import (assert_numpy_array_equal,
assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData, _check_mixed_float
class TestDataFrameUnaryOperators(object):
# __pos__, __neg__, __inv__
@pytest.mark.parametrize('df,expected', [
(pd.DataFrame({'a': [-1, 1]}), pd.DataFrame({'a': [1, -1]})),
(pd.DataFrame({'a': [False, True]}),
pd.DataFrame({'a': [True, False]})),
(pd.DataFrame({'a': pd.Series(pd.to_timedelta([-1, 1]))}),
pd.DataFrame({'a': pd.Series(pd.to_timedelta([1, -1]))}))
])
def test_neg_numeric(self, df, expected):
assert_frame_equal(-df, expected)
assert_series_equal(-df['a'], expected['a'])
@pytest.mark.parametrize('df, expected', [
(np.array([1, 2], dtype=object), np.array([-1, -2], dtype=object)),
([Decimal('1.0'), Decimal('2.0')], [Decimal('-1.0'), Decimal('-2.0')]),
])
def test_neg_object(self, df, expected):
# GH#21380
df = pd.DataFrame({'a': df})
expected = pd.DataFrame({'a': expected})
assert_frame_equal(-df, expected)
assert_series_equal(-df['a'], expected['a'])
@pytest.mark.parametrize('df', [
pd.DataFrame({'a': ['a', 'b']}),
pd.DataFrame({'a': pd.to_datetime(['2017-01-22', '1970-01-01'])}),
])
def test_neg_raises(self, df):
with pytest.raises(TypeError):
(- df)
with pytest.raises(TypeError):
(- df['a'])
def test_invert(self):
_seriesd = tm.getSeriesData()
df = pd.DataFrame(_seriesd)
assert_frame_equal(-(df < 0), ~(df < 0))
@pytest.mark.parametrize('df', [
pd.DataFrame({'a': [-1, 1]}),
pd.DataFrame({'a': [False, True]}),
pd.DataFrame({'a': pd.Series(pd.to_timedelta([-1, 1]))}),
])
def test_pos_numeric(self, df):
# GH#16073
assert_frame_equal(+df, df)
assert_series_equal(+df['a'], df['a'])
@pytest.mark.parametrize('df', [
# numpy changing behavior in the future
pytest.param(pd.DataFrame({'a': ['a', 'b']}),
marks=[pytest.mark.filterwarnings("ignore")]),
pd.DataFrame({'a': np.array([-1, 2], dtype=object)}),
pd.DataFrame({'a': [Decimal('-1.0'), Decimal('2.0')]}),
])
def test_pos_object(self, df):
# GH#21380
assert_frame_equal(+df, df)
assert_series_equal(+df['a'], df['a'])
@pytest.mark.parametrize('df', [
pd.DataFrame({'a': pd.to_datetime(['2017-01-22', '1970-01-01'])}),
])
def test_pos_raises(self, df):
with pytest.raises(TypeError):
(+ df)
with pytest.raises(TypeError):
(+ df['a'])
class TestDataFrameLogicalOperators(object):
# &, |, ^
def test_logical_ops_empty_frame(self):
# GH#5808
# empty frames, non-mixed dtype
df = DataFrame(index=[1])
result = df & df
assert_frame_equal(result, df)
result = df | df
assert_frame_equal(result, df)
df2 = DataFrame(index=[1, 2])
result = df & df2
assert_frame_equal(result, df2)
dfa = DataFrame(index=[1], columns=['A'])
result = dfa & dfa
assert_frame_equal(result, dfa)
def test_logical_ops_bool_frame(self):
# GH#5808
df1a_bool = DataFrame(True, index=[1], columns=['A'])
result = df1a_bool & df1a_bool
assert_frame_equal(result, df1a_bool)
result = df1a_bool | df1a_bool
assert_frame_equal(result, df1a_bool)
def test_logical_ops_int_frame(self):
# GH#5808
df1a_int = DataFrame(1, index=[1], columns=['A'])
df1a_bool = DataFrame(True, index=[1], columns=['A'])
result = df1a_int | df1a_bool
assert_frame_equal(result, df1a_int)
def test_logical_ops_invalid(self):
# GH#5808
df1 = DataFrame(1.0, index=[1], columns=['A'])
df2 = DataFrame(True, index=[1], columns=['A'])
with pytest.raises(TypeError):
df1 | df2
df1 = DataFrame('foo', index=[1], columns=['A'])
df2 = DataFrame(True, index=[1], columns=['A'])
with pytest.raises(TypeError):
df1 | df2
def test_logical_operators(self):
def _check_bin_op(op):
result = op(df1, df2)
expected = DataFrame(op(df1.values, df2.values), index=df1.index,
columns=df1.columns)
assert result.values.dtype == np.bool_
assert_frame_equal(result, expected)
def _check_unary_op(op):
result = op(df1)
expected = DataFrame(op(df1.values), index=df1.index,
columns=df1.columns)
assert result.values.dtype == np.bool_
assert_frame_equal(result, expected)
df1 = {'a': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True},
'b': {'a': False, 'b': True, 'c': False,
'd': False, 'e': False},
'c': {'a': False, 'b': False, 'c': True,
'd': False, 'e': False},
'd': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True},
'e': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True}}
df2 = {'a': {'a': True, 'b': False, 'c': True, 'd': False, 'e': False},
'b': {'a': False, 'b': True, 'c': False,
'd': False, 'e': False},
'c': {'a': True, 'b': False, 'c': True, 'd': False, 'e': False},
'd': {'a': False, 'b': False, 'c': False,
'd': True, 'e': False},
'e': {'a': False, 'b': False, 'c': False,
'd': False, 'e': True}}
df1 = DataFrame(df1)
df2 = DataFrame(df2)
_check_bin_op(operator.and_)
_check_bin_op(operator.or_)
_check_bin_op(operator.xor)
# operator.neg is deprecated in numpy >= 1.9
_check_unary_op(operator.inv) # TODO: belongs elsewhere
def test_logical_with_nas(self):
d = DataFrame({'a': [np.nan, False], 'b': [True, True]})
# GH4947
# bool comparisons should return bool
result = d['a'] | d['b']
expected = Series([False, True])
assert_series_equal(result, expected)
# GH4604, automatic casting here
result = d['a'].fillna(False) | d['b']
expected = Series([True, True])
assert_series_equal(result, expected)
result = d['a'].fillna(False, downcast=False) | d['b']
expected = Series([True, True])
assert_series_equal(result, expected)
class TestDataFrameOperators(TestData):
@pytest.mark.parametrize('op', [operator.add, operator.sub,
operator.mul, operator.truediv])
def test_operators_none_as_na(self, op):
df = DataFrame({"col1": [2, 5.0, 123, None],
"col2": [1, 2, 3, 4]}, dtype=object)
# since filling converts dtypes from object, changed expected to be
# object
filled = df.fillna(np.nan)
result = op(df, 3)
expected = op(filled, 3).astype(object)
expected[com.isna(expected)] = None
assert_frame_equal(result, expected)
result = op(df, df)
expected = op(filled, filled).astype(object)
expected[com.isna(expected)] = None
assert_frame_equal(result, expected)
result = op(df, df.fillna(7))
assert_frame_equal(result, expected)
result = op(df.fillna(7), df)
assert_frame_equal(result, expected, check_dtype=False)
def test_comparison_invalid(self):
def check(df, df2):
for (x, y) in [(df, df2), (df2, df)]:
# we expect the result to match Series comparisons for
# == and !=, inequalities should raise
result = x == y
expected = DataFrame({col: x[col] == y[col]
for col in x.columns},
index=x.index, columns=x.columns)
assert_frame_equal(result, expected)
result = x != y
expected = DataFrame({col: x[col] != y[col]
for col in x.columns},
index=x.index, columns=x.columns)
assert_frame_equal(result, expected)
pytest.raises(TypeError, lambda: x >= y)
pytest.raises(TypeError, lambda: x > y)
pytest.raises(TypeError, lambda: x < y)
pytest.raises(TypeError, lambda: x <= y)
# GH4968
# invalid date/int comparisons
df = DataFrame(np.random.randint(10, size=(10, 1)), columns=['a'])
df['dates'] = date_range('20010101', periods=len(df))
df2 = df.copy()
df2['dates'] = df['a']
check(df, df2)
df = DataFrame(np.random.randint(10, size=(10, 2)), columns=['a', 'b'])
df2 = DataFrame({'a': date_range('20010101', periods=len(
df)), 'b': date_range('20100101', periods=len(df))})
check(df, df2)
def test_timestamp_compare(self):
# make sure we can compare Timestamps on the right AND left hand side
# GH4982
df = DataFrame({'dates1': date_range('20010101', periods=10),
'dates2': date_range('20010102', periods=10),
'intcol': np.random.randint(1000000000, size=10),
'floatcol': np.random.randn(10),
'stringcol': list(tm.rands(10))})
df.loc[np.random.rand(len(df)) > 0.5, 'dates2'] = pd.NaT
ops = {'gt': 'lt', 'lt': 'gt', 'ge': 'le', 'le': 'ge', 'eq': 'eq',
'ne': 'ne'}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# no nats
if left in ['eq', 'ne']:
expected = left_f(df, Timestamp('20010109'))
result = right_f(Timestamp('20010109'), df)
assert_frame_equal(result, expected)
else:
with pytest.raises(TypeError):
left_f(df, Timestamp('20010109'))
with pytest.raises(TypeError):
right_f(Timestamp('20010109'), df)
# nats
expected = left_f(df, Timestamp('nat'))
result = right_f(Timestamp('nat'), df)
assert_frame_equal(result, expected)
@pytest.mark.parametrize('op,res', [('__eq__', False),
('__ne__', True)])
# TODO: not sure what's correct here.
@pytest.mark.filterwarnings("ignore:elementwise:FutureWarning")
def test_logical_typeerror_with_non_valid(self, op, res):
# we are comparing floats vs a string
result = getattr(self.frame, op)('foo')
assert bool(result.all().all()) is res
def test_binary_ops_align(self):
# test aligning binary ops
# GH 6681
index = MultiIndex.from_product([list('abc'),
['one', 'two', 'three'],
[1, 2, 3]],
names=['first', 'second', 'third'])
df = DataFrame(np.arange(27 * 3).reshape(27, 3),
index=index,
columns=['value1', 'value2', 'value3']).sort_index()
idx = pd.IndexSlice
for op in ['add', 'sub', 'mul', 'div', 'truediv']:
opa = getattr(operator, op, None)
if opa is None:
continue
x = Series([1.0, 10.0, 100.0], [1, 2, 3])
result = getattr(df, op)(x, level='third', axis=0)
expected = pd.concat([opa(df.loc[idx[:, :, i], :], v)
for i, v in x.iteritems()]).sort_index()
assert_frame_equal(result, expected)
x = Series([1.0, 10.0], ['two', 'three'])
result = getattr(df, op)(x, level='second', axis=0)
expected = (pd.concat([opa(df.loc[idx[:, i], :], v)
for i, v in x.iteritems()])
.reindex_like(df).sort_index())
assert_frame_equal(result, expected)
# GH9463 (alignment level of dataframe with series)
midx = MultiIndex.from_product([['A', 'B'], ['a', 'b']])
df = DataFrame(np.ones((2, 4), dtype='int64'), columns=midx)
s = pd.Series({'a': 1, 'b': 2})
df2 = df.copy()
df2.columns.names = ['lvl0', 'lvl1']
s2 = s.copy()
s2.index.name = 'lvl1'
# different cases of integer/string level names:
res1 = df.mul(s, axis=1, level=1)
res2 = df.mul(s2, axis=1, level=1)
res3 = df2.mul(s, axis=1, level=1)
res4 = df2.mul(s2, axis=1, level=1)
res5 = df2.mul(s, axis=1, level='lvl1')
res6 = df2.mul(s2, axis=1, level='lvl1')
exp = DataFrame(np.array([[1, 2, 1, 2], [1, 2, 1, 2]], dtype='int64'),
columns=midx)
for res in [res1, res2]:
assert_frame_equal(res, exp)
exp.columns.names = ['lvl0', 'lvl1']
for res in [res3, res4, res5, res6]:
assert_frame_equal(res, exp)
def test_arith_mixed(self):
left = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 2, 3]})
result = left + left
expected = DataFrame({'A': ['aa', 'bb', 'cc'],
'B': [2, 4, 6]})
assert_frame_equal(result, expected)
def test_arith_getitem_commute(self):
df = DataFrame({'A': [1.1, 3.3], 'B': [2.5, -3.9]})
self._test_op(df, operator.add)
self._test_op(df, operator.sub)
self._test_op(df, operator.mul)
self._test_op(df, operator.truediv)
self._test_op(df, operator.floordiv)
self._test_op(df, operator.pow)
self._test_op(df, lambda x, y: y + x)
self._test_op(df, lambda x, y: y - x)
self._test_op(df, lambda x, y: y * x)
self._test_op(df, lambda x, y: y / x)
self._test_op(df, lambda x, y: y ** x)
self._test_op(df, lambda x, y: x + y)
self._test_op(df, lambda x, y: x - y)
self._test_op(df, lambda x, y: x * y)
self._test_op(df, lambda x, y: x / y)
self._test_op(df, lambda x, y: x ** y)
@staticmethod
def _test_op(df, op):
result = op(df, 1)
if not df.columns.is_unique:
raise ValueError("Only unique columns supported by this test")
for col in result.columns:
assert_series_equal(result[col], op(df[col], 1))
def test_bool_flex_frame(self):
data = np.random.randn(5, 3)
other_data = np.random.randn(5, 3)
df = DataFrame(data)
other = DataFrame(other_data)
ndim_5 = np.ones(df.shape + (1, 3))
# Unaligned
def _check_unaligned_frame(meth, op, df, other):
part_o = other.loc[3:, 1:].copy()
rs = meth(part_o)
xp = op(df, part_o.reindex(index=df.index, columns=df.columns))
assert_frame_equal(rs, xp)
# DataFrame
assert df.eq(df).values.all()
assert not df.ne(df).values.any()
for op in ['eq', 'ne', 'gt', 'lt', 'ge', 'le']:
f = getattr(df, op)
o = getattr(operator, op)
# No NAs
assert_frame_equal(f(other), o(df, other))
_check_unaligned_frame(f, o, df, other)
# ndarray
assert_frame_equal(f(other.values), o(df, other.values))
# scalar
assert_frame_equal(f(0), o(df, 0))
# NAs
msg = "Unable to coerce to Series/DataFrame"
assert_frame_equal(f(np.nan), o(df, np.nan))
with tm.assert_raises_regex(ValueError, msg):
f(ndim_5)
# Series
def _test_seq(df, idx_ser, col_ser):
idx_eq = df.eq(idx_ser, axis=0)
col_eq = df.eq(col_ser)
idx_ne = df.ne(idx_ser, axis=0)
col_ne = df.ne(col_ser)
assert_frame_equal(col_eq, df == Series(col_ser))
assert_frame_equal(col_eq, -col_ne)
assert_frame_equal(idx_eq, -idx_ne)
assert_frame_equal(idx_eq, df.T.eq(idx_ser).T)
assert_frame_equal(col_eq, df.eq(list(col_ser)))
assert_frame_equal(idx_eq, df.eq(Series(idx_ser), axis=0))
assert_frame_equal(idx_eq, df.eq(list(idx_ser), axis=0))
idx_gt = df.gt(idx_ser, axis=0)
col_gt = df.gt(col_ser)
idx_le = df.le(idx_ser, axis=0)
col_le = df.le(col_ser)
assert_frame_equal(col_gt, df > Series(col_ser))
assert_frame_equal(col_gt, -col_le)
assert_frame_equal(idx_gt, -idx_le)
assert_frame_equal(idx_gt, df.T.gt(idx_ser).T)
idx_ge = df.ge(idx_ser, axis=0)
col_ge = df.ge(col_ser)
idx_lt = df.lt(idx_ser, axis=0)
col_lt = df.lt(col_ser)
assert_frame_equal(col_ge, df >= Series(col_ser))
assert_frame_equal(col_ge, -col_lt)
assert_frame_equal(idx_ge, -idx_lt)
assert_frame_equal(idx_ge, df.T.ge(idx_ser).T)
idx_ser = Series(np.random.randn(5))
col_ser = Series(np.random.randn(3))
_test_seq(df, idx_ser, col_ser)
# list/tuple
_test_seq(df, idx_ser.values, col_ser.values)
# NA
df.loc[0, 0] = np.nan
rs = df.eq(df)
assert not rs.loc[0, 0]
rs = df.ne(df)
assert rs.loc[0, 0]
rs = df.gt(df)
assert not rs.loc[0, 0]
rs = df.lt(df)
assert not rs.loc[0, 0]
rs = df.ge(df)
assert not rs.loc[0, 0]
rs = df.le(df)
assert not rs.loc[0, 0]
# complex
arr = np.array([np.nan, 1, 6, np.nan])
arr2 = np.array([2j, np.nan, 7, None])
df = DataFrame({'a': arr})
df2 = DataFrame({'a': arr2})
rs = df.gt(df2)
assert not rs.values.any()
rs = df.ne(df2)
assert rs.values.all()
arr3 = np.array([2j, np.nan, None])
df3 = DataFrame({'a': arr3})
rs = df3.gt(2j)
assert not rs.values.any()
# corner, dtype=object
df1 = DataFrame({'col': ['foo', np.nan, 'bar']})
df2 = DataFrame({'col': ['foo', datetime.now(), 'bar']})
result = df1.ne(df2)
exp = DataFrame({'col': [False, True, False]})
assert_frame_equal(result, exp)
def test_dti_tz_convert_to_utc(self):
base = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz='UTC')
idx1 = base.tz_convert('Asia/Tokyo')[:2]
idx2 = base.tz_convert('US/Eastern')[1:]
df1 = DataFrame({'A': [1, 2]}, index=idx1)
df2 = DataFrame({'A': [1, 1]}, index=idx2)
exp = DataFrame({'A': [np.nan, 3, np.nan]}, index=base)
assert_frame_equal(df1 + df2, exp)
def test_arith_non_pandas_object(self):
df = self.simple
val1 = df.xs('a').values
added = DataFrame(df.values + val1, index=df.index, columns=df.columns)
assert_frame_equal(df + val1, added)
added = DataFrame((df.values.T + val1).T,
index=df.index, columns=df.columns)
assert_frame_equal(df.add(val1, axis=0), added)
val2 = list(df['two'])
added = DataFrame(df.values + val2, index=df.index, columns=df.columns)
assert_frame_equal(df + val2, added)
added = DataFrame((df.values.T + val2).T, index=df.index,
columns=df.columns)
assert_frame_equal(df.add(val2, axis='index'), added)
val3 = np.random.rand(*df.shape)
added = DataFrame(df.values + val3, index=df.index, columns=df.columns)
assert_frame_equal(df.add(val3), added)
@pytest.mark.parametrize('values', [[1, 2], (1, 2), np.array([1, 2]),
range(1, 3), deque([1, 2])])
def test_arith_alignment_non_pandas_object(self, values):
# GH 17901
df = DataFrame({'A': [1, 1], 'B': [1, 1]})
expected = DataFrame({'A': [2, 2], 'B': [3, 3]})
result = df + values
assert_frame_equal(result, expected)
def test_combineFrame(self):
frame_copy = self.frame.reindex(self.frame.index[::2])
del frame_copy['D']
frame_copy['C'][:5] = nan
added = self.frame + frame_copy
indexer = added['A'].dropna().index
exp = (self.frame['A'] * 2).copy()
tm.assert_series_equal(added['A'].dropna(), exp.loc[indexer])
exp.loc[~exp.index.isin(indexer)] = np.nan
tm.assert_series_equal(added['A'], exp.loc[added['A'].index])
assert np.isnan(added['C'].reindex(frame_copy.index)[:5]).all()
# assert(False)
assert np.isnan(added['D']).all()
self_added = self.frame + self.frame
tm.assert_index_equal(self_added.index, self.frame.index)
added_rev = frame_copy + self.frame
assert np.isnan(added['D']).all()
assert np.isnan(added_rev['D']).all()
# corner cases
# empty
plus_empty = self.frame + self.empty
assert np.isnan(plus_empty.values).all()
empty_plus = self.empty + self.frame
assert np.isnan(empty_plus.values).all()
empty_empty = self.empty + self.empty
assert empty_empty.empty
# out of order
reverse = self.frame.reindex(columns=self.frame.columns[::-1])
assert_frame_equal(reverse + self.frame, self.frame * 2)
# mix vs float64, upcast
added = self.frame + self.mixed_float
_check_mixed_float(added, dtype='float64')
added = self.mixed_float + self.frame
_check_mixed_float(added, dtype='float64')
# mix vs mix
added = self.mixed_float + self.mixed_float2
_check_mixed_float(added, dtype=dict(C=None))
added = self.mixed_float2 + self.mixed_float
_check_mixed_float(added, dtype=dict(C=None))
# with int
added = self.frame + self.mixed_int
_check_mixed_float(added, dtype='float64')
def test_combineSeries(self):
# Series
series = self.frame.xs(self.frame.index[0])
added = self.frame + series
for key, s in compat.iteritems(added):
assert_series_equal(s, self.frame[key] + series[key])
larger_series = series.to_dict()
larger_series['E'] = 1
larger_series = Series(larger_series)
larger_added = self.frame + larger_series
for key, s in compat.iteritems(self.frame):
assert_series_equal(larger_added[key], s + series[key])
assert 'E' in larger_added
assert np.isnan(larger_added['E']).all()
# no upcast needed
added = self.mixed_float + series
_check_mixed_float(added)
# vs mix (upcast) as needed
added = self.mixed_float + series.astype('float32')
_check_mixed_float(added, dtype=dict(C=None))
added = self.mixed_float + series.astype('float16')
_check_mixed_float(added, dtype=dict(C=None))
# these raise with numexpr.....as we are adding an int64 to an
# uint64....weird vs int
# added = self.mixed_int + (100*series).astype('int64')
# _check_mixed_int(added, dtype = dict(A = 'int64', B = 'float64', C =
# 'int64', D = 'int64'))
# added = self.mixed_int + (100*series).astype('int32')
# _check_mixed_int(added, dtype = dict(A = 'int32', B = 'float64', C =
# 'int32', D = 'int64'))
# TimeSeries
ts = self.tsframe['A']
# 10890
# we no longer allow auto timeseries broadcasting
# and require explicit broadcasting
added = self.tsframe.add(ts, axis='index')
for key, col in compat.iteritems(self.tsframe):
result = col + ts
assert_series_equal(added[key], result, check_names=False)
assert added[key].name == key
if col.name == ts.name:
assert result.name == 'A'
else:
assert result.name is None
smaller_frame = self.tsframe[:-5]
smaller_added = smaller_frame.add(ts, axis='index')
tm.assert_index_equal(smaller_added.index, self.tsframe.index)
smaller_ts = ts[:-5]
smaller_added2 = self.tsframe.add(smaller_ts, axis='index')
assert_frame_equal(smaller_added, smaller_added2)
# length 0, result is all-nan
result = self.tsframe.add(ts[:0], axis='index')
expected = DataFrame(np.nan, index=self.tsframe.index,
columns=self.tsframe.columns)
assert_frame_equal(result, expected)
# Frame is all-nan
result = self.tsframe[:0].add(ts, axis='index')
expected = DataFrame(np.nan, index=self.tsframe.index,
columns=self.tsframe.columns)
assert_frame_equal(result, expected)
# empty but with non-empty index
frame = self.tsframe[:1].reindex(columns=[])
result = frame.mul(ts, axis='index')
assert len(result) == len(ts)
def test_combineFunc(self):
result = self.frame * 2
tm.assert_numpy_array_equal(result.values, self.frame.values * 2)
# vs mix
result = self.mixed_float * 2
for c, s in compat.iteritems(result):
tm.assert_numpy_array_equal(
s.values, self.mixed_float[c].values * 2)
_check_mixed_float(result, dtype=dict(C=None))
result = self.empty * 2
assert result.index is self.empty.index
assert len(result.columns) == 0
def test_comparisons(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
row = self.simple.xs('a')
ndim_5 = np.ones(df1.shape + (1, 1, 1))
def test_comp(func):
result = func(df1, df2)
tm.assert_numpy_array_equal(result.values,
func(df1.values, df2.values))
with tm.assert_raises_regex(ValueError,
'Wrong number of dimensions'):
func(df1, ndim_5)
result2 = func(self.simple, row)
tm.assert_numpy_array_equal(result2.values,
func(self.simple.values, row.values))
result3 = func(self.frame, 0)
tm.assert_numpy_array_equal(result3.values,
func(self.frame.values, 0))
with tm.assert_raises_regex(ValueError,
'Can only compare identically'
'-labeled DataFrame'):
func(self.simple, self.simple[:2])
test_comp(operator.eq)
test_comp(operator.ne)
test_comp(operator.lt)
test_comp(operator.gt)
test_comp(operator.ge)
test_comp(operator.le)
def test_comparison_protected_from_errstate(self):
missing_df = tm.makeDataFrame()
missing_df.iloc[0]['A'] = np.nan
with np.errstate(invalid='ignore'):
expected = missing_df.values < 0
with np.errstate(invalid='raise'):
result = (missing_df < 0).values
tm.assert_numpy_array_equal(result, expected)
def test_boolean_comparison(self):
# GH 4576
# boolean comparisons with a tuple/list give unexpected results
df = DataFrame(np.arange(6).reshape((3, 2)))
b = np.array([2, 2])
b_r = np.atleast_2d([2, 2])
b_c = b_r.T
lst = [2, 2, 2]
tup = tuple(lst)
# gt
expected = DataFrame([[False, False], [False, True], [True, True]])
result = df > b
assert_frame_equal(result, expected)
result = df.values > b
assert_numpy_array_equal(result, expected.values)
result = df > lst
assert_frame_equal(result, expected)
result = df > tup
assert_frame_equal(result, expected)
result = df > b_r
assert_frame_equal(result, expected)
result = df.values > b_r
assert_numpy_array_equal(result, expected.values)
with pytest.raises(ValueError):
df > b_c
with pytest.raises(ValueError):
df.values > b_c
# ==
expected = DataFrame([[False, False], [True, False], [False, False]])
result = df == b
assert_frame_equal(result, expected)
result = df == lst
assert_frame_equal(result, expected)
result = df == tup
assert_frame_equal(result, expected)
result = df == b_r
assert_frame_equal(result, expected)
result = df.values == b_r
assert_numpy_array_equal(result, expected.values)
with pytest.raises(ValueError):
df == b_c
assert df.values.shape != b_c.shape
# with alignment
df = DataFrame(np.arange(6).reshape((3, 2)),
columns=list('AB'), index=list('abc'))
expected.index = df.index
expected.columns = df.columns
result = df == lst
assert_frame_equal(result, expected)
result = df == tup
assert_frame_equal(result, expected)
def test_combine_generic(self):
df1 = self.frame
df2 = self.frame.loc[self.frame.index[:-5], ['A', 'B', 'C']]
combined = df1.combine(df2, np.add)
combined2 = df2.combine(df1, np.add)
assert combined['D'].isna().all()
assert combined2['D'].isna().all()
chunk = combined.loc[combined.index[:-5], ['A', 'B', 'C']]
chunk2 = combined2.loc[combined2.index[:-5], ['A', 'B', 'C']]
exp = self.frame.loc[self.frame.index[:-5],
['A', 'B', 'C']].reindex_like(chunk) * 2
assert_frame_equal(chunk, exp)
assert_frame_equal(chunk2, exp)
def test_inplace_ops_alignment(self):
# inplace ops / ops alignment
# GH 8511
columns = list('abcdefg')
X_orig = DataFrame(np.arange(10 * len(columns))
.reshape(-1, len(columns)),
columns=columns, index=range(10))
Z = 100 * X_orig.iloc[:, 1:-1].copy()
block1 = list('bedcf')
subs = list('bcdef')
# add
X = X_orig.copy()
result1 = (X[block1] + Z).reindex(columns=subs)
X[block1] += Z
result2 = X.reindex(columns=subs)
X = X_orig.copy()
result3 = (X[block1] + Z[block1]).reindex(columns=subs)
X[block1] += Z[block1]
result4 = X.reindex(columns=subs)
assert_frame_equal(result1, result2)
assert_frame_equal(result1, result3)
assert_frame_equal(result1, result4)
# sub
X = X_orig.copy()
result1 = (X[block1] - Z).reindex(columns=subs)
X[block1] -= Z
result2 = X.reindex(columns=subs)
X = X_orig.copy()
result3 = (X[block1] - Z[block1]).reindex(columns=subs)
X[block1] -= Z[block1]
result4 = X.reindex(columns=subs)
assert_frame_equal(result1, result2)
assert_frame_equal(result1, result3)
assert_frame_equal(result1, result4)
def test_inplace_ops_identity(self):
# GH 5104
# make sure that we are actually changing the object
s_orig = Series([1, 2, 3])
df_orig = DataFrame(np.random.randint(0, 5, size=10).reshape(-1, 5))
# no dtype change
s = s_orig.copy()
s2 = s
s += 1
assert_series_equal(s, s2)
assert_series_equal(s_orig + 1, s)
assert s is s2
assert s._data is s2._data
df = df_orig.copy()
df2 = df
df += 1
assert_frame_equal(df, df2)
assert_frame_equal(df_orig + 1, df)
assert df is df2
assert df._data is df2._data
# dtype change
s = s_orig.copy()
s2 = s
s += 1.5
assert_series_equal(s, s2)
assert_series_equal(s_orig + 1.5, s)
df = df_orig.copy()
df2 = df
df += 1.5
assert_frame_equal(df, df2)
assert_frame_equal(df_orig + 1.5, df)
assert df is df2
assert df._data is df2._data
# mixed dtype
arr = np.random.randint(0, 10, size=5)
df_orig = DataFrame({'A': arr.copy(), 'B': 'foo'})
df = df_orig.copy()
df2 = df
df['A'] += 1
expected = DataFrame({'A': arr.copy() + 1, 'B': 'foo'})
assert_frame_equal(df, expected)
assert_frame_equal(df2, expected)
assert df._data is df2._data
df = df_orig.copy()
df2 = df
df['A'] += 1.5
expected = DataFrame({'A': arr.copy() + 1.5, 'B': 'foo'})
assert_frame_equal(df, expected)
assert_frame_equal(df2, expected)
assert df._data is df2._data
@pytest.mark.parametrize('op', ['add', 'and', 'div', 'floordiv', 'mod',
'mul', 'or', 'pow', 'sub', 'truediv',
'xor'])
def test_inplace_ops_identity2(self, op):
if compat.PY3 and op == 'div':
return
df = DataFrame({'a': [1., 2., 3.],
'b': [1, 2, 3]})
operand = 2
if op in ('and', 'or', 'xor'):
# cannot use floats for boolean ops
df['a'] = [True, False, True]
df_copy = df.copy()
iop = '__i{}__'.format(op)
op = '__{}__'.format(op)
# no id change and value is correct
getattr(df, iop)(operand)
expected = getattr(df_copy, op)(operand)
assert_frame_equal(df, expected)
expected = id(df)
assert id(df) == expected
def test_alignment_non_pandas(self):
index = ['A', 'B', 'C']
columns = ['X', 'Y', 'Z']
df = pd.DataFrame(np.random.randn(3, 3), index=index, columns=columns)
align = pd.core.ops._align_method_FRAME
for val in [[1, 2, 3], (1, 2, 3), np.array([1, 2, 3], dtype=np.int64),
range(1, 4)]:
tm.assert_series_equal(align(df, val, 'index'),
Series([1, 2, 3], index=df.index))
tm.assert_series_equal(align(df, val, 'columns'),
Series([1, 2, 3], index=df.columns))
# length mismatch
msg = 'Unable to coerce to Series, length must be 3: given 2'
for val in [[1, 2], (1, 2), np.array([1, 2]), range(1, 3)]:
with tm.assert_raises_regex(ValueError, msg):
align(df, val, 'index')
with tm.assert_raises_regex(ValueError, msg):
align(df, val, 'columns')
val = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
tm.assert_frame_equal(align(df, val, 'index'),
DataFrame(val, index=df.index,
columns=df.columns))
tm.assert_frame_equal(align(df, val, 'columns'),
DataFrame(val, index=df.index,
columns=df.columns))
# shape mismatch
msg = 'Unable to coerce to DataFrame, shape must be'
val = np.array([[1, 2, 3], [4, 5, 6]])
with tm.assert_raises_regex(ValueError, msg):
align(df, val, 'index')
with tm.assert_raises_regex(ValueError, msg):
align(df, val, 'columns')
val = np.zeros((3, 3, 3))
with pytest.raises(ValueError):
align(df, val, 'index')
with pytest.raises(ValueError):
align(df, val, 'columns')
def test_no_warning(self, all_arithmetic_operators):
df = pd.DataFrame({"A": [0., 0.], "B": [0., None]})
b = df['B']
with tm.assert_produces_warning(None):
getattr(df, all_arithmetic_operators)(b, 0)
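# Illustrative note (not part of the original suite): these tests target pytest, so a
# subset can be run by keyword, e.g.  pytest pandas/tests/frame/test_operators.py -k "logical" -v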
| bsd-3-clause |
rmzoni/python3-training | classificacao/situacao_do_cliente_kfold.py | 1 | 3003 | import pandas as pd
from collections import Counter
import numpy as np
from sklearn.model_selection import cross_val_score  # sklearn.cross_validation was removed in scikit-learn 0.20+
df = pd.read_csv('situacao_do_cliente.csv')
X_df = df[['recencia', 'frequencia', 'semanas_de_inscricao']]
Y_df = df['situacao']
Xdummies_df = pd.get_dummies(X_df).astype(int)
Ydummies_df = Y_df
X = Xdummies_df.values
Y = Ydummies_df.values
porcentagem_de_treino = 0.8
tamanho_de_treino = int(porcentagem_de_treino * len(Y))
# tamanho_de_validacao = len(Y) - tamanho_de_treino
treino_dados = X[:tamanho_de_treino]
treino_marcacoes = Y[:tamanho_de_treino]
validacao_dados = X[tamanho_de_treino:]
validacao_marcacoes = Y[tamanho_de_treino:]
def fit_and_predict(nome, modelo, treino_dados, treino_marcacoes):
k = 10
scores = cross_val_score(modelo, treino_dados, treino_marcacoes, cv=k)
taxa_de_acerto = np.mean(scores)
msg = "Taxa de acerto do {0}: {1}".format(nome, taxa_de_acerto)
print(msg)
return taxa_de_acerto
def teste_real(modelo, validacao_dados, validacao_marcacoes):
resultado = modelo.predict(validacao_dados)
acertos = resultado == validacao_marcacoes
total_de_acertos = sum(acertos)
total_de_elementos = len(validacao_marcacoes)
taxa_de_acerto = 100.0 * total_de_acertos / total_de_elementos
msg = "Taxa de acerto do vencedor entre os dois algoritmos no mundo real: {0}".format(
taxa_de_acerto)
print(msg)
resultados = {}
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import LinearSVC
modeloOneVsRest = OneVsRestClassifier(LinearSVC(random_state=0))
resultadoOneVsRest = fit_and_predict(
"OneVsRest", modeloOneVsRest, treino_dados, treino_marcacoes)
resultados[modeloOneVsRest] = resultadoOneVsRest
from sklearn.multiclass import OneVsOneClassifier
modeloOneVsOne = OneVsOneClassifier(LinearSVC(random_state=0))
resultadoOneVsOne = fit_and_predict(
"OneVsOne", modeloOneVsOne, treino_dados, treino_marcacoes)
resultados[modeloOneVsOne] = resultadoOneVsOne
from sklearn.naive_bayes import MultinomialNB
modeloMultinomial = MultinomialNB()
resultadoMultinomial = fit_and_predict(
"MultinomialNB", modeloMultinomial, treino_dados, treino_marcacoes)
resultados[modeloMultinomial] = resultadoMultinomial
from sklearn.ensemble import AdaBoostClassifier
modeloAdaBoost = AdaBoostClassifier()
resultadoAdaBoost = fit_and_predict(
"AdaBoostClassifier", modeloAdaBoost, treino_dados, treino_marcacoes)
resultados[modeloAdaBoost] = resultadoAdaBoost
vencedor = max(resultados, key=lambda k: resultados[k])
vencedor.fit(treino_dados, treino_marcacoes)
print("Vencerdor: ")
print(vencedor)
teste_real(vencedor, validacao_dados, validacao_marcacoes)
print(Counter(validacao_marcacoes))
acerto_base = max(Counter(validacao_marcacoes).values())
taxa_de_acerto_base = 100.0 * acerto_base / len(validacao_marcacoes)
print("Taxa de acerto base: %f" % taxa_de_acerto_base)
total_de_elementos = len(validacao_dados)
print("Total de teste: %d" % total_de_elementos)
| apache-2.0 |
LLNL/spack | var/spack/repos/builtin/packages/py-sncosmo/package.py | 5 | 1133 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PySncosmo(PythonPackage):
"""SNCosmo is a Python library for high-level supernova cosmology
analysis."""
homepage = "http://sncosmo.readthedocs.io/"
url = "https://pypi.io/packages/source/s/sncosmo/sncosmo-1.2.0.tar.gz"
version('1.2.0', sha256='f3969eec5b25f60c70418dbd64765a2b4735bb53c210c61d0aab68916daea588')
# Required dependencies
# py-sncosmo binaries are duplicates of those from py-astropy
extends('python', ignore=r'bin/.*')
depends_on('py-setuptools', type='build')
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-scipy', type=('build', 'run'))
depends_on('py-astropy', type=('build', 'run'))
# Recommended dependencies
depends_on('py-matplotlib', type=('build', 'run'))
depends_on('py-iminuit', type=('build', 'run'))
depends_on('py-emcee', type=('build', 'run'))
depends_on('py-nestle', type=('build', 'run'))
| lgpl-2.1 |
shouno/FS_MCMC | Wine/wine_fs_exmcmc.py | 2 | 5496 | #
# -*- coding: utf-8 -*-
#
'''Feature selection for Wine Dataset'''
import numpy as np
from numpy.random import binomial, uniform, permutation
from tqdm import tqdm
from sklearn.model_selection import StratifiedKFold
from sklearn.svm import LinearSVC
from sklearn.datasets import load_wine
# Kept at module level: leaving this inside the class makes it extremely slow
def CVScore(sval, X, y, clsf, skf):
'''Definition of 'Energy function', that is a CV score'''
    cidx = (sval == 1.0)  # build the feature-selection mask
scrs = []
if cidx.sum() == 0:
return 0.5
for trn, tst in skf.split(X, y):
clsf.fit(X[trn][:, cidx], y[trn])
pred = clsf.predict(X[tst][:, cidx])
scrs.append(- np.sum(y[tst] == pred) / (y[tst].shape[0]))
return np.array(scrs).mean()
# @jit(nopython=True, cache=True)
def MCstep(sval, energy, beta, X, y, clsf, skf):
'''single MC step for trial value'''
size = sval.shape[0]
acccnt = 0
    if beta == 0.:  # infinite temperature (beta=0.0): resample the whole state
sval = binomial(1, 0.5, size=size) * 2 - 1.
energy = size * CVScore(sval, X, y, clsf, skf)
acccnt = size
return energy, acccnt
    # finite-temperature case
order = permutation(size)
rvals = uniform(0., 1., size)
for idx in order:
oldE = energy
sval[idx] *= -1
newE = size * CVScore(sval, X, y, clsf, skf)
delta = newE - oldE
pdelta = np.exp(-beta * delta)
if rvals[idx] < pdelta:
# 'accept' the state state
energy = newE
acccnt += 1
else:
# 'reject' restore
sval[idx] *= -1
return energy, acccnt
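# Note (added for clarity, not in the original): MCstep is a standard Metropolis sweep --
# each spin flip is accepted with probability min(1, exp(-beta * (E_new - E_old))),
# where the "energy" is size * CVScore, i.e. the scaled cross-validation error rate.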
class FSsingleMC:
'''Feature Selection with Sing MC'''
def __init__(self, dset, beta=1., nsplits=5):
        # classification dataset
self.X = dset['X']
self.y = dset['y']
        self.size = self.X.shape[1]  # selection is over the columns (features) of the dataset
self.beta = beta
self.nsplits = nsplits
self.s = binomial(1, 0.5, size=self.size) * 2 - 1.
        # keep the classifier and the CV splitter as members
self.clsf = LinearSVC(C=1.0)
self.skf = StratifiedKFold(n_splits=nsplits)
        # energy function
self.energy = self.size * CVScore(self.s, self.X, self.y, self.clsf, self.skf)
self.acccnt = 0
class FeatureSelectionEMC:
'''Feature Selextion using EMC'''
def __init__(self, dset, betas=None, nsplits=5, clsf=None):
self.size = dset['X'].shape[1]
if betas is None:
self.nbeta = 12
            self.betas = [pow(1.5, l-7+1) for l in range(self.nbeta)]  # hard-coded temperature ladder
self.betas[0] = 0.
self.MCs = [FSsingleMC(dset, beta=beta, nsplits=nsplits) for beta in self.betas]
self.evnset = [(i, i+1, self.MCs[i], self.MCs[i+1]) for i in range(0, self.nbeta-1, 2)]
self.oddset = [(i, i+1, self.MCs[i], self.MCs[i+1]) for i in range(1, self.nbeta-1, 2)]
# @jit(cache=True)
def mcexstep(self, isodd=False):
'''A exchange MC step'''
for mc in self.MCs:
mc.energy, dummy = MCstep(mc.s, mc.energy, mc.beta, mc.X, mc.y, mc.clsf, mc.skf)
mc.acccnt += dummy
# r = [MCstep(mc.s, mc.energy, mc.beta, mc.X, mc.y) for mc in self.MCs]
exlog = np.arange(self.nbeta)
# exchange process
if isodd:
exset = self.oddset
else:
exset = self.evnset
rvals = uniform(0., 1., len(exset))
for (rval, (id1, id2, mc1, mc2)) in zip(rvals, exset):
r = np.exp((mc2.beta - mc1.beta) * (mc2.energy - mc1.energy))
if rval <= r: # accept exchange
(mc1.s, mc2.s) = (mc2.s, mc1.s)
(mc1.energy, mc2.energy) = (mc2.energy, mc1.energy)
(exlog[id1], exlog[id2]) = (exlog[id2], exlog[id1])
return exlog
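    # Note (added for clarity, not in the original): the swap above uses the replica-exchange
    # (parallel tempering) criterion -- neighbouring temperatures exchange configurations with
    # probability min(1, exp((beta_2 - beta_1) * (E_2 - E_1))), the ratio r computed in mcexstep.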
def trace(self, iterations, reset=False):
'''multiple exmc method for iteration times'''
Es = []
States = []
exlogs = []
if reset is True:
for mc in self.MCs:
mc.acccnt = 0
for it in tqdm(range(iterations)):
exl = self.mcexstep(isodd=bool(it % 2))
exlogs.append(exl)
Es.append([mc.energy for mc in self.MCs])
States.append(np.array([mc.s for mc in self.MCs]))
exlogs = np.array(exlogs).reshape((iterations, self.nbeta))
Es = np.array(Es).reshape((iterations, self.nbeta))
States = np.array(States).reshape((iterations, self.nbeta, self.size))
AccRate = np.array([mc.acccnt/(self.size*iterations) for mc in self.MCs])
return {'Exlog': exlogs, 'Elog': Es, 'Slog': States, 'AccRate': AccRate}
if __name__ == '__main__':
wine = load_wine()
X = wine['data']
y = wine['target']
XX = (X - X.mean(axis=0))/(X.std(axis=0))
yy = np.array(wine['target'] == 0, dtype=np.float)
model = FeatureSelectionEMC(dset={'X': XX, 'y': yy})
burn = model.trace(1000)
mclog = model.trace(1000, reset=True)
np.savez('burnlogB15_12_1000.npz', Betas=model.betas,
Exlog=burn['Exlog'], Elog=burn['Elog'], Slog=burn['Slog'],
AccRate=burn['AccRate'])
np.savez('mclogB15_12_1000.npz', Betas=model.betas,
Exlog=mclog['Exlog'], Elog=mclog['Elog'], Slog=mclog['Slog'],
AccRate=mclog['AccRate'])
| bsd-3-clause |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/examples/user_interfaces/mathtext_wx.py | 3 | 4035 | """
Demonstrates how to convert mathtext to a wx.Bitmap for display in various
controls on wxPython.
"""
import matplotlib
matplotlib.use("WxAgg")
from numpy import arange, sin, pi, cos, log
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.backends.backend_wx import NavigationToolbar2Wx
from matplotlib.figure import Figure
import wx
IS_GTK = 'wxGTK' in wx.PlatformInfo
IS_WIN = 'wxMSW' in wx.PlatformInfo
IS_MAC = 'wxMac' in wx.PlatformInfo
############################################################
# This is where the "magic" happens.
from matplotlib.mathtext import MathTextParser
mathtext_parser = MathTextParser("Bitmap")
def mathtext_to_wxbitmap(s):
ftimage, depth = mathtext_parser.parse(s, 150)
return wx.BitmapFromBufferRGBA(
ftimage.get_width(), ftimage.get_height(),
ftimage.as_rgba_str())
############################################################
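# Example (added for illustration, not in the original demo): the helper above renders a TeX
# string straight to a wx.Bitmap, e.g.  bmp = mathtext_to_wxbitmap(r'$\sqrt{x^2 + 1}$'),
# which is exactly how the button and menu bitmaps below are produced.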
functions = [
(r'$\sin(2 \pi x)$' , lambda x: sin(2*pi*x)),
(r'$\frac{4}{3}\pi x^3$' , lambda x: (4.0 / 3.0) * pi * x**3),
(r'$\cos(2 \pi x)$' , lambda x: cos(2*pi*x)),
(r'$\log(x)$' , lambda x: log(x))
]
class CanvasFrame(wx.Frame):
def __init__(self, parent, title):
wx.Frame.__init__(self, parent, -1, title, size=(550, 350))
self.SetBackgroundColour(wx.NamedColor("WHITE"))
self.figure = Figure()
self.axes = self.figure.add_subplot(111)
self.change_plot(0)
self.canvas = FigureCanvas(self, -1, self.figure)
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.add_buttonbar()
self.sizer.Add(self.canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
self.add_toolbar() # comment this out for no toolbar
menuBar = wx.MenuBar()
# File Menu
menu = wx.Menu()
menu.Append(wx.ID_EXIT, "E&xit\tAlt-X", "Exit this simple sample")
menuBar.Append(menu, "&File")
if IS_GTK or IS_WIN:
# Equation Menu
menu = wx.Menu()
for i, (mt, func) in enumerate(functions):
bm = mathtext_to_wxbitmap(mt)
item = wx.MenuItem(menu, 1000 + i, "")
item.SetBitmap(bm)
menu.AppendItem(item)
self.Bind(wx.EVT_MENU, self.OnChangePlot, item)
menuBar.Append(menu, "&Functions")
self.SetMenuBar(menuBar)
self.SetSizer(self.sizer)
self.Fit()
def add_buttonbar(self):
self.button_bar = wx.Panel(self)
self.button_bar_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.sizer.Add(self.button_bar, 0, wx.LEFT | wx.TOP | wx.GROW)
for i, (mt, func) in enumerate(functions):
bm = mathtext_to_wxbitmap(mt)
button = wx.BitmapButton(self.button_bar, 1000 + i, bm)
self.button_bar_sizer.Add(button, 1, wx.GROW)
self.Bind(wx.EVT_BUTTON, self.OnChangePlot, button)
self.button_bar.SetSizer(self.button_bar_sizer)
def add_toolbar(self):
"""Copied verbatim from embedding_wx2.py"""
self.toolbar = NavigationToolbar2Wx(self.canvas)
self.toolbar.Realize()
if IS_MAC:
self.SetToolBar(self.toolbar)
else:
tw, th = self.toolbar.GetSizeTuple()
fw, fh = self.canvas.GetSizeTuple()
self.toolbar.SetSize(wx.Size(fw, th))
self.sizer.Add(self.toolbar, 0, wx.LEFT | wx.EXPAND)
self.toolbar.update()
def OnPaint(self, event):
self.canvas.draw()
def OnChangePlot(self, event):
self.change_plot(event.GetId() - 1000)
def change_plot(self, plot_number):
t = arange(1.0,3.0,0.01)
s = functions[plot_number][1](t)
self.axes.clear()
self.axes.plot(t, s)
self.Refresh()
class MyApp(wx.App):
def OnInit(self):
frame = CanvasFrame(None, "wxPython mathtext demo app")
self.SetTopWindow(frame)
frame.Show(True)
return True
app = MyApp()
app.MainLoop()
| gpl-2.0 |
cdawei/digbeta | dchen/music/src/PLGEN1_subgrad.py | 2 | 2630 | import os
import sys
import gzip
import time
import numpy as np
import pickle as pkl
from sklearn.metrics import roc_auc_score
from models import MTR_subgrad as MTR
if len(sys.argv) != 7:
print('Usage: python', sys.argv[0],
'WORK_DIR DATASET C1 C2 C3 TRAIN_DEV(Y/N)')
sys.exit(0)
else:
work_dir = sys.argv[1]
dataset = sys.argv[2]
C1 = float(sys.argv[3])
C2 = float(sys.argv[4])
C3 = float(sys.argv[5])
trndev = sys.argv[6]
# assert trndev in ['Y', 'N']
# assert trndev == 'Y'
if trndev != 'Y':
raise ValueError('trndev should be "Y"')
data_dir = os.path.join(work_dir, 'data/%s/setting3' % dataset)
fx = os.path.join(data_dir, 'X.pkl.gz')
fytrain = os.path.join(data_dir, 'Y_train.pkl.gz')
fytest = os.path.join(data_dir, 'Y_test.pkl.gz')
fcliques_train = os.path.join(data_dir, 'cliques_train.pkl.gz')
fcliques_all = os.path.join(data_dir, 'cliques_all.pkl.gz')
fprefix = 'trndev-plgen1-mtr-%g-%g-%g' % (C1, C2, C3)
fmodel = os.path.join(data_dir, '%s.pkl.gz' % fprefix)
fnpy = os.path.join(data_dir, '%s.npy' % fprefix)
X = pkl.load(gzip.open(fx, 'rb'))
Y_train = pkl.load(gzip.open(fytrain, 'rb'))
Y_test = pkl.load(gzip.open(fytest, 'rb'))
cliques_train = pkl.load(gzip.open(fcliques_train, 'rb'))
cliques_all = pkl.load(gzip.open(fcliques_all, 'rb'))
print('C: %g, %g, %g' % (C1, C2, C3))
print(X.shape, Y_train.shape)
print(time.strftime('%Y-%m-%d %H:%M:%S'))
if os.path.exists(fmodel):
print('evaluating ...')
clf = pkl.load(gzip.open(fmodel, 'rb')) # for evaluation
else:
print('training ...')
clf = MTR(X, Y_train, C1=C1, C2=C2, C3=C3, cliques=cliques_train)
clf.fit(verbose=2, fnpy=fnpy)
if clf.trained is True:
pkl.dump(clf, gzip.open(fmodel, 'wb'))
pl2u = np.zeros(Y_train.shape[1] + Y_test.shape[1], dtype=np.int)
U = len(cliques_train)
assert len(cliques_all) == U
for u in range(U):
clq = cliques_all[u]
pl2u[clq] = u
assert np.all(clf.pl2u == pl2u[:Y_train.shape[1]])
rps = []
aucs = []
offset = Y_train.shape[1]
for j in range(Y_test.shape[1]):
y_true = Y_test[:, j].A.reshape(-1)
npos = y_true.sum()
assert npos > 0
u = pl2u[j + offset]
wj = clf.V[u, :] + clf.mu
y_pred = np.dot(X, wj).reshape(-1)
sortix = np.argsort(-y_pred)
y_ = y_true[sortix]
rps.append(np.mean(y_[:npos]))
aucs.append(roc_auc_score(y_true, y_pred))
clf.metric_score = (np.mean(rps), np.mean(aucs), len(rps), Y_test.shape[1])
pkl.dump(clf, gzip.open(fmodel, 'wb'))
print('\n%g, %g, %d / %d' % clf.metric_score)
| gpl-3.0 |
drogenlied/qudi | logic/jupyterkernel/qzmqkernel.py | 1 | 35425 | # -*- coding: utf-8 -*-
"""
Qt-based IPython/jupyter kernel
Qudi is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Qudi is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Qudi. If not, see <http://www.gnu.org/licenses/>.
------------------------------------------------------------------------------
based on simple_kernel.py (https://github.com/dsblank/simple_kernel)
by Doug Blank <[email protected]>
placed in the public domain, see
https://github.com/dsblank/simple_kernel/issues/5
------------------------------------------------------------------------------
Parts of this file were taken from
https://github.com/ipython/ipython/blob/master/IPython/core/interactiveshell.py
which carries the following attributions:
Copyright (C) 2001 Janko Hauser <[email protected]>
Copyright (C) 2001-2007 Fernando Perez. <[email protected]>
Copyright (C) 2008-2011 The IPython Development Team
Distributed under the terms of the BSD License. The full license is in
the file documentation/BSDLicense_IPython.md,
distributed as part of this software.
------------------------------------------------------------------------------
Copyright (c) the Qudi Developers. See the COPYRIGHT.txt file at the
top-level directory of this distribution and at <https://github.com/Ulm-IQO/qudi/>
"""
## General Python imports:
import sys
import json
import hmac
import uuid
import errno
import hashlib
import datetime
import logging
from warnings import warn
import ast
import traceback
import jedi
# zmq specific imports:
import zmq
from io import StringIO
from zmq.error import ZMQError
from .compilerop import CachingCompiler, check_linecache_ipython
from .display_trap import DisplayTrap
from .builtin_trap import BuiltinTrap
from .redirect import redirect_stdout, redirect_stderr
from .stream import QZMQStream
from .helpers import *
from .events import EventManager, available_events
from IPython.core.error import InputRejected
from qtpy import QtCore
class QZMQHeartbeat(QtCore.QObject):
""" Echo Messages on a ZMQ stream. """
def __init__(self, stream):
super().__init__()
self.stream = stream
self.stream.sigMsgRecvd.connect(self.beat)
def beat(self, msg):
""" Send a message back.
@param msg: message to be sent back
"""
logging.debug( "HB: %s" % msg)
if len(msg) > 0:
retmsg = msg[0]
try:
self.stream.socket.send(retmsg)
except zmq.ZMQError as e:
if e.errno != errno.EINTR:
raise
class QZMQKernel(QtCore.QObject):
""" A Qt-based embeddable kernel for Jupyter. """
sigShutdownFinished = QtCore.Signal(str)
supported_mime = (
'text/plain',
'text/html',
'text/markdown',
'text/latex',
'application/json',
'application/javascript',
'image/png',
'image/jpeg',
'image/svg+xml'
)
def __init__(self, config=None):
super().__init__()
self.DELIM = b"<IDS|MSG>"
# namespaces
self.user_global_ns = globals()
self.user_ns = self.user_global_ns
self.exiting = False
self.engine_id = str(uuid.uuid4())
if config is not None:
self.config = config
else:
logging.info( "Starting simple_kernel with default args...")
self.config = {
'control_port' : 0,
'hb_port' : 0,
'iopub_port' : 0,
'ip' : '127.0.0.1',
'key' : str(uuid.uuid4()),
'shell_port' : 0,
'signature_scheme' : 'hmac-sha256',
'stdin_port' : 0,
'transport' : 'tcp'
}
self.hb_thread = QtCore.QThread()
self.hb_thread.setObjectName(self.engine_id)
self.connection = config["transport"] + "://" + config["ip"]
self.secure_key = config["key"].encode('ascii')
self.signature_schemes = {"hmac-sha256": hashlib.sha256}
self.auth = hmac.HMAC(
self.secure_key,
digestmod=self.signature_schemes[self.config["signature_scheme"]])
logging.info('New Kernel {}'.format(self.engine_id))
@QtCore.Slot()
def connect_kernel(self):
# Heartbeat:
self.ctx = zmq.Context()
self.heartbeat_socket = self.ctx.socket(zmq.REP)
self.config["hb_port"] = self.bind(self.heartbeat_socket, self.connection, self.config["hb_port"])
self.heartbeat_stream = QZMQStream(self.heartbeat_socket)
# IOPub/Sub:
# also called SubSocketChannel in IPython sources
self.iopub_socket = self.ctx.socket(zmq.PUB)
self.config["iopub_port"] = self.bind(self.iopub_socket, self.connection, self.config["iopub_port"])
self.iopub_stream = QZMQStream(self.iopub_socket)
self.iopub_stream.sigMsgRecvd.connect(self.iopub_handler)
# Control:
self.control_socket = self.ctx.socket(zmq.ROUTER)
self.config["control_port"] = self.bind(self.control_socket, self.connection, self.config["control_port"])
self.control_stream = QZMQStream(self.control_socket)
self.control_stream.sigMsgRecvd.connect(self.control_handler)
# Stdin:
self.stdin_socket = self.ctx.socket(zmq.ROUTER)
self.config["stdin_port"] = self.bind(self.stdin_socket, self.connection, self.config["stdin_port"])
self.stdin_stream = QZMQStream(self.stdin_socket)
self.stdin_stream.sigMsgRecvd.connect(self.stdin_handler)
# Shell:
self.shell_socket = self.ctx.socket(zmq.ROUTER)
self.config["shell_port"] = self.bind(self.shell_socket, self.connection, self.config["shell_port"])
self.shell_stream = QZMQStream(self.shell_socket)
self.shell_stream.sigMsgRecvd.connect(self.shell_handler)
logging.info( "Config: %s" % json.dumps(self.config))
logging.info( "Starting loops...")
self.heartbeat_handler = QZMQHeartbeat(self.heartbeat_stream)
self.heartbeat_handler.moveToThread(self.hb_thread)
self.hb_thread.start()
self.init_exec_env()
logging.info( "Ready! Listening...")
def init_exec_env(self):
self.execution_count = 1
self.ast_node_interactivity = 'last_expr'
self.compile = CachingCompiler()
self.events = EventManager(self, available_events)
self.ast_transformers = list()
self.displaydata = list()
self.displayhook = DisplayHook()
self.display_trap = DisplayTrap(self.displayhook)
self.builtin_trap = BuiltinTrap()
setup_matplotlib(self)
@QtCore.Slot()
def shutdown(self):
self.iopub_stream.close()
self.stdin_stream.close()
self.shell_stream.close()
self.control_stream.close()
self.heartbeat_stream.close()
self.iopub_socket.close()
self.stdin_socket.close()
self.shell_socket.close()
self.control_socket.close()
self.heartbeat_socket.close()
self.hb_thread.quit()
self.sigShutdownFinished.emit(self.engine_id)
def msg_id(self):
""" Return a new uuid for message id """
return str(uuid.uuid4())
def sign(self, msg_lst):
"""
Sign a message with a secure signature.
"""
h = self.auth.copy()
for m in msg_lst:
h.update(m)
return h.hexdigest().encode('ascii')
def new_header(self, msg_type):
"""make a new header"""
return {
"date": datetime.datetime.now().isoformat(),
"msg_id": self.msg_id(),
"username": "kernel",
"session": self.engine_id,
"msg_type": msg_type,
"version": "5.0",
}
def send(self, stream, msg_type, content=None, parent_header=None, metadata=None, identities=None):
header = self.new_header(msg_type)
if content is None:
content = {}
if parent_header is None:
parent_header = {}
if metadata is None:
metadata = {}
def jencode(msg):
return json.dumps(msg).encode('ascii')
msg_lst = [
jencode(header),
jencode(parent_header),
jencode(metadata),
jencode(content),
]
signature = self.sign(msg_lst)
parts = [self.DELIM,
signature,
msg_lst[0],
msg_lst[1],
msg_lst[2],
msg_lst[3]]
if identities:
parts = identities + parts
logging.debug( "send parts: %s" % parts)
stream.socket.send_multipart(parts)
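    # Note (added for clarity, not in the original): the multipart message assembled above
    # follows the Jupyter wire protocol frame order --
    #   [routing ids..., b"<IDS|MSG>", signature, header, parent_header, metadata, content]
    # where the signature is the HMAC (see sign()) over the four JSON frames.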
def display_data(self, mimetype, fmt_dict, metadata=None):
#fmt_dict, md_dict = formatter(mimetype, obj)
dataenc = encode_images(fmt_dict)
if mimetype in self.supported_mime:
content = {
'source': '',
'data': dataenc,
'metadata': {}
}
if metadata is not None:
content['metadata'] = metadata
self.displaydata.append(content)
# Socket Handlers:
def shell_handler(self, msg):
logging.debug( "shell received: %s" % msg)
position = 0
identities, msg = self.deserialize_wire_msg(msg)
# process some of the possible requests:
# execute_request, execute_reply, inspect_request, inspect_reply
# complete_request, complete_reply, history_request, history_reply
# is_complete_request, is_complete_reply, connect_request, connect_reply
# kernel_info_request, kernel_info_reply, shutdown_request, shutdown_reply
if msg['header']["msg_type"] == "execute_request":
self.shell_execute(identities, msg)
elif msg['header']["msg_type"] == "kernel_info_request":
self.shell_kernel_info(identities, msg)
elif msg['header']["msg_type"] == "complete_request":
self.shell_complete(identities, msg)
elif msg['header']["msg_type"] == "history_request":
self.shell_history(identities, msg)
else:
logging.info( "unknown msg_type: %s" % msg['header']["msg_type"])
def shell_execute(self, identities, msg):
logging.debug( "simple_kernel Executing: %s" % msg['content']["code"])
# tell the notebook server that we are busy
content = {
'execution_state': "busy",
}
self.send(self.iopub_stream, 'status', content, parent_header=msg['header'])
# use the code we just got sent as input cell contents
content = {
'execution_count': self.execution_count,
'code': msg['content']["code"],
}
self.send(self.iopub_stream, 'execute_input', content, parent_header=msg['header'])
#capture output
self.displaydata = list()
stream_stdout = StringIO()
stream_stderr = StringIO()
with redirect_stderr(stream_stderr):
with redirect_stdout(stream_stdout):
# actual execution
try:
res = self.run_cell(msg['content']['code'])
except Exception as e:
res = ExecutionResult()
tb = traceback.format_exc()
print('{}\n{}'.format(e, tb))
# send captured output if there is any
res.captured_stdout = stream_stdout.getvalue()
stream_stdout.close()
res.captured_stderr = stream_stderr.getvalue()
stream_stderr.close()
if len(res.captured_stdout) > 0:
content = {
'name': "stdout",
'text': res.captured_stdout,
}
self.send(self.iopub_stream, 'stream', content, parent_header=msg['header'])
if len(res.captured_stderr) > 0:
content = {
'name': "stderr",
'text': res.captured_stderr,
}
self.send(self.iopub_stream, 'stream', content, parent_header=msg['header'])
# send captured result if there is any
if len(res.result) > 0:
content = {
'execution_count': self.execution_count,
'data': {"text/plain": res.result[0]},
'metadata': {}
}
self.send(
self.iopub_stream,
'execute_result',
content,
parent_header=msg['header'])
# output data from this run
for content in self.displaydata:
self.send(
self.iopub_stream,
'display_data',
content,
parent_header=msg['header'])
#tell the notebook server that we are not busy anymore
content = {
'execution_state': "idle",
}
self.send(self.iopub_stream, 'status', content, parent_header=msg['header'])
# publich execution result on shell channel
metadata = {
"dependencies_met": True,
"engine": self.engine_id,
"status": "ok",
"started": datetime.datetime.now().isoformat(),
}
content = {
"status": "ok",
"execution_count": self.execution_count,
"user_variables": {},
"payload": [],
"user_expressions": {},
}
self.send(
self.shell_stream,
'execute_reply',
content,
metadata=metadata,
parent_header=msg['header'],
identities=identities)
self.execution_count += 1
def shell_kernel_info(self, identities, msg):
content = {
"protocol_version": "5.0",
"ipython_version": [1, 1, 0, ""],
"language_version": [0, 0, 1],
"language": "qudi_kernel",
"implementation": "qudi_kernel",
"implementation_version": "1.1",
"language_info": {
"name": "python",
"version": sys.version.split()[0],
'mimetype': "text/x-python",
'file_extension': ".py",
'pygments_lexer': "ipython3",
'codemirror_mode': {
'name': 'ipython',
'version': sys.version.split()[0]
},
'nbconvert_exporter': "python",
},
"banner": "Hue!"
}
self.send(
self.shell_stream,
'kernel_info_reply',
content,
parent_header=msg['header'],
identities=identities)
def shell_history(self, identities, msg):
logging.info( "unhandled history request")
def shell_complete(self, identities, msg):
code = msg['content']['code']
cursor_pos = msg['content']['cursor_pos']
linenr, colnr = cursor_pos_to_lc(code, cursor_pos)
script = jedi.Interpreter(
code,
[self.user_ns, self.user_global_ns],
line=linenr,
column=colnr)
completions = script.completions()
matches = [c.name_with_symbols for c in completions]
rests = [len(c.name_with_symbols) - len(c.complete) for c in completions]
replace_start = cursor_pos - rests[0] if len(rests) > 0 else cursor_pos
content = {
'matches' : matches,
'cursor_start' : replace_start,
'cursor_end' : cursor_pos,
'status' : 'ok'
}
metadata = {}
self.send(
self.shell_stream,
'complete_reply',
content,
metadata=metadata,
parent_header=msg['header'],
identities=identities)
def deserialize_wire_msg(self, wire_msg):
"""split the routing prefix and message frames from a message on the wire"""
delim_idx = wire_msg.index(self.DELIM)
identities = wire_msg[:delim_idx]
m_signature = wire_msg[delim_idx + 1]
msg_frames = wire_msg[delim_idx + 2:]
def jdecode(msg):
return json.loads(msg.decode('ascii'))
m = {}
m['header'] = jdecode(msg_frames[0])
m['parent_header'] = jdecode(msg_frames[1])
m['metadata'] = jdecode(msg_frames[2])
m['content'] = jdecode(msg_frames[3])
check_sig = self.sign(msg_frames)
if check_sig != m_signature:
raise ValueError("Signatures do not match")
return identities, m
def control_handler(self, wire_msg):
# process some of the possible requests:
# execute_request, execute_reply, inspect_request, inspect_reply
# complete_request, complete_reply, history_request, history_reply
# is_complete_request, is_complete_reply, connect_request, connect_reply
# kernel_info_request, kernel_info_reply, shutdown_request, shutdown_reply
logging.debug( "control received: %s" % wire_msg)
identities, msg = self.deserialize_wire_msg(wire_msg)
# Control message handler:
if msg['header']["msg_type"] == "shutdown_request":
self.shutdown()
def iopub_handler(self, msg):
# handle some of these messages:
# stream, display_data, data_pub, execute_input, execute_result
# error, status, clear_output
logging.debug( "iopub received: %s" % msg)
def stdin_handler(self, msg):
# handle some of these messages:
# input_request, input_reply
logging.debug( "stdin received: %s" % msg)
def bind(self, socket, connection, port):
if port <= 0:
return socket.bind_to_random_port(connection)
else:
socket.bind("%s:%s" % (connection, port))
return port
def run_cell(self, raw_cell, store_history=False, silent=False, shell_futures=True):
"""Run a complete IPython cell.
Parameters
----------
raw_cell : str
The code (including IPython code such as %magic functions) to run.
store_history : bool
If True, the raw and translated cell will be stored in IPython's
history. For user code calling back into IPython's machinery, this
should be set to False.
silent : bool
If True, avoid side-effects, such as implicit displayhooks and
logging. silent=True forces store_history=False.
shell_futures : bool
If True, the code will share future statements with the interactive
shell. It will both be affected by previous __future__ imports, and
any __future__ imports in the code will affect the shell. If False,
__future__ imports are not shared in either direction.
Returns
-------
result : :class:`ExecutionResult`
"""
result = ExecutionResult()
if (not raw_cell) or raw_cell.isspace():
return result
if silent:
store_history = False
if store_history:
result.execution_count = self.execution_count
def error_before_exec(value):
result.error_before_exec = value
return result
self.events.trigger('pre_execute')
if not silent:
self.events.trigger('pre_run_cell')
# If any of our input transformation (input_transformer_manager or
# prefilter_manager) raises an exception, we store it in this variable
# so that we can display the error after logging the input and storing
# it in the history.
preprocessing_exc_tuple = None
try:
# Static input transformations
#cell = self.input_transformer_manager.transform_cell(raw_cell)
cell = raw_cell
except SyntaxError:
preprocessing_exc_tuple = sys.exc_info()
cell = raw_cell # cell has to exist so it can be stored/logged
else:
if len(cell.splitlines()) == 1:
# Dynamic transformations - only applied for single line commands
try:
# restore trailing newline for ast.parse
#cell = self.prefilter_manager.prefilter_lines(cell + '\n'
cell = cell.rstrip('\n') + '\n'
except Exception:
# don't allow prefilter errors to crash IPython
preprocessing_exc_tuple = sys.exc_info()
# Store raw and processed history
if store_history:
pass
#self.history_manager.store_inputs(self.execution_count,
# cell, raw_cell)
if not silent:
pass
#self.logger.log(cell, raw_cell)
# Display the exception if input processing failed.
if preprocessing_exc_tuple is not None:
self.showtraceback(preprocessing_exc_tuple)
if store_history:
self.execution_count += 1
return error_before_exec(preprocessing_exc_tuple[2])
# Our own compiler remembers the __future__ environment. If we want to
# run code with a separate __future__ environment, use the default
# compiler
compiler = self.compile if shell_futures else CachingCompiler()
with self.builtin_trap:
cell_name = self.compile.cache(cell, self.execution_count)
with self.display_trap:
# Compile to bytecode
try:
code_ast = compiler.ast_parse(cell, filename=cell_name)
except IndentationError as e:
self.showindentationerror()
if store_history:
self.execution_count += 1
return error_before_exec(e)
except (OverflowError, SyntaxError, ValueError, TypeError, MemoryError) as e:
self.showsyntaxerror()
if store_history:
self.execution_count += 1
return error_before_exec(e)
# Apply AST transformations
try:
code_ast = self.transform_ast(code_ast)
except InputRejected as e:
self.showtraceback()
if store_history:
self.execution_count += 1
return error_before_exec(e)
# Give the displayhook a reference to our ExecutionResult so it
# can fill in the output value.
self.displayhook.pass_result_ref(result)
# Execute the user code
interactivity = "none" if silent else self.ast_node_interactivity
self.run_ast_nodes(
code_ast.body,
cell_name,
interactivity=interactivity,
compiler=compiler,
result=result)
# Reset this so later displayed values do not modify the
# ExecutionResult
#self.displayhook.exec_result = None
self.displayhook.pass_result_ref(None)
self.events.trigger('post_execute')
if not silent:
self.events.trigger('post_run_cell')
if store_history:
# Write output to the database. Does nothing unless
# history output logging is enabled.
#self.history_manager.store_output(self.execution_count)
# Each cell is a *single* input, regardless of how many lines it has
self.execution_count += 1
return result
def transform_ast(self, node):
"""Apply the AST transformations from self.ast_transformers
Parameters
----------
node : ast.Node
The root node to be transformed. Typically called with the ast.Module
produced by parsing user input.
Returns
-------
An ast.Node corresponding to the node it was called with. Note that it
may also modify the passed object, so don't rely on references to the
original AST.
"""
for transformer in self.ast_transformers:
try:
node = transformer.visit(node)
except InputRejected:
# User-supplied AST transformers can reject an input by raising
# an InputRejected. Short-circuit in this case so that we
# don't unregister the transform.
raise
except Exception:
warn("AST transformer %r threw an error. It will be unregistered." % transformer)
self.ast_transformers.remove(transformer)
if self.ast_transformers:
ast.fix_missing_locations(node)
return node
def run_ast_nodes(self, nodelist, cell_name, interactivity='last_expr',
compiler=compile, result=None):
"""Run a sequence of AST nodes. The execution mode depends on the
interactivity parameter.
Parameters
----------
nodelist : list
A sequence of AST nodes to run.
cell_name : str
Will be passed to the compiler as the filename of the cell. Typically
the value returned by ip.compile.cache(cell).
interactivity : str
'all', 'last', 'last_expr' or 'none', specifying which nodes should be
run interactively (displaying output from expressions). 'last_expr'
will run the last node interactively only if it is an expression (i.e.
expressions in loops or other blocks are not displayed). Other values
for this parameter will raise a ValueError.
compiler : callable
A function with the same interface as the built-in compile(), to turn
the AST nodes into code objects. Default is the built-in compile().
result : ExecutionResult, optional
An object to store exceptions that occur during execution.
Returns
-------
True if an exception occurred while running code, False if it finished
running.
"""
if not nodelist:
return
if interactivity == 'last_expr':
if isinstance(nodelist[-1], ast.Expr):
interactivity = "last"
else:
interactivity = "none"
if interactivity == 'none':
to_run_exec, to_run_interactive = nodelist, []
elif interactivity == 'last':
to_run_exec, to_run_interactive = nodelist[:-1], nodelist[-1:]
elif interactivity == 'all':
to_run_exec, to_run_interactive = [], nodelist
else:
raise ValueError("Interactivity was %r" % interactivity)
try:
for i, node in enumerate(to_run_exec):
mod = ast.Module([node])
code = compiler(mod, cell_name, "exec")
if self.run_code(code, result):
return True
for i, node in enumerate(to_run_interactive):
mod = ast.Interactive([node])
code = compiler(mod, cell_name, "single")
if self.run_code(code, result):
return True
# Flush softspace
if softspace(sys.stdout, 0):
print()
except:
# It's possible to have exceptions raised here, typically by
# compilation of odd code (such as a naked 'return' outside a
# function) that did parse but isn't valid. Typically the exception
# is a SyntaxError, but it's safest just to catch anything and show
# the user a traceback.
# We do only one try/except outside the loop to minimize the impact
# on runtime, and also because if any node in the node list is
# broken, we should stop execution completely.
if result:
result.error_before_exec = sys.exc_info()[1]
self.showtraceback()
return True
return False
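# Worked example (hypothetical cell, just to illustrate the split above):
#   cell = "a = 1\nb = a + 1\nb"
# With interactivity='last_expr', the trailing bare expression `b` is an ast.Expr,
# so the first two statements are compiled in "exec" mode while only `b` is compiled
# in "single" mode, which triggers the displayhook and shows its value.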
def run_code(self, code_obj, result=None):
"""Execute a code object.
When an exception occurs, self.showtraceback() is called to display a
traceback.
Parameters
----------
code_obj : code object
A compiled code object, to be executed
result : ExecutionResult, optional
An object to store exceptions that occur during execution.
Returns
-------
False : successful execution.
True : an error occurred.
"""
# Set our own excepthook in case the user code tries to call it
# directly, so that the IPython crash handler doesn't get triggered
old_excepthook, sys.excepthook = sys.excepthook, self.excepthook
# we save the original sys.excepthook in the instance, in case config
# code (such as magics) needs access to it.
self.sys_excepthook = old_excepthook
outflag = 1 # an error is flagged in several places below, so default to "error occurred"
try:
try:
#self.hooks.pre_run_code_hook()
#rprint('Running code', repr(code_obj)) # dbg
exec(code_obj, self.user_global_ns, self.user_ns)
finally:
# Reset our crash handler in place
sys.excepthook = old_excepthook
except SystemExit as e:
if result is not None:
result.error_in_exec = e
self.showtraceback(exception_only=True)
warn("To exit: use 'exit', 'quit', or Ctrl-D.", level=1)
#except self.custom_exceptions:
# etype, value, tb = sys.exc_info()
# if result is not None:
# result.error_in_exec = value
# self.CustomTB(etype, value, tb)
except:
if result is not None:
result.error_in_exec = sys.exc_info()[1]
self.showtraceback()
else:
outflag = 0
return outflag
def excepthook(self, etype, value, tb):
"""One more defense for GUI apps that call sys.excepthook.
GUI frameworks like wxPython trap exceptions and call
sys.excepthook themselves. I guess this is a feature that
enables them to keep running after exceptions that would
otherwise kill their mainloop. This is a bother for IPython
which expects to catch all of the program exceptions with a try:
except: statement.
Normally, IPython sets sys.excepthook to a CrashHandler instance, so if
any app directly invokes sys.excepthook, it will look to the user like
IPython crashed. In order to work around this, we can disable the
CrashHandler and replace it with this excepthook instead, which prints a
regular traceback using our InteractiveTB. In this fashion, apps which
call sys.excepthook will generate a regular-looking exception from
IPython, and the CrashHandler will only be triggered by real IPython
crashes.
This hook should be used sparingly, only in places which are not likely
to be true IPython errors.
"""
self.showtraceback((etype, value, tb), tb_offset=0)
def _get_exc_info(self, exc_tuple=None):
"""get exc_info from a given tuple, sys.exc_info() or sys.last_type etc.
Ensures sys.last_type,value,traceback hold the exc_info we found,
from whichever source.
raises ValueError if none of these contain any information
"""
if exc_tuple is None:
etype, value, tb = sys.exc_info()
else:
etype, value, tb = exc_tuple
if etype is None:
if hasattr(sys, 'last_type'):
etype, value, tb = sys.last_type, sys.last_value, \
sys.last_traceback
if etype is None:
raise ValueError("No exception to find")
# Now store the exception info in sys.last_type etc.
# WARNING: these variables are somewhat deprecated and not
# necessarily safe to use in a threaded environment, but tools
# like pdb depend on their existence, so let's set them. If we
# find problems in the field, we'll need to revisit their use.
sys.last_type = etype
sys.last_value = value
sys.last_traceback = tb
return etype, value, tb
def get_exception_only(self, exc_tuple=None):
"""
Return as a string (ending with a newline) the exception that
just occurred, without any traceback.
"""
etype, value, tb = self._get_exc_info(exc_tuple)
msg = traceback.format_exception_only(etype, value)
return ''.join(msg)
def showtraceback(self, exc_tuple=None, filename=None, tb_offset=None,
exception_only=False):
"""Display the exception that just occurred.
If nothing is known about the exception, this is the method which
should be used throughout the code for presenting user tracebacks,
rather than directly invoking the InteractiveTB object.
A specific showsyntaxerror() also exists, but this method can take
care of calling it if needed, so unless you are explicitly catching a
SyntaxError exception, don't try to analyze the stack manually and
simply call this method."""
try:
try:
etype, value, tb = self._get_exc_info(exc_tuple)
except ValueError:
print('No traceback available to show.', file=sys.stderr)
return
else:
traceback.print_exception(etype, value, tb)
except KeyboardInterrupt:
print('\n' + self.get_exception_only(), file=sys.stderr)
def showsyntaxerror(self, filename=None):
self.showtraceback()
def showindentationerror(self):
self.showtraceback()
##############################################################################
# Main
##############################################################################
if __name__ == '__main__':
logging.basicConfig(
format='%(asctime)s %(levelname)s: %(message)s',
datefmt='%Y-%m-%d %I:%M:%S %p',
level=logging.INFO)
logging.info( "Loading simple_kernel with args: %s" % sys.argv)
logging.info( "Reading config file '%s'..." % sys.argv[1])
config = json.loads("".join(open(sys.argv[1]).readlines()))
app = QtCore.QCoreApplication(sys.argv)
kernel = QZMQKernel(config)
kernel.sigShutdownFinished.connect(app.quit)
QtCore.QMetaObject.invokeMethod(kernel, 'connect_kernel')
logging.info( "GO!")
app.exec_()
logging.info("Done.")
| gpl-3.0 |
AnasGhrab/scikit-learn | examples/linear_model/plot_lasso_model_selection.py | 311 | 5431 | """
===================================================
Lasso model selection: Cross-Validation / AIC / BIC
===================================================
Use the Akaike information criterion (AIC), the Bayes Information
criterion (BIC) and cross-validation to select an optimal value
of the regularization parameter alpha of the :ref:`lasso` estimator.
Results obtained with LassoLarsIC are based on AIC/BIC criteria.
Information-criterion based model selection is very fast, but it
relies on a proper estimation of degrees of freedom. The criteria are
derived for large samples (asymptotic results) and assume the model
is correct, i.e. that the data are actually generated by this model.
They also tend to break when the problem is badly conditioned
(more features than samples).
For cross-validation, we use 20-fold with 2 algorithms to compute the
Lasso path: coordinate descent, as implemented by the LassoCV class, and
Lars (least angle regression) as implemented by the LassoLarsCV class.
Both algorithms give roughly the same results. They differ with regards
to their execution speed and sources of numerical errors.
Lars computes a path solution only for each kink in the path. As a
result, it is very efficient when there are only a few kinks, which is
the case if there are few features or samples. Also, it is able to
compute the full path without setting any meta parameter. By contrast,
coordinate descent computes the path points on a pre-specified
grid (here we use the default). Thus it is more efficient if the number
of grid points is smaller than the number of kinks in the path. Such a
strategy can be interesting if the number of features is really large
and there are enough samples to select a large number of them. In terms of
numerical errors, for heavily correlated variables, Lars will accumulate
more errors, while the coordinate descent algorithm will only sample the
path on a grid.
Note how the optimal value of alpha varies for each fold. This
illustrates why nested cross-validation is necessary when trying to
evaluate the performance of a method for which a parameter is chosen by
cross-validation: this choice of parameter may not be optimal for unseen
data.
"""
print(__doc__)
# Author: Olivier Grisel, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LassoCV, LassoLarsCV, LassoLarsIC
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
rng = np.random.RandomState(42)
X = np.c_[X, rng.randn(X.shape[0], 14)] # add some bad features
# normalize data as done by Lars to allow for comparison
X /= np.sqrt(np.sum(X ** 2, axis=0))
##############################################################################
# LassoLarsIC: least angle regression with BIC/AIC criterion
model_bic = LassoLarsIC(criterion='bic')
t1 = time.time()
model_bic.fit(X, y)
t_bic = time.time() - t1
alpha_bic_ = model_bic.alpha_
model_aic = LassoLarsIC(criterion='aic')
model_aic.fit(X, y)
alpha_aic_ = model_aic.alpha_
def plot_ic_criterion(model, name, color):
alpha_ = model.alpha_
alphas_ = model.alphas_
criterion_ = model.criterion_
plt.plot(-np.log10(alphas_), criterion_, '--', color=color,
linewidth=3, label='%s criterion' % name)
plt.axvline(-np.log10(alpha_), color=color, linewidth=3,
label='alpha: %s estimate' % name)
plt.xlabel('-log(alpha)')
plt.ylabel('criterion')
plt.figure()
plot_ic_criterion(model_aic, 'AIC', 'b')
plot_ic_criterion(model_bic, 'BIC', 'r')
plt.legend()
plt.title('Information-criterion for model selection (training time %.3fs)'
% t_bic)
##############################################################################
# LassoCV: coordinate descent
# Compute paths
print("Computing regularization path using the coordinate descent lasso...")
t1 = time.time()
model = LassoCV(cv=20).fit(X, y)
t_lasso_cv = time.time() - t1
# Display results
m_log_alphas = -np.log10(model.alphas_)
plt.figure()
ymin, ymax = 2300, 3800
plt.plot(m_log_alphas, model.mse_path_, ':')
plt.plot(m_log_alphas, model.mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
label='alpha: CV estimate')
plt.legend()
plt.xlabel('-log(alpha)')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: coordinate descent '
'(train time: %.2fs)' % t_lasso_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
##############################################################################
# LassoLarsCV: least angle regression
# Compute paths
print("Computing regularization path using the Lars lasso...")
t1 = time.time()
model = LassoLarsCV(cv=20).fit(X, y)
t_lasso_lars_cv = time.time() - t1
# Display results
m_log_alphas = -np.log10(model.cv_alphas_)
plt.figure()
plt.plot(m_log_alphas, model.cv_mse_path_, ':')
plt.plot(m_log_alphas, model.cv_mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
label='alpha CV')
plt.legend()
plt.xlabel('-log(alpha)')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: Lars (train time: %.2fs)'
% t_lasso_lars_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
plt.show()
| bsd-3-clause |
lucventurini/mikado | Mikado/tests/locus_test.py | 1 | 227708 | # coding: utf-8
"""
Very basic, all too basic test for some functionalities of locus-like classes.
"""
import dataclasses
import operator
import random
import re
import unittest
import os.path
import logging
from collections import namedtuple
from copy import deepcopy
import io
import marshmallow
import pkg_resources
import pytest
from numpy import arange
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from Mikado._transcripts.scoring_configuration import NumBoolEqualityFilter, ScoringFile, SizeFilter, RangeFilter, \
InclusionFilter, MinMaxScore, TargetScore
from Mikado._transcripts.transcript_base import Metric
from Mikado.configuration import configurator, MikadoConfiguration, DaijinConfiguration
from Mikado import exceptions
from Mikado.parsers import GFF # ,GTF, bed12
from Mikado.parsers.GTF import GtfLine
from Mikado.transcripts.transcript import Transcript
from Mikado.loci import Superlocus, Abstractlocus, Locus, Monosublocus, MonosublocusHolder, Sublocus, Excluded
from Mikado.loci.locus import pad_transcript
from Mikado.transcripts.reference_gene import Gene
from Mikado.utilities.log_utils import create_null_logger, create_default_logger
from Mikado.utilities.overlap import overlap
import itertools
from Mikado.utilities import Interval
from Mikado import loci
from Mikado.loci.abstractlocus import to_bool
import pickle
import inspect
from Mikado.parsers.bed12 import BED12
import pysam
from pytest import mark
from itertools import combinations_with_replacement
from Mikado.scales.assignment.assigner import Assigner
logging.getLogger("matplotlib").setLevel(logging.WARNING)
class OverlapTester(unittest.TestCase):
def test_overlap(self):
"""
Test for overlap function
:return:
"""
self.assertEqual(Abstractlocus.overlap((100, 200), (100, 200)),
100)
self.assertEqual(Abstractlocus.overlap((100, 200), (100, 200)),
overlap((100, 200), (100, 200)))
class ToBoolTester(unittest.TestCase):
def test_bool(self):
for param in (1, True, "True", "tRue", "true", 1.0):
with self.subTest(param=param):
self.assertEqual(to_bool(param), True)
for param in (0, False, "False", "faLse", "false", 0.0):
with self.subTest(param=param):
self.assertEqual(to_bool(param), False)
for param in (2, 5.5, "hello", dict(), "Falsee", "Trrue"):
with self.subTest(param=param), self.assertRaises((AttributeError, TypeError, ValueError)):
to_bool(param)
class ExcludedTester(unittest.TestCase):
def test_excluded(self):
transcript = Transcript(BED12(
"Chr5\t26603002\t26604376\tID=AT5G66650.1;coding=True;phase=0\t0\t-\t26603203\t26604257\t0\t2\t636,650\t0,724"
))
transcript.finalize()
excluded = Excluded(transcript)
transcript2 = Transcript(BED12(
"Chr5\t26611257\t26612891\tID=AT5G66670.1;coding=True;phase=0\t0\t-\t26611473\t26612700\t0\t2\t1470,46\t0,1588"
))
comparison = Assigner.compare(transcript, transcript2)[0]
self.assertEqual(0, comparison.n_f1[0])
excluded.add_transcript_to_locus(transcript2)
self.assertIn(transcript2.id, excluded)
with self.assertRaises(NotImplementedError):
str(excluded)
with self.assertRaises(NotImplementedError):
excluded.filter_and_calculate_scores()
with self.assertRaises(NotImplementedError):
excluded.define_monosubloci()
with self.assertRaises(NotImplementedError):
excluded.is_intersecting()
excluded.remove_transcript_from_locus(transcript2.id)
self.assertNotIn(transcript2.id, excluded)
self.assertEqual(repr(excluded), "{name}\t{chrom}\t{start}\t{end}\t{strand}\t{transcripts}".format(
name="excluded_transcripts",
chrom=excluded.chrom, start=26603003, end=26604376, strand="-",
transcripts="AT5G66650.1"))
monosub = Monosublocus(transcript2)
excluded.add_monosublocus(monosub)
self.assertIn(transcript2.id, excluded)
# test repr
self.assertEqual(repr(excluded), "{name}\t{chrom}\t{start}\t{end}\t{strand}\t{transcripts}".format(
name="excluded_transcripts",
chrom=excluded.chrom, start=26603003, end=26612891, strand="-",
transcripts="AT5G66650.1,AT5G66670.1"))
class SuperlocusTester(unittest.TestCase):
def test_find_lost_transcripts(self):
conf = MikadoConfiguration()
# With this scoring, we are giving a score of 10 to transcripts with a CDS and a cDNA length >= 300.
# We are giving a negative score to any transcript shorter than 300 bps, coding or not
conf.scoring.scoring = {
"cdna_length": MinMaxScore(rescaling="min", multiplier=-5,
filter=SizeFilter(value=300, operator="le")),
"combined_cds_length": MinMaxScore(rescaling="max", multiplier=10,
filter=SizeFilter(value=300, operator="ge"))
}
conf.scoring.requirements.expression = ["cdna_length"]
conf.scoring.requirements.parameters = {"cdna_length": SizeFilter(value=1, operator="ge")}
conf.scoring.requirements._check_my_requirements()
conf.scoring.cds_requirements.expression = ["cdna_length"]
conf.scoring.cds_requirements.parameters = {"cdna_length": SizeFilter(value=1, operator="ge")}
conf.scoring.cds_requirements._check_my_requirements()
conf.pick.clustering.flank = 500
t1 = Transcript(configuration=conf)
t1.chrom, t1.start, t1.end, t1.strand, t1.id = "Chr1", 1101, 2000, "+", "valid"
t1.add_exons([(1101, 1500), (1601, 2000)])
t1.add_exons([(1201, 1500), (1601, 1900)], features="CDS")
t1.finalize()
self.assertGreater(t1.cdna_length, 300)
self.assertGreater(t1.combined_cds_length, 300)
t2 = Transcript(configuration=conf)
t2.chrom, t2.start, t2.end, t2.strand, t2.id = "Chr1", 700, 900, "+", "invalid.1"
t2.add_exons([(700, 900)])
t2.finalize()
t3 = Transcript(configuration=conf)
t3.chrom, t3.start, t3.end, t3.strand, t3.id = "Chr1", 2100, 2310, "+", "invalid.2"
t3.add_exons([(2100, 2310)])
t3.add_exons([(2100, 2310)], features="CDS")
t3.finalize()
logger = create_default_logger("test_find_lost_transcripts")
sl = Superlocus(t1, configuration=conf, flank=conf.pick.clustering.flank, logger=logger)
sl.add_transcript_to_locus(t2)
sl.add_transcript_to_locus(t3)
self.assertTrue(sorted(sl.transcripts.keys()), sorted([t1.id, t2.id, t3.id]))
sl.filter_and_calculate_scores()
self.assertTrue(sorted(sl.transcripts.keys()), sorted([t1.id, t2.id, t3.id]))
self.assertGreater(sl.scores[t1.id]["score"], 0, sl.scores[t1.id])
self.assertLessEqual(sl.scores[t2.id]["score"], 0)
self.assertLessEqual(sl.scores[t3.id]["score"], 0)
locus = Locus(t1)
sl.loci[locus.id] = locus
sl._find_lost_transcripts()
self.assertEqual(sl.lost_transcripts, dict())
sl.configuration.pick.clustering.purge = False
sl.logger.setLevel("DEBUG")
sl._find_lost_transcripts()
self.assertEqual(sorted(list(sl.lost_transcripts.keys())), sorted([t2.id, t3.id]))
sl.loci = dict()
sl.configuration.pick.clustering.purge = True
sl.define_loci()
self.assertEqual(len(sl.loci), 1)
sl = Superlocus(t1, configuration=conf, flank=conf.pick.clustering.flank, logger=logger)
sl.add_transcript_to_locus(t2)
sl.add_transcript_to_locus(t3)
sl.logger.setLevel("DEBUG")
sl.configuration.pick.clustering.purge = False
sl.define_loci()
self.assertEqual(len(sl.loci), 3)
def test_sl_is_intersecting(self):
t1 = Transcript()
t1.chrom, t1.start, t1.end, t1.strand, t1.id = "Chr1", 1101, 2000, "+", "multi.1"
t1.add_exons([(1101, 1500), (1601, 2000)])
t1.add_exons([(1201, 1500), (1601, 1900)], features="CDS")
t1.finalize()
t2 = Transcript()
t2.chrom, t2.start, t2.end, t2.strand, t2.id = "Chr1", 1001, 2200, "+", "multi.2"
t2.add_exons([(1001, 1500), (1601, 2200)])
t2.add_exons([(1201, 1500), (1601, 1900)], features="CDS")
t2.finalize()
t3 = Transcript()
t3.chrom, t3.start, t3.end, t3.strand, t3.id = "Chr1", 901, 2200, "+", "multi.3"
t3.add_exons([(901, 1500), (1601, 2200)])
t3.finalize()
self.assertEqual(t3.selected_cds_introns, set())
t4 = Transcript()
t4.chrom, t4.start, t4.end, t4.strand, t4.id = "Chr1", 901, 2700, "+", "multi.4"
t4.add_exons([(901, 1500), (1601, 2100), (2201, 2700)])
t4.add_exons([(1801, 2100), (2201, 2500)], features="CDS")
t4.finalize()
self.assertEqual(t4.selected_cds_introns, {(2101, 2200)}, (t4.selected_cds_introns, t4.is_coding))
for prod in itertools.chain(itertools.combinations([t1, t2, t3], 2), [(t3, t4)]):
self.assertTrue(Superlocus.is_intersecting(*prod, cds_only=False))
self.assertTrue(Superlocus.is_intersecting(*prod, cds_only=True))
for prod in [(t1, t4), (t2, t4)]:
self.assertTrue(Superlocus.is_intersecting(*prod, cds_only=False))
self.assertFalse(Superlocus.is_intersecting(*prod, cds_only=True))
t5 = Transcript()
t5.chrom, t5.start, t5.end, t5.strand, t5.id = "Chr1", 1101, 1630, "+", "mono.1"
t5.add_exon((1101, 1630))
t5.add_exon((1101, 1530), feature="CDS")
t5.finalize()
t6 = Transcript()
t6.chrom, t6.start, t6.end, t6.strand, t6.id = "Chr1", 1101, 1530, "+", "mono.2"
t6.add_exon((1101, 1530))
t6.finalize()
t7 = Transcript()
t7.chrom, t7.start, t7.end, t7.strand, t7.id = "Chr1", 1400, 1800, "+", "mono.3"
t7.add_exon((1400, 1800))
t7.add_exon((1591, 1800), feature="CDS")
t7.finalize()
t8 = Transcript()
t8.chrom, t8.start, t8.end, t8.strand, t8.id = "Chr1", 1801, 2110, "+", "mono.4"
t8.add_exon((1801, 2110))
t8.add_exon((1801, 2100), feature="CDS")
t8.finalize()
for mono in [t5, t6, t7]:
self.assertFalse(Superlocus.is_intersecting(mono, t8, cds_only=False))
self.assertFalse(Superlocus.is_intersecting(mono, t8, cds_only=True))
for prod in itertools.combinations([t5, t6, t7], 2):
self.assertTrue(Superlocus.is_intersecting(*prod, cds_only=False))
# CDS-only is activated only when both are coding
if prod[0].is_coding and prod[1].is_coding:
self.assertFalse(Superlocus.is_intersecting(*prod, cds_only=True))
else:
self.assertTrue(Superlocus.is_intersecting(*prod, cds_only=True))
# Now check that monoexonic and multi-exonic transcripts never intersect
for prod in itertools.product([t1, t2, t3, t4], [t5, t6, t7, t8]):
self.assertFalse(Superlocus.is_intersecting(*prod, cds_only=False))
self.assertFalse(Superlocus.is_intersecting(*prod, cds_only=True))
def test_define_graph(self):
t1 = Transcript()
t1.chrom, t1.start, t1.end, t1.strand, t1.id = "Chr1", 1101, 2000, "+", "multi.1"
t1.add_exons([(1101, 1500), (1601, 2000)])
t1.add_exons([(1201, 1500), (1601, 1900)], features="CDS")
t1.finalize()
t2 = Transcript()
t2.chrom, t2.start, t2.end, t2.strand, t2.id = "Chr1", 1001, 2200, "+", "multi.2"
t2.add_exons([(1001, 1500), (1601, 2200)])
t2.add_exons([(1201, 1500), (1601, 1900)], features="CDS")
t2.finalize()
t3 = Transcript()
t3.chrom, t3.start, t3.end, t3.strand, t3.id = "Chr1", 901, 2200, "+", "multi.3"
t3.add_exons([(901, 1500), (1601, 2200)])
t3.finalize()
self.assertEqual(t3.selected_cds_introns, set())
t4 = Transcript()
t4.chrom, t4.start, t4.end, t4.strand, t4.id = "Chr1", 901, 2700, "+", "multi.4"
t4.add_exons([(901, 1500), (1601, 2100), (2201, 2700)])
t4.add_exons([(1801, 2100), (2201, 2500)], features="CDS")
t4.finalize()
self.assertEqual(t4.selected_cds_introns, {(2101, 2200)}, (t4.selected_cds_introns, t4.is_coding))
t5 = Transcript()
t5.chrom, t5.start, t5.end, t5.strand, t5.id = "Chr1", 1101, 1630, "+", "mono.1"
t5.add_exon((1101, 1630))
t5.add_exon((1101, 1530), feature="CDS")
t5.finalize()
t6 = Transcript()
t6.chrom, t6.start, t6.end, t6.strand, t6.id = "Chr1", 1101, 1530, "+", "mono.2"
t6.add_exon((1101, 1530))
t6.finalize()
t7 = Transcript()
t7.chrom, t7.start, t7.end, t7.strand, t7.id = "Chr1", 1400, 1800, "+", "mono.3"
t7.add_exon((1400, 1800))
t7.add_exon((1591, 1800), feature="CDS")
t7.finalize()
t8 = Transcript()
t8.chrom, t8.start, t8.end, t8.strand, t8.id = "Chr1", 1801, 2110, "+", "mono.4"
t8.add_exon((1801, 2110))
t8.add_exon((1801, 2100), feature="CDS")
t8.finalize()
# This is monoexonic for the CDS *but* multiexonic for the cDNA
t9 = Transcript()
t9.chrom, t9.start, t9.end, t9.strand, t9.id = "Chr1", 1121, 2000, "+", "mono_multi"
t9.add_exons([(1121, 1500), (1601, 2000)])
t9.add_exons([(1121, 1490)], features="CDS")
t9.finalize()
self.assertTrue(t9.is_coding)
sl = Superlocus(t1, flank=2000)
sl.configuration.pick.clustering.cds_only = False
[sl.add_transcript_to_locus(t) for t in [t2, t3, t4, t5, t6, t7, t8, t9]]
self.assertEqual(len(sl.transcripts), 9)
graph = sl.define_graph()
multi_ids = [t.id for t in [t1, t2, t3, t4, t9]]
mono_ids = [t.id for t in [t5, t6, t7, t8]]
self.assertTrue(all(edge in graph.edges() for edge in itertools.combinations(multi_ids, 2)))
self.assertTrue(all(edge in graph.edges() for edge in itertools.combinations(mono_ids[:3], 2)))
self.assertTrue(not any(edge in graph.edges() for edge in itertools.product(mono_ids[:3], [t8.id])))
self.assertTrue(not any(edge in graph.edges() for edge in itertools.product(multi_ids,
mono_ids)))
sl.configuration.pick.clustering.cds_only = True
graph = sl.define_graph()
self.assertFalse(all(edge in graph.edges() for edge in itertools.combinations(multi_ids, 2)))
for comb in itertools.combinations(multi_ids, 2):
if set.issubset(set(comb), {t1.id, t2.id, t3.id}):
self.assertIn(comb, graph.edges())
else:
self.assertNotIn(comb, graph.edges())
for comb in itertools.combinations(mono_ids, 2):
if t5.id in comb and t6.id in comb:
self.assertIn(comb, graph.edges())
else:
self.assertNotIn(comb, graph.edges())
for comb in itertools.product(mono_ids, [t9.id]):
if set.issubset(set(comb), {t5.id, t6.id, t9.id}):
self.assertIn(comb, graph.edges())
else:
self.assertNotIn(comb, graph.edges())
self.assertTrue(not any(edge in graph.edges() for edge in itertools.product([t.id for t in [t1, t2, t3, t4]],
[t9.id] + mono_ids)))
class RefGeneTester(unittest.TestCase):
def setUp(self):
# Chr5 26608315 26608866 ID=AT5G66658.1;coding=True;phase=0 0 + 26608328 26608553 0 1 551 0
# Chr5 26609307 26610504 ID=AT5G66660.1;coding=True;phase=0 0 - 26609307 26610504 0 1 1197 0
# Chr5 26611257 26612889 ID=AT5G66670.2;coding=True;phase=0 0 - 26611473 26612700 0 1 1632 0
# Chr5 26611257 26612891 ID=AT5G66670.1;coding=True;phase=0 0 - 26611473 26612700 0 2 1470,460,1588
self.tothers = Transcript(BED12(
"Chr5 26608315 26608866 ID=AT5G66658.1;coding=True;phase=0 0 + 26608328 26608553 0 1 551 0"
))
self.tothers.parent = "AT5G66658"
self.tout = Transcript(BED12(
"Chr5 26609307 26610504 ID=AT5G66660.1;coding=True;phase=0 0 - 26609307 26610504 0 1 1197 0"
))
self.tout.parent = "AT5G66660"
self.t1 = Transcript(BED12(
"Chr5 26611257 26612889 ID=AT5G66670.2;coding=True;phase=0 0 - 26611473 26612700 0 1 1632 0"
))
self.t1.parent = "AT5G66670"
self.t2 = Transcript(BED12(
"Chr5\t26611257\t26612891\tID=AT5G66670.1;coding=True;phase=0\t0\t-\t26611473\t26612700\t0\t2\t1470,46\t0,1588"
))
self.t2.parent = self.t1.parent[:]
def test_basic(self):
gene = Gene(self.t1)
self.assertEqual(gene.id, self.t1.parent[0])
for attr in ["chrom", "source", "start", "end", "strand"]:
self.assertEqual(getattr(gene, attr), getattr(self.t1, attr))
gene.add(self.t2)
self.assertIn(self.t2.id, gene)
gene.finalize()
self.assertEqual(gene.start, min(self.t1.start, self.t2.start))
self.assertEqual(gene.end, max(self.t1.end, self.t2.end),
(self.t1.end, self.t2.end, max(self.t1.end, self.t2.end)))
with self.assertRaises(TypeError):
gene.logger = "foo"
gene.logger = None
self.assertEqual(gene.logger.name, "gene_{0}".format(gene.id))
gene.id = None
gene.logger = None
self.assertEqual(gene.logger.name, "gene_generic")
del gene.logger
self.assertIs(gene.logger, None)
new_gene = pickle.loads(pickle.dumps(gene))
self.assertEqual(gene, new_gene)
self.assertEqual(gene.transcripts, new_gene.transcripts)
with self.assertRaises(ValueError):
g = gene.format("foo")
def test_less_than(self):
g1 = Gene(self.tothers)
g2 = Gene(self.tout)
g3 = Gene(self.t1)
self.assertLess(g1, g2)
self.assertGreater(g3, g2)
self.assertGreater(g3, g1)
def test_deletion(self):
gene = Gene(self.t1)
self.assertEqual(gene.id, self.t1.parent[0])
for attr in ["chrom", "source", "start", "end", "strand"]:
self.assertEqual(getattr(gene, attr), getattr(self.t1, attr))
gene.add(self.t2)
self.assertIn(self.t2.id, gene)
gene.finalize()
gene.remove(self.t2.id)
self.assertEqual((gene.start, gene.end), (self.t1.start, self.t1.end))
gene.remove(self.t1.id)
self.assertEqual((gene.start, gene.end), (None, None))
def test_different_strand(self):
gene = Gene(self.t1)
with self.assertRaises(AssertionError):
gene.add(self.tothers)
self.tout.unfinalize()
self.tout.chrom = "Chr2"
self.tout.finalize()
with self.assertRaises(AssertionError):
gene.add(self.tout)
def test_non_coding(self):
gene = Gene(self.t1)
self.t2.strip_cds()
gene.add(self.t2)
self.assertIn(self.t2.id, gene)
gene.finalize()
self.assertIn(self.t2.id, gene)
gene = Gene(self.t1, only_coding=True)
self.t2.strip_cds()
gene.add(self.t2)
self.assertIn(self.t2.id, gene)
gene.finalize()
self.assertNotIn(self.t2.id, gene)
def test_properties(self):
gene = Gene(self.t2)
self.assertFalse(gene.has_monoexonic)
gene.add(self.t1)
self.assertTrue(gene.has_monoexonic)
gene = Gene(self.t2)
self.t1.unfinalize()
exon = self.t1.exons[0]
self.t1.remove_exon(exon)
row = GtfLine("#")
row.chrom, row.strand, row.start, row.end, row.feature, row.transcript, row.gene = (
self.t1.chrom, self.t1.strand, exon[0], exon[1], "exon", self.t1.id, self.t1.parent[0]
)
gene.add(self.t1)
gene.add_exon(row)
gene.finalize()
self.assertIn(exon, gene[self.t1.id].exons)
self.assertEqual(gene.exons, {exon} | set(self.t2.exons))
self.assertEqual(gene.introns, self.t2.introns)
class AbstractLocusTester(unittest.TestCase):
@pytest.fixture(autouse=True)
def inject_fixtures(self, caplog):
self._caplog = caplog
def setUp(self):
gff_transcript1 = """Chr1\tfoo\ttranscript\t101\t400\t.\t+\t.\tID=t0
Chr1\tfoo\texon\t101\t400\t.\t+\t.\tID=t0:exon1;Parent=t0
Chr1\tfoo\tCDS\t101\t350\t.\t+\t.\tID=t0:exon1;Parent=t0""".split("\n")
gff_transcript1 = [GFF.GffLine(x) for x in gff_transcript1]
self.assertEqual(gff_transcript1[0].chrom, "Chr1", gff_transcript1[0])
self.transcript1 = Transcript(gff_transcript1[0])
for exon in gff_transcript1[1:]:
self.transcript1.add_exon(exon)
self.transcript1.finalize()
gff_transcript2 = """Chr1\tfoo\ttranscript\t1001\t1400\t.\t+\t.\tID=t0
Chr1\tfoo\texon\t1001\t1400\t.\t+\t.\tID=t0:exon1;Parent=t0
Chr1\tfoo\tCDS\t1001\t1350\t.\t+\t.\tID=t0:exon1;Parent=t0""".split("\n")
gff_transcript2 = [GFF.GffLine(x) for x in gff_transcript2]
self.assertEqual(gff_transcript2[0].chrom, "Chr1", gff_transcript2[0])
self.transcript2 = Transcript(gff_transcript2[0])
for exon in gff_transcript2[1:]:
self.transcript2.add_exon(exon)
self.transcript2.finalize()
self.assertTrue(self.transcript1.monoexonic)
self.assertEqual(self.transcript1.chrom, gff_transcript1[0].chrom)
self.assertTrue(self.transcript2.monoexonic)
self.assertEqual(self.transcript2.chrom, gff_transcript2[0].chrom)
self.configuration = configurator.load_and_validate_config(None)
self.assertIsNotNone(self.configuration.scoring, self.configuration)
self.transcript1.configuration = self.configuration
self.transcript2.configuration = self.configuration
self.assertEqual(self.transcript1.configuration.seed, self.transcript2.configuration.seed)
def test_create_metrics_row(self):
transcript = Transcript()
transcript.chrom, transcript.start, transcript.end, transcript.strand = "Chr1", 101, 1000, "+"
transcript.add_exons([(101, 500), (601, 1000)])
transcript.add_exons([(201, 500), (601, 900)], features="CDS")
transcript.id = "foo"
transcript.finalize()
transcript.is_reference = True
for locus in [Locus, Sublocus, Superlocus]:
for score, tpm in itertools.product([None, 10, 1.56789], [None, 10, 2.5689]):
transcript.score = score
transcript.external_scores["tpm"] = [tpm]
instance = locus(transcript)
# locus.scores[transcript.id] = score
instance.get_metrics()
instance.scores_calculated = True
instance.scores[transcript.id] = {"score": score}
self.assertTrue(instance.scores_calculated)
self.assertEqual(transcript.score, score)
row = instance._create_metrics_row(transcript.id, instance._metrics[transcript.id],
transcript)
self.assertEqual(transcript.score, score)
self.assertIsInstance(row, dict)
self.assertIn("tid", row.keys())
if locus == Locus:
self.assertEqual(row["alias"], transcript.id)
else:
self.assertEqual(row["tid"], transcript.id)
self.assertIn("external.tpm", row)
if tpm is None:
self.assertEqual(row["external.tpm"], "NA")
elif tpm == 10:
self.assertEqual(row["external.tpm"], 10)
elif tpm == 2.5689:
self.assertEqual(row["external.tpm"], 2.57)
if score is None:
self.assertEqual(row["score"], "NA", locus)
elif score == 10:
self.assertEqual(row["score"], 10, locus)
else:
self.assertEqual(row["score"], 1.57, locus)
def test_removal(self):
for cls in [Superlocus, Sublocus, Monosublocus, Locus]:
obj = cls(self.transcript1, configuration=self.configuration)
if cls != Monosublocus:
obj.add_transcript_to_locus(self.transcript2, check_in_locus=False)
self.assertEqual(obj.start, min(self.transcript2.start, self.transcript1.start))
self.assertEqual(obj.chrom, "Chr1")
obj._remove_all()
self.assertEqual(obj.chrom, None)
self.assertEqual(obj.start, float("Inf"))
self.assertEqual(obj.end, float("-Inf"))
self.assertEqual(len(obj.transcripts), 0)
def test_invalid_conf(self):
for cls in [Superlocus, Sublocus, Monosublocus, Locus]:
for invalid in ([], ("hello",), 10, "I_do_not_exist.yaml"):
with self.subTest(cls=cls, invalid=invalid), self.assertRaises(exceptions.InvalidConfiguration):
cls(self.transcript1, configuration=invalid)
def test_not_implemented(self):
with self.assertRaises(TypeError):
_ = Abstractlocus(self.transcript1)
def test_equality(self):
for child1, child2 in combinations_with_replacement([Superlocus, Sublocus, Monosublocus, Locus], 2):
obj1, obj2 = (child1(self.transcript1, configuration=self.configuration),
child2(self.transcript1, configuration=self.configuration))
if child1 == child2:
self.assertEqual(obj1, obj2)
else:
self.assertNotEqual(obj1, obj2)
def test_less_than(self):
for child in [Superlocus, Sublocus, Monosublocus, Locus]:
child1, child2 = (child(self.transcript1), child(self.transcript2))
self.assertLess(child1, child2)
self.assertLessEqual(child1, child2)
self.assertLessEqual(child1, child1)
self.assertLessEqual(child2, child2)
self.assertGreater(child2, child1)
self.assertGreaterEqual(child2, child1)
self.assertGreaterEqual(child2, child2)
self.assertGreaterEqual(child1, child1)
def test_invalid_logger(self):
for locus in [Locus, Sublocus, Superlocus, Monosublocus, MonosublocusHolder]:
l = locus(None)
for invalid in [10, dict(), "hello", 5.0]:
with self.subTest(invalid=invalid):
with self.assertRaises(TypeError):
l.logger = invalid
l.logger = None
self.assertIs(l.logger, create_null_logger())
def test_invalid_source(self):
for locus in [Locus, Sublocus, Superlocus, Monosublocus, MonosublocusHolder]:
l = locus(None)
for invalid in [10, dict(), b"hello", 5.0]:
with self.subTest(invalid=invalid):
with self.assertRaises(AssertionError):
l.source = invalid
l.source = None
self.assertEqual(l.source, "Mikado")
l.source = "Test"
self.assertEqual(l.source, "Test")
def test_invalid_stranded(self):
for locus in [Locus, Sublocus, Superlocus, Monosublocus, MonosublocusHolder]:
l = locus(None)
for invalid in [10, dict(), b"hello", 5.0]:
with self.subTest(invalid=invalid):
with self.assertRaises(ValueError):
l.stranded = invalid
l.stranded = False
self.assertFalse(l.stranded)
l.stranded = True
self.assertTrue(l.stranded)
def test_invalid_flank(self):
for locus in [Locus, Sublocus, Superlocus, Monosublocus, MonosublocusHolder]:
l = locus(None)
for invalid in [-10, dict(), b"hello", 5.0]:
with self.subTest(invalid=invalid):
with self.assertRaises(TypeError):
l.flank = invalid
for valid in [0, 100, 1000]:
l.flank = valid
self.assertEqual(l.flank, valid)
def test_calculate_score_empty_locus(self):
logger = create_default_logger("test_calculate_score_empty_locus", level="DEBUG")
for locus in [Locus, Sublocus, Superlocus, Monosublocus, MonosublocusHolder]:
l = locus(None, logger=logger)
for metric in l.available_metrics:
with self.assertLogs(logger, level="DEBUG") as cmo:
l._calculate_score(metric)
self.assertTrue(any([re.search(r"No transcripts in.*\. Returning.", out) is not None for out in
cmo.output]))
def test_get_denominator(self):
# _get_denominator(param: Union[MinMaxScore, TargetScore],
# use_raw: str, metrics: dict) -> (Union[float,int], Union[float, int, bool])
min_score = MinMaxScore(rescaling="min", filter=None)
metrics = {"foo.1": .5, "foo.2": .1, "foo.3": 1}
self.assertEqual(Abstractlocus._get_denominator(min_score, use_raw=False, metrics=dict()), (1, None))
self.assertEqual(Abstractlocus._get_denominator(min_score, use_raw=True, metrics=dict()), (1, None))
self.assertEqual(Abstractlocus._get_denominator(min_score, use_raw=True, metrics=metrics),
(-1, None))
self.assertEqual(Abstractlocus._get_denominator(min_score, use_raw=False, metrics=metrics),
(.9, None))
max_score = MinMaxScore(rescaling="max", filter=None)
self.assertEqual(Abstractlocus._get_denominator(max_score, use_raw=True, metrics=dict()),
(1, None))
self.assertEqual(Abstractlocus._get_denominator(max_score, use_raw=False, metrics=dict()),
(1, None))
self.assertEqual(Abstractlocus._get_denominator(max_score, use_raw=True, metrics=metrics),
(1, None))
self.assertEqual(Abstractlocus._get_denominator(max_score, use_raw=False, metrics=metrics),
(.9, None))
target_score = TargetScore(value=.5, rescaling="target", filter=None)
self.assertEqual(Abstractlocus._get_denominator(target_score, use_raw=True, metrics=dict()),
(1, .5))
self.assertEqual(Abstractlocus._get_denominator(target_score, use_raw=False, metrics=dict()),
(1, .5))
self.assertEqual(Abstractlocus._get_denominator(target_score, use_raw=True, metrics=metrics),
(.5, .5))
self.assertEqual(Abstractlocus._get_denominator(target_score, use_raw=False, metrics=metrics),
(.5, .5))
def test_get_score_for_metric(self):
# _get_score_for_metric(tid_metric, use_raw, target, denominator, param, min_val, max_val)
for invalid in [10, b"10", dict(), 100.0]:
with self.assertRaises(ValueError):
Abstractlocus._get_score_for_metric(invalid, use_raw=True,
target=None, denominator=1, param="fraction",
min_val=0, max_val=1)
max_score = MinMaxScore(rescaling="max", filter=None)
min_score = MinMaxScore(rescaling="min", filter=None)
target_score = TargetScore(rescaling="target", value=5, filter=None)
for num, denominator, rescaling in itertools.product(
arange(0, 1.05, .05), arange(.5, 10, .5), [max_score, min_score, target_score]):
if rescaling.rescaling == "target":
with self.assertRaises(ValueError) as exc:
Abstractlocus._get_score_for_metric(num, use_raw=True,
target=None, denominator=denominator, param=rescaling,
min_val=0, max_val=1)
else:
self.assertEqual(Abstractlocus._get_score_for_metric(num, use_raw=True,
target=None, denominator=denominator, param=rescaling,
min_val=0, max_val=1), round(num / denominator, 2))
min_val = 0
max_val = 1.05
for num, denominator, multiplier in itertools.product(
list(arange(0, 1.05, .05)) + [True, False], arange(.5, 10, .5),
arange(.5, 10, .5)):
max_score = MinMaxScore(rescaling="max", filter=None, multiplier=multiplier)
min_score = MinMaxScore(rescaling="min", filter=None, multiplier=multiplier)
max_result = Abstractlocus._get_score_for_metric(num, use_raw=False,
target=None, denominator=denominator, param=max_score,
min_val=min_val, max_val=max_val)
self.assertEqual(max_result, round(multiplier * abs((num - min_val) / denominator), 2))
min_result = Abstractlocus._get_score_for_metric(num, use_raw=False,
target=None, denominator=denominator, param=min_score,
min_val=min_val, max_val=max_val)
self.assertEqual(min_result, round(multiplier * abs(1 - (num - min_val) / denominator), 2),
(num, denominator, multiplier))
for num, denominator, target, multiplier in itertools.product(
list(arange(0, 1.05, .05)) + [True, False],
arange(.5, 10, .5), arange(.05, 1.05, .5), arange(.5, 10, .5)):
num, denominator, target, multiplier = float(num), float(denominator), float(target), float(multiplier)
target_score = TargetScore(rescaling="target", filter=None, multiplier=multiplier, value=target)
target_result = Abstractlocus._get_score_for_metric(num, use_raw=False,
target=target, denominator=denominator,
param=target_score,
min_val=min_val, max_val=max_val)
# score = 1 - abs(tid_metric - target) / denominator
expected = round(multiplier * (1 - abs((num - target) / denominator)), 2)
self.assertEqual(target_result, expected, (num, denominator, multiplier, target))
def test_check_usable_raw(self):
# _check_usable_raw(transcript, param, use_raw, rescaling, logger=create_null_logger())
for invalid in [b"cdna_length", 10, None, dict()]:
with self.assertRaises(TypeError) as exc:
Abstractlocus._check_usable_raw(None, invalid, False, "max")
self.assertIsNotNone(re.search(r"Invalid type of parameter", str(exc.exception)))
with self.assertRaises(TypeError) as exc:
Abstractlocus._check_usable_raw(invalid, "cdna_length", False, "max")
self.assertIsNotNone(re.search(r"Invalid transcript type", str(exc.exception)))
t = Transcript()
t.chrom, t.start, t.end, t.strand, t.id = "Chr1", 101, 1000, "+", "foo"
t.add_exons([(101, 500), (601, 1000)])
t.add_exons([(201, 500), (601, 900)], features="CDS")
t.finalize()
t.external_scores.tpm = [10, False]
t.external_scores.fraction = [.5, True]
t.attributes["FPKM"] = 10
logger = create_default_logger("test_check_usable_raw", level="WARNING")
for param in t.get_available_metrics():
for use_raw, rescaling in itertools.product([False, True], ["min", "max", "target"]):
metr = getattr(Transcript, param)
if not isinstance(metr, Metric):
continue
usable_raw = metr.usable_raw
if use_raw is True and (usable_raw is False or rescaling == "target"):
with self.assertLogs(logger, level="WARNING") as cmo:
found_use_raw = Abstractlocus._check_usable_raw(t, param, use_raw=use_raw, rescaling=rescaling,
logger=logger)
self.assertTrue(any(re.search(r"Switching to False", out) for out in cmo.output))
self.assertFalse(found_use_raw)
else:
found_use_raw = Abstractlocus._check_usable_raw(t, param, use_raw=use_raw, rescaling=rescaling,
logger=logger)
self.assertEqual(use_raw, found_use_raw)
# Now external things
found_use_raw = Abstractlocus._check_usable_raw(t, "external.tpm",
use_raw=False, rescaling="target", logger=logger)
self.assertFalse(found_use_raw)
found_use_raw = Abstractlocus._check_usable_raw(t, "external.tpm",
use_raw=True, rescaling="max", logger=logger)
self.assertFalse(found_use_raw)
found_use_raw = Abstractlocus._check_usable_raw(t, "external.fraction",
use_raw=False, rescaling="target", logger=logger)
self.assertFalse(found_use_raw)
found_use_raw = Abstractlocus._check_usable_raw(t, "external.fraction",
use_raw=True, rescaling="max", logger=logger)
self.assertTrue(found_use_raw)
# Now attributes. Attribute metrics simply echo use_raw (except that 'target' rescaling forces it to False)
found_use_raw = Abstractlocus._check_usable_raw(t, "attributes.FPKM",
use_raw=False, rescaling="max", logger=logger)
self.assertFalse(found_use_raw)
found_use_raw = Abstractlocus._check_usable_raw(t, "attributes.FPKM",
use_raw=True, rescaling="max", logger=logger)
self.assertTrue(found_use_raw)
found_use_raw = Abstractlocus._check_usable_raw(t, "attributes.FPKM",
use_raw=False, rescaling="target", logger=logger)
self.assertFalse(found_use_raw)
found_use_raw = Abstractlocus._check_usable_raw(t, "attributes.FPKM",
use_raw=True, rescaling="target", logger=logger)
self.assertFalse(found_use_raw)
def test_get_param_metrics(self):
""""""
param = "combined_cds_length"
t1 = Transcript()
t1.chrom, t1.start, t1.end, t1.strand, t1.id = "Chr1", 101, 1000, "+", "foo"
# cDNA 800 bps, CDS 60 bps
t1.add_exons([(101, 500), (601, 1000)])
t1.add_exons([(471, 500), (601, 630)], features="CDS")
t1.finalize()
# cDNA 210 bps, CDS 210 bps
t2 = Transcript()
t2.chrom, t2.start, t2.end, t2.strand, t2.id = "Chr1", 451, 710, "+", "bar"
t2.add_exons([(451, 500), (551, 710)])
t2.add_exons([(451, 500), (551, 710)], features="CDS")
t2.finalize()
self.assertEqual(operator.attrgetter(param)(t1), t1.combined_cds_length)
self.assertEqual(operator.attrgetter(param)(t2), t2.combined_cds_length)
self.assertEqual(t1.combined_cds_length, 60)
self.assertEqual(t2.combined_cds_length, 210)
transcripts = {"foo": t1, "bar": t2}
metrics = {t1.id: {param: t1.combined_cds_length},
t2.id: {param: t2.combined_cds_length}}
param_conf = MinMaxScore(rescaling="max", filter=None)
# First case: raise errors if any of the arguments is not of the expected type ...
for invalid in [None, 10, b"20", list()]:
with self.assertRaises(AttributeError):
Abstractlocus._get_param_metrics(invalid, metrics, param, param_conf)
with self.assertRaises((AttributeError, TypeError)):
Abstractlocus._get_param_metrics(transcripts, invalid, param, param_conf)
with self.assertRaises((AttributeError, TypeError)):
Abstractlocus._get_param_metrics(transcripts, metrics, invalid, param_conf)
with self.assertRaises((AttributeError, TypeError)):
Abstractlocus._get_param_metrics(transcripts, metrics, param, invalid)
# Second case: no filter. Presume that the metrics come back.
param_metrics, restored_metrics = Abstractlocus._get_param_metrics(transcripts, dict(), param, param_conf)
self.assertEqual(restored_metrics, metrics)
self.assertEqual({t1.id: t1.combined_cds_length, t2.id: t2.combined_cds_length}, param_metrics)
param_metrics, _ = Abstractlocus._get_param_metrics(transcripts, metrics, param, param_conf)
self.assertEqual({t1.id: t1.combined_cds_length, t2.id: t2.combined_cds_length},
param_metrics)
# Third case. Now we have a filter but without a name, so we are presuming that it applies to
# combined_cds_length.
param_conf = MinMaxScore(rescaling="max", filter=SizeFilter(operator="gt",
value=150))
param_metrics, _ = Abstractlocus._get_param_metrics(transcripts, metrics, param, param_conf)
self.assertEqual(param_metrics, {t2.id: t2.combined_cds_length})
param_conf = MinMaxScore(rescaling="max", filter=SizeFilter(operator="gt",
value=300))
param_metrics, _ = Abstractlocus._get_param_metrics(transcripts, metrics, param, param_conf)
self.assertEqual(param_metrics, {})
# Fourth case, now we are filtering on a different parameter, cdna_length
param_conf = MinMaxScore(rescaling="max", filter=SizeFilter(operator="gt",
metric="cdna_length",
value=10))
self.assertEqual(param_conf.filter.metric, "cdna_length")
param_metrics, _ = Abstractlocus._get_param_metrics(transcripts, metrics, param, param_conf)
self.assertEqual({t1.id: t1.combined_cds_length, t2.id: t2.combined_cds_length},
param_metrics)
param_conf = MinMaxScore(rescaling="max", filter=SizeFilter(operator="gt",
metric="cdna_length",
value=300))
self.assertEqual(param_conf.filter.metric, "cdna_length")
param_metrics, _ = Abstractlocus._get_param_metrics(transcripts, metrics, param, param_conf)
self.assertEqual({t1.id: t1.combined_cds_length},
param_metrics)
param_conf = MinMaxScore(rescaling="max", filter=SizeFilter(operator="gt",
metric="cdna_length",
value=2000))
param_metrics, _ = Abstractlocus._get_param_metrics(transcripts, metrics, param, param_conf)
self.assertEqual({}, param_metrics)
# Fifth case. Considering external and attributes
t1.external_scores["tpm"] = [10, False]
t1.external_scores["fraction"] = [.5, True]
t1.attributes["FPKM"] = 10
t2.external_scores["tpm"] = [5, False]
t2.external_scores["fraction"] = [1, True]
t2.attributes["FPKM"] = 50
param_conf = MinMaxScore(rescaling="max", filter=SizeFilter(operator="gt",
metric="attributes.FPKM",
value=5))
param_metrics, restored_metrics = Abstractlocus._get_param_metrics(transcripts, metrics,
"external.tpm", param_conf)
self.assertEqual(param_metrics, {t1.id: t1.external_scores.tpm[0],
t2.id: t2.external_scores.tpm[0]})
param_conf = MinMaxScore(rescaling="max", filter=SizeFilter(operator="gt",
metric="attributes.FPKM",
value=50))
param_metrics, restored_metrics = Abstractlocus._get_param_metrics(transcripts, metrics,
"external.tpm", param_conf)
self.assertEqual(param_metrics, {})
param_conf = MinMaxScore(rescaling="max", filter=SizeFilter(operator="gt",
metric=None,
value=5))
param_metrics, restored_metrics = Abstractlocus._get_param_metrics(transcripts, metrics,
"external.tpm", param_conf)
self.assertEqual(param_metrics, {t1.id: t1.external_scores.tpm[0]})
param_conf = MinMaxScore(rescaling="max", filter=SizeFilter(operator="lt",
metric="external.fraction",
value=.6))
param_metrics, restored_metrics = Abstractlocus._get_param_metrics(transcripts, metrics,
"external.tpm", param_conf)
self.assertEqual(param_metrics, {t1.id: t1.external_scores.tpm[0]})
def test_score(self):
t = Transcript()
t.chrom, t.start, t.end, t.id, t.score = "Chr1", 101, 1000, "foo", None
t.add_exons([(101, 1000)])
t2 = Transcript()
t2.chrom, t2.start, t2.end, t2.id, t2.score = "Chr1", 101, 1000, "foo2", 100
t2.add_exons([(101, 1000)])
for locus in [Locus, Sublocus, Superlocus]:
l = locus(None)
self.assertIsNone(l.score)
l = locus(t)
self.assertEqual(l.score, 0)
l.add_transcript_to_locus(t2, check_in_locus=False)
self.assertEqual(l.score, 100)
for invalid in [10, -5, dict(), None]:
with self.assertRaises(ValueError):
l._use_transcript_scores = invalid
for valid in [False, True]:
l._use_transcript_scores = valid
self.assertEqual(valid, l._use_transcript_scores)
self.assertEqual(valid, l.scores_calculated)
def test_serialisation(self):
engine = create_engine("sqlite://")
maker = sessionmaker(bind=engine)
session = maker()
for child in [Superlocus, Sublocus, Monosublocus, Locus, Excluded]:
with self.subTest(child=child):
child1 = child(self.transcript1)
child1.session = session
child1.sessionmaker = sessionmaker
# Check compiled in dictionary
self.assertIsInstance(child1.configuration, (MikadoConfiguration, DaijinConfiguration))
obj = pickle.dumps(child1)
nobj = pickle.loads(obj)
self.assertEqual(child1, nobj)
obj2 = child1.as_dict()
self.assertIsInstance(obj2, dict)
obj3 = child1.as_dict()
self.assertEqual(obj2, obj3)
nobj2 = child(None)
nobj2.load_dict(obj2)
self.assertEqual(child1, nobj2)
# Now try to inject a daijin configuration
d = DaijinConfiguration()
obj2["json_conf"] = dataclasses.asdict(d)
assert isinstance(obj2["json_conf"], dict)
nobj2b = child(None)
nobj2b.load_dict(obj2, load_configuration=True)
self.assertIsInstance(nobj2b.configuration, DaijinConfiguration)
# Now avoid loading the configuration altogether
nobj2b = child(None, configuration=MikadoConfiguration())
seed = nobj2b.configuration.seed = child1.configuration.seed + 1
obj2["json_conf"] = dataclasses.asdict(d)
nobj2b.load_dict(obj2, load_configuration=False)
self.assertEqual(nobj2b.configuration.seed, seed)
self.assertIsInstance(nobj2b.configuration, MikadoConfiguration)
                # Finally, validate that an invalid configuration is detected
obj2["json_conf"] = {"I am an invalid": "dictionary"}
with self.assertRaises(marshmallow.exceptions.MarshmallowError):
nobj2b = child(None)
nobj2b.load_dict(obj2, load_configuration=True)
def test_slocus_dicts(self):
self.assertEqual(self.transcript1.configuration.seed, self.transcript2.configuration.seed)
locus = Superlocus(self.transcript1)
locus.add_transcript_to_locus(self.transcript2, check_in_locus=False)
locus.subloci = [Sublocus(self.transcript1)]
l = Locus(self.transcript1)
locus.loci = {l.id: l}
ml = MonosublocusHolder(Monosublocus(self.transcript1))
locus.monoholders = [ml]
locus.excluded = Excluded(self.transcript2, configuration=locus.configuration)
conf = locus.configuration.copy()
_without = locus.as_dict(with_subloci=False, with_monoholders=False)
self.assertEqual(_without["subloci"], [])
self.assertEqual(_without["monoholders"], [])
self.assertEqual(_without["excluded"], locus.excluded.as_dict())
self.assertEqual(_without["loci"], {l.id: l.as_dict()})
_with = locus.as_dict(with_subloci=True, with_monoholders=True)
self.assertIsNotNone(_with["json_conf"]["seed"])
self.assertEqual(_with["json_conf"]["seed"], conf.seed)
self.assertEqual(_with["subloci"], [locus.subloci[0].as_dict()])
self.assertEqual(_with["monoholders"], [ml.as_dict()])
self.assertEqual(conf.seed, locus.configuration.seed)
self.assertEqual(conf.seed, self.transcript2.configuration.seed)
excl = Excluded(self.transcript2, configuration=conf)
self.assertEqual(excl.configuration.seed, locus.configuration.seed)
self.assertEqual(_with["excluded"], Excluded(self.transcript2, configuration=conf).as_dict())
self.assertEqual(_with["loci"], {l.id: l.as_dict()})
self.assertIsInstance(_with["json_conf"], dict)
# Now test the reloading
# def load_dict(self, state, print_subloci=True, print_monoloci=True, load_transcripts=True,
# load_configuration=True):
conf.threads = 10
self.assertNotEqual(conf, locus.configuration)
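        # Exercise all 16 combinations of the four load_dict flags.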
num = 0
for print_subloci in (True, False):
for print_monoloci in (True, False):
for load_transcripts in (True, False):
for load_configuration in (True, False):
with self.subTest(
print_subloci=print_subloci,
print_monoloci=print_monoloci,
load_transcripts=load_transcripts,
load_configuration=load_configuration
):
num += 1
test = Superlocus(None, configuration=conf)
self.assertIsInstance(_with["json_conf"], dict, num)
test.load_dict(
deepcopy(_with), print_subloci=print_subloci, print_monoloci=print_monoloci,
load_transcripts=load_transcripts, load_configuration=load_configuration)
if load_configuration is False:
self.assertEqual(conf, test.configuration)
else:
self.assertEqual(locus.configuration, test.configuration)
if load_transcripts is True:
self.assertEqual(locus.transcripts, test.transcripts)
else:
self.assertEqual(test.transcripts, dict((tid, transcript.as_dict())
for tid, transcript in
locus.transcripts.items()))
if print_subloci is True:
self.assertEqual(locus.excluded, test.excluded)
self.assertEqual(locus.subloci, test.subloci)
else:
self.assertEqual(test.subloci, [])
if print_monoloci is True:
self.assertEqual(test.monoholders, locus.monoholders)
else:
self.assertEqual(test.monoholders, [])
def test_evaluate(self):
"""Test to verify the abstractlocus.evaluate static method"""
# param: Union[str, int, bool, float],
# conf: Union[SizeFilter, InclusionFilter, NumBoolEqualityFilter, RangeFilter]
# Params: eq, ne, gt, ge, lt, le, in, not in, within, not within
# eq, ne
for val in (2, 3.0, True):
for oval in (2, 3.0, True, 4.0, 5, False):
for op in ["ne", "eq"]:
filt = NumBoolEqualityFilter.Schema().load({"value": val, "operator": op})
for obj in [Superlocus, Locus, Sublocus, Monosublocus, MonosublocusHolder]:
result = obj.evaluate(oval, filt)
if (val == oval and op == "eq") or (val != oval and op == "ne"):
self.assertTrue(result)
else:
self.assertFalse(result)
# ge, lt, le, gt
for val in (2, 3.0, 4):
for oval in (2, 3, 4, 5):
for op in ["gt", "ge", "lt", "le"]:
filt = SizeFilter.Schema().load({"value": val, "operator": op})
for obj in [Superlocus, Locus, Sublocus, Monosublocus, MonosublocusHolder]:
result = obj.evaluate(oval, filt)
if op == "lt":
self.assertEqual(oval < val, result, (oval, val, op))
elif op == "le":
self.assertEqual(oval <= val, result, (oval, val, op))
elif op == "ge":
self.assertEqual(oval >= val, result, (oval, val, op))
else: # "gt"
self.assertEqual(oval > val, result, (oval, val, op))
# within, not within
within = RangeFilter.Schema().load({"operator": "within", "value": [20, 10]})
without = RangeFilter.Schema().load({"operator": "not within", "value": [20, 10]})
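        # The bounds are deliberately given high-to-low ([20, 10]); the assertions below expect the filter
        # to treat them as the closed interval 10-20.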
for obj in [Superlocus, Locus, Sublocus, Monosublocus, MonosublocusHolder]:
self.assertTrue(obj.evaluate(15, within))
self.assertTrue(obj.evaluate(5, without))
self.assertFalse(obj.evaluate(15, without))
self.assertFalse(obj.evaluate(5, within))
# in, not in
within = InclusionFilter.Schema().load({"operator": "in", "value": ["valid_one", "valid_two", 10]})
without = InclusionFilter.Schema().load({"operator": "not in", "value": ["valid_one", "valid_two", 10]})
for obj in [Superlocus, Locus, Sublocus, Monosublocus, MonosublocusHolder]:
self.assertTrue(obj.evaluate("valid_one", within))
self.assertTrue(obj.evaluate("valid_three", without))
self.assertTrue(obj.evaluate(10, within))
self.assertTrue(obj.evaluate(15, without))
self.assertFalse(obj.evaluate("valid_one", without))
self.assertFalse(obj.evaluate("valid_three", within))
self.assertFalse(obj.evaluate(10, without))
self.assertFalse(obj.evaluate(15, within))
# Mock to demonstrate that ValueError is raised
_mock = namedtuple("mock", ["operator", "value"])
mock = _mock("foo", "hello")
for obj in [Superlocus, Locus, Sublocus, Monosublocus, MonosublocusHolder]:
with self.assertRaises(ValueError):
obj.evaluate(10, mock)
def test_calculate_metrics(self):
logger = create_default_logger("test_calculate_metrics", level="DEBUG")
        stream = io.StringIO()
        handler = logging.StreamHandler(stream)
        logger.addHandler(handler)
for locus_class in [Locus, Sublocus, Superlocus, Monosublocus]:
locus = locus_class(self.transcript1, logger=logger)
self.assertFalse(locus.metrics_calculated)
with self.assertLogs(logger, level="DEBUG"):
locus.calculate_metrics(self.transcript1.id)
self.assertFalse(locus.metrics_calculated)
_ = stream.getvalue()
with self._caplog.at_level(logging.DEBUG, logger=logger.name):
locus.get_metrics()
self.assertGreater(len(stream.getvalue()), 0, self._caplog.text)
self._caplog.clear()
self.assertTrue(locus.metrics_calculated)
stream.close()
            # Detach the handler object (not the raw stream) before attaching a fresh one
            locus.logger.removeHandler(handler)
            stream = io.StringIO()
            handler = logging.StreamHandler(stream)
            locus.logger.addHandler(handler)
with self._caplog.at_level(logging.DEBUG, logger=logger.name) as cmo:
locus.get_metrics()
logged = stream.getvalue()
self.assertEqual(len(logged), 0, logged)
self._caplog.clear()
stream.close()
def test_in_locus(self):
for child in [Superlocus, Sublocus, Monosublocus, Locus]:
child1 = child(self.transcript1)
self.assertTrue(child.in_locus(child1, self.transcript1))
self.assertTrue(Abstractlocus.in_locus(child1, self.transcript1))
self.assertFalse(child.in_locus(child1, self.transcript2))
self.assertTrue(child.in_locus(child1, self.transcript2, flank=abs(self.transcript1.end - self.transcript2.end)))
self.assertFalse(Abstractlocus.in_locus(child1, self.transcript2))
self.assertTrue(Abstractlocus.in_locus(child1, self.transcript2,
flank=abs(self.transcript1.end - self.transcript2.end)))
with self.assertRaises(TypeError):
child1.in_locus(child1, child1)
with self.assertRaises(TypeError):
Abstractlocus.in_locus(child1, child1)
# Check that we have a suitable error
with self.assertRaises(TypeError):
Abstractlocus.in_locus(self.transcript1, self.transcript2)
def test_load_scores(self):
scores = {self.transcript1.id: 10}
empty_scores = dict()
false_scores = set()
for child in [Superlocus, Sublocus, Monosublocus, Locus]:
child1 = child(self.transcript1)
with self.assertRaises(ValueError):
child1._load_scores(false_scores)
with self.assertRaises(KeyError):
child1._load_scores(empty_scores)
child1._load_scores(scores)
self.assertEqual(child1.scores[self.transcript1.id], 10)
self.assertTrue(child1.scores_calculated)
self.assertTrue(child1.metrics_calculated)
def test_evaluate_overlap(self):
for child in [Superlocus, Sublocus, Monosublocus, Locus]:
child1 = child(self.transcript1)
self.assertFalse(child1._evaluate_transcript_overlap(self.transcript1, self.transcript2)[0])
self.assertTrue(child1._evaluate_transcript_overlap(self.transcript1, self.transcript1)[0])
def test_invalid_sublocus(self):
with self.assertRaises((OSError, FileNotFoundError, exceptions.InvalidConfiguration)):
_ = Sublocus(self.transcript1, configuration="test")
def test_sublocus_from_sublocus(self):
s = Sublocus(self.transcript1)
s2 = Sublocus(s)
self.assertFalse(s.fixed_size)
self.assertTrue(s2.fixed_size)
for attr in ["parent", "chrom", "start", "end", "strand", "attributes"]:
self.assertEqual(getattr(s, attr), getattr(s2, attr))
def test_pickling_unpickling(self):
bed_line = "Chr5\t26585506\t26586850\tID=c58_g1_i2.mrna1.35;coding=False\t99.0\t+\t26585506\t26585507\t0\t5\t383,121,78,105,213\t0,475,710,913,1131"
conf = MikadoConfiguration()
conf.prepare.files.source_score = {"at": 5, "tr": -1, "pb": 1, "st": 0}
t = Transcript(bed_line, source="tr", configuration=conf)
t.finalize()
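        # Assign a random value to every metric whose declared rtype is numeric or boolean, so that the
        # pickle round-trip below compares non-default values.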
for metrics in t.get_available_metrics():
try:
                rtype = operator.attrgetter("{metric}.rtype".format(metric=metrics))(Transcript)
except AttributeError:
continue
if rtype == "float":
value = random.random()
elif rtype == "bool":
value = random.choice([True, False])
elif rtype == "int":
value = random.randint(0, 1000)
else:
continue
try:
setattr(t, metrics, value)
except AttributeError:
continue
for loctype in Sublocus, Monosublocus, Superlocus, Locus:
locus = loctype(t)
nocus = pickle.loads(pickle.dumps(locus))
for metric in t.get_available_metrics():
original, new = getattr(t, metric), getattr(locus[t.id], metric)
self.assertEqual(original, new, (metric, original, new))
class LocusTester(unittest.TestCase):
logger = create_null_logger(inspect.getframeinfo(inspect.currentframe())[2])
logger_name = logger.name
@classmethod
def setUpClass(cls):
cls.fai = pysam.FastaFile(pkg_resources.resource_filename("Mikado.tests", "chr5.fas.gz"))
def setUp(self):
gff_transcript1 = """Chr1\tfoo\ttranscript\t101\t400\t.\t+\t.\tID=t0
Chr1\tfoo\texon\t101\t400\t.\t+\t.\tID=t0:exon1;Parent=t0
Chr1\tfoo\tCDS\t101\t350\t.\t+\t.\tID=t0:exon1;Parent=t0""".split("\n")
gff_transcript1 = [GFF.GffLine(x) for x in gff_transcript1]
self.assertEqual(gff_transcript1[0].chrom, "Chr1", gff_transcript1[0])
self.transcript1 = Transcript(gff_transcript1[0])
for exon in gff_transcript1[1:]:
self.transcript1.add_exon(exon)
self.transcript1.finalize()
self.assertTrue(self.transcript1.monoexonic)
self.assertEqual(self.transcript1.chrom, gff_transcript1[0].chrom)
gff_transcript2 = """Chr1\tfoo\ttranscript\t101\t600\t.\t+\t.\tID=t1
Chr1\tfoo\texon\t101\t200\t.\t+\t.\tID=t1:exon1;Parent=t1
Chr1\tfoo\texon\t301\t400\t.\t+\t.\tID=t1:exon2;Parent=t1
Chr1\tfoo\texon\t501\t600\t.\t+\t.\tID=t1:exon3;Parent=t1""".split("\n")
gff_transcript2 = [GFF.GffLine(x) for x in gff_transcript2]
self.transcript2 = Transcript(gff_transcript2[0], logger=self.logger)
for exon in gff_transcript2[1:-1]:
self.transcript2.add_exon(exon)
# Test that a transcript cannot be finalized if
# the exons do not define the external boundaries
with self.assertLogs(logger=self.logger_name, level="WARNING") as _:
self.transcript2.finalize()
with self.assertRaises(exceptions.ModificationError):
self.transcript2.add_exon(gff_transcript2[-1])
self.transcript2.finalized = False
self.transcript2.start = 101
self.transcript2.end = 600
self.transcript2.add_exon(gff_transcript2[-1])
self.transcript2.finalize()
self.assertFalse(self.transcript2.monoexonic)
self.assertEqual(self.transcript2.exon_num, len(gff_transcript2) - 1)
# Test that trying to modify a transcript after it has been finalized causes errors
with self.assertRaises(exceptions.ModificationError):
for exon in gff_transcript2[1:]:
self.transcript2.add_exon(exon)
# # Test that creating a superlocus without configuration fails
# with self.assertRaises(exceptions.NoJsonConfigError):
# _ = Superlocus(self.transcript1)
self.my_json = os.path.join(os.path.dirname(__file__), "configuration.yaml")
self.my_json = configurator.load_and_validate_config(self.my_json)
self.my_json.reference.genome = self.fai.filename.decode()
self.assertIsInstance(self.my_json.scoring, ScoringFile, self.my_json.scoring)
def test_locus(self):
"""Basic testing of the Locus functionality."""
logger = create_default_logger(inspect.getframeinfo(inspect.currentframe())[2])
logger.setLevel("CRITICAL")
logger.info("Started")
self.transcript1.logger = logger
self.transcript2.logger = logger
self.assertTrue(self.transcript1.monoexonic)
slocus = Superlocus(self.transcript1,
configuration=self.my_json, logger=logger)
slocus.add_transcript_to_locus(self.transcript2)
self.assertEqual(len(slocus.transcripts), 2)
self.assertEqual(slocus.strand, self.transcript1.strand)
self.assertEqual(slocus.start, min(self.transcript1.start, self.transcript2.start))
self.assertEqual(slocus.end, max(self.transcript1.end, self.transcript2.end))
logger.info(slocus.transcripts)
slocus.define_subloci(check_requirements=False)
self.assertEqual(len(slocus.transcripts), 2)
logger.info(slocus.subloci)
logger.info(slocus.transcripts)
self.assertEqual(len(slocus.transcripts), 2)
self.assertEqual(len(slocus.subloci), 2)
slocus.define_monosubloci(check_requirements=False)
self.assertEqual(len(slocus.monosubloci), 2)
slocus.calculate_mono_metrics()
self.assertEqual(len(slocus.monoholders), 1)
slocus.logger.setLevel("DEBUG")
slocus.define_loci(check_requirements=False)
self.assertEqual(len(slocus.loci), 1)
# self.assertFalse(slocus["t0"].is_coding, slocus["t0"].format("gtf"))
self.assertFalse(slocus["t1"].is_coding, slocus["t1"].format("gtf"))
self.assertEqual(sorted(list(slocus.loci[
list(slocus.loci.keys())[0]].transcripts.keys())), ["t0"])
gff_transcript3 = """Chr1\tfoo\ttranscript\t101\t1000\t.\t-\t.\tID=tminus0
Chr1\tfoo\texon\t101\t600\t.\t-\t.\tID=tminus0:exon1;Parent=tminus0
Chr1\tfoo\tCDS\t201\t500\t.\t-\t.\tID=tminus0:exon1;Parent=tminus0
Chr1\tfoo\texon\t801\t1000\t.\t-\t.\tID=tminus0:exon1;Parent=tminus0""".split("\n")
gff_transcript3 = [GFF.GffLine(x) for x in gff_transcript3]
transcript3 = Transcript(gff_transcript3[0])
for exon in gff_transcript3[1:]:
transcript3.add_exon(exon)
transcript3.finalize()
self.assertGreater(transcript3.combined_cds_length, 0)
self.my_json.pick.clustering.purge = True
logger.setLevel("WARNING")
minusuperlocus = Superlocus(transcript3, configuration=self.my_json)
minusuperlocus.logger = logger
minusuperlocus.define_subloci()
self.assertGreater(len(minusuperlocus.subloci), 0)
minusuperlocus.calculate_mono_metrics()
self.assertGreater(len(minusuperlocus.monoholders), 0)
minusuperlocus.define_loci()
self.assertEqual(len(minusuperlocus.loci), 1)
self.assertTrue(transcript3.strand != self.transcript1.strand)
def test_cannot_add(self):
for strand, stranded, ostrand in itertools.product(("+", "-", None), (True, False), ("+", "-", None)):
with self.subTest(strand=strand, stranded=stranded, ostrand=ostrand):
t1 = "1\t100\t2000\tID=T1;coding=False\t0\t{strand}\t100\t2000\t0\t1\t1900\t0".format(
strand=strand if strand else ".", )
t1 = Transcript(BED12(t1))
t2 = "1\t105\t2300\tID=T2;coding=False\t0\t{strand}\t105\t2300\t0\t1\t2195\t0".format(
strand=strand if strand else ".")
t2 = Transcript(BED12(t2))
sl = loci.Superlocus(t1, stranded=stranded, configuration=None)
self.assertIn(t1.id, sl)
if not stranded or t2.strand == t1.strand:
sl.add_transcript_to_locus(t2)
self.assertIn(t2.id, sl)
else:
with self.assertRaises(exceptions.NotInLocusError):
sl.add_transcript_to_locus(t2)
with self.subTest():
t1 = "1\t100\t2000\tID=T1;coding=False\t0\t+\t100\t2000\t0\t1\t1900\t0"
t1 = Transcript(BED12(t1))
t1.finalize()
t2 = t1.copy()
t2.unfinalize()
t2.chrom = "2"
t2.id = "T2"
t2.finalize()
sl = loci.Superlocus(t1, stranded=False)
with self.assertRaises(exceptions.NotInLocusError):
sl.add_transcript_to_locus(t2)
st1 = "1\t100\t2000\tID=T1;coding=False\t0\t+\t100\t2000\t0\t1\t1900\t0"
t1 = Transcript(BED12(st1))
t1.finalize()
t2 = BED12(st1)
t2.start += 10000
t2.end += 10000
t2.thick_start += 10000
        t2.thick_end += 10000
t2 = Transcript(t2)
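        # t2 is the same model shifted 10 kb downstream (an ~8 kb gap between t1.end and t2.start),
        # so only flank values of 10000 or more should let it join the superlocus.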
for flank in [0, 1000, 10000, 20000]:
with self.subTest(flank=flank):
sl = Superlocus(t1, flank=flank)
if flank < 10000:
with self.assertRaises(exceptions.NotInLocusError):
sl.add_transcript_to_locus(t2)
else:
sl.add_transcript_to_locus(t2)
self.assertIn(t2.id, sl)
def test_empty_locus(self):
st1 = "1\t100\t2000\tID=T1;coding=False\t0\t+\t100\t2000\t0\t1\t1900\t0"
t1 = Transcript(BED12(st1))
t1.finalize()
sl = Superlocus(t1)
sl.check_configuration()
sl.remove_transcript_from_locus(t1.id)
_ = sl.segmenttree
def test_verified_introns(self):
"""This method will check that during run-time, the verified introns are considered at
the Superlocus level, not at the Sublocus one."""
t1 = Transcript()
t1.chrom, t1.strand, t1.id = "1", "+", "t1"
t1.start, t1.end = 100, 1000
t1.add_exons([(100, 200), (300, 500), (600, 1000)])
t1.finalize()
t1.verified_introns.add((201, 299))
t1.verified_introns.add((501, 599))
t2 = Transcript()
t2.chrom, t2.strand, t2.id = "1", "+", "t2"
t2.start, t2.end = 100, 1000
t2.add_exons([(100, 200), (300, 1000)])
t2.finalize()
t2.verified_introns.add((201, 299))
t3 = Transcript()
t3.chrom, t3.strand, t3.id = "1", "+", "t3"
t3.start, t3.end = 100, 1000
t3.add_exons([(100, 250), (300, 505), (600, 1000)])
t3.finalize()
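        # t1 has both of its introns verified, t2 its single intron, t3 none. The in-locus proportion is
        # expected to be computed against all verified introns in the Superlocus (t1: 2/2, t2: 1/2, t3: 0).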
jconf = configurator.load_and_validate_config(None)
loc = Superlocus(t1, configuration=jconf)
loc.add_transcript_to_locus(t2)
loc.add_transcript_to_locus(t3)
loc.define_subloci()
self.assertEqual(t1.proportion_verified_introns, 1)
self.assertEqual(t1.proportion_verified_introns_inlocus, 1)
self.assertEqual(t2.proportion_verified_introns, 1)
self.assertEqual(t2.proportion_verified_introns_inlocus, 0.5)
self.assertEqual(t3.proportion_verified_introns, 0)
self.assertEqual(t3.proportion_verified_introns_inlocus, 0)
def test_boolean_requirement(self):
logger = create_null_logger()
logger.setLevel("DEBUG")
logger.info("Started")
t1 = Transcript()
t1.chrom, t1.strand, t1.id = "1", "+", "t1"
t1.start, t1.end = 100, 1000
t1.add_exons([(100, 200), (300, 500), (600, 1000)])
t1.finalize()
t1.verified_introns.add((201, 299))
t1.verified_introns.add((501, 599))
t2 = Transcript()
t2.chrom, t2.strand, t2.id = "1", "+", "t2"
t2.start, t2.end = 100, 1000
t2.add_exons([(100, 200), (300, 1000)])
t2.finalize()
t2.verified_introns.add((201, 299))
t3 = Transcript()
t3.chrom, t3.strand, t3.id = "1", "+", "t3"
t3.start, t3.end = 100, 1000
t3.add_exons([(100, 250), (300, 505), (600, 1000)])
t3.finalize()
jconf = configurator.load_and_validate_config(None)
log = create_default_logger("tester", level="DEBUG")
jconf.scoring.requirements.parameters = dict()
jconf.scoring.requirements.expression = ["suspicious_splicing"]
jconf.scoring.requirements.parameters["suspicious_splicing"] = NumBoolEqualityFilter.Schema().load(
{"operator": "ne", "name": "suspicious_splicing", "value": True})
jconf.pick.alternative_splicing.report = False
jconf.scoring.check(minimal_orf_length=jconf.pick.orf_loading.minimal_orf_length)
        # Necessary to make sure that the externally-specified requirements are picked up
self.assertEqual(jconf.scoring.requirements._expression, "evaluated[\"suspicious_splicing\"]")
logger = create_default_logger(inspect.getframeinfo(inspect.currentframe())[2])
for suspicious in (False, True):
with self.subTest(suspicious=suspicious):
logger.setLevel("WARNING")
loc = Superlocus(t1, configuration=jconf, logger=logger)
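                # Flagging t2's canonical junctions as lying on the reverse strand should make
                # suspicious_splicing True, so the requirement above removes it during define_subloci
                # (2 transcripts left instead of 3).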
t2.attributes["canonical_on_reverse_strand"] = suspicious
loc.add_transcript_to_locus(t2)
loc.add_transcript_to_locus(t3)
self.assertEqual(len(loc.transcripts), 3)
# loc.logger.setLevel("DEBUG")
loc.define_subloci()
self.assertEqual(len(loc.transcripts), 3 if not suspicious else 2)
def test_reducing_methods_one(self):
t1 = Transcript()
t1.chrom, t1.start, t1.end, t1.strand, t1.id, = "Chr5", 1000, 2000, "+", "t1"
t1.add_exons([(1000, 1200), (1500, 2000)])
t1.finalize()
t2 = Transcript()
t2.chrom, t2.start, t2.end, t2.strand, t2.id, = "Chr5", 999, 2001, "+", "t2"
t2.add_exons([(999, 1200), (1500, 2001)])
t2.finalize()
t3 = Transcript()
t3.chrom, t3.start, t3.end, t3.strand, t3.id, = "Chr5", 999, 2002, "+", "t3"
t3.add_exons([(999, 1200), (1500, 2002)])
t3.finalize()
logger = create_default_logger("test_reducing_methods_one", level="INFO")
locus = Superlocus(t1, logger=logger)
locus.add_transcript_to_locus(t2)
locus.add_transcript_to_locus(t3)
self.assertIn("t1", locus)
self.assertIn("t2", locus)
self.assertIn("t3", locus)
locus.logger.setLevel(level="DEBUG")
transcript_graph = locus.define_graph()
self.assertIn("t1", transcript_graph.neighbors("t2"))
locus.reduce_method_one(transcript_graph)
self.assertNotIn("t1", locus)
self.assertNotIn("t2", locus)
self.assertIn("t3", locus)
def test_reducing_methods_one_2(self):
t1 = Transcript()
t1.chrom, t1.start, t1.end, t1.strand, t1.id, = "Chr5", 1000, 2000, "+", "t1"
t1.add_exons([(1000, 1200), (1500, 2000)])
t1.finalize()
t2 = Transcript()
t2.chrom, t2.start, t2.end, t2.strand, t2.id, = "Chr5", 999, 2001, "+", "t2"
t2.add_exons([(999, 1200), (1500, 2001)])
t2.finalize()
t3 = Transcript()
t3.chrom, t3.start, t3.end, t3.strand, t3.id, = "Chr5", 999, 2002, "+", "t3"
t3.add_exons([(999, 1200), (1500, 2002)])
t3.finalize()
        logger = create_default_logger("test_reducing_methods_one_2", level="WARNING")  # switch to "DEBUG" when troubleshooting
locus = Superlocus(t1, logger=logger)
locus.add_transcript_to_locus(t2)
locus.add_transcript_to_locus(t3)
locus._complex_limit = (3, 10**4)
locus.define_subloci(check_requirements=False)
self.assertNotIn("t1", locus)
self.assertNotIn("t2", locus)
self.assertIn("t3", locus)
def test_reducing_methods_one_3(self):
t1 = Transcript()
t1.chrom, t1.start, t1.end, t1.strand, t1.id, = "Chr5", 1000, 2000, "+", "t1"
t1.add_exons([(1000, 2000)])
t1.finalize()
t2 = Transcript()
t2.chrom, t2.start, t2.end, t2.strand, t2.id, = "Chr5", 999, 2001, "+", "t2"
t2.add_exons([(999, 2001)])
t2.finalize()
t3 = Transcript()
t3.chrom, t3.start, t3.end, t3.strand, t3.id, = "Chr5", 999, 2002, "+", "t3"
t3.add_exons([(999, 2002)])
t3.finalize()
        logger = create_default_logger("test_reducing_methods_one_3", level="DEBUG")
locus = Superlocus(t1, logger=logger)
locus.add_transcript_to_locus(t2)
locus.add_transcript_to_locus(t3)
locus._complex_limit = (3, 10**4)
locus.define_subloci(check_requirements=False)
self.assertNotIn("t1", locus)
self.assertNotIn("t2", locus)
self.assertIn("t3", locus)
def test_reducing_methods_one_4(self):
t1 = Transcript(is_reference=True)
t1.chrom, t1.start, t1.end, t1.strand, t1.id, = "Chr5", 1000, 2000, "+", "t1"
t1.add_exons([(1000, 2000)])
t1.finalize()
t2 = Transcript()
t2.chrom, t2.start, t2.end, t2.strand, t2.id, = "Chr5", 999, 2001, "+", "t2"
t2.add_exons([(999, 2001)])
t2.finalize()
t3 = Transcript()
t3.chrom, t3.start, t3.end, t3.strand, t3.id, = "Chr5", 999, 2002, "+", "t3"
t3.add_exons([(999, 2002)])
t3.finalize()
        logger = create_default_logger("test_reducing_methods_one_4", level="DEBUG")
locus = Superlocus(t1, logger=logger)
locus.add_transcript_to_locus(t2)
locus.add_transcript_to_locus(t3)
locus._complex_limit = (3, 10**4)
locus.define_subloci(check_requirements=False)
self.assertIn("t1", locus)
self.assertNotIn("t2", locus)
self.assertIn("t3", locus)
def test_reducing_methods_one_and_two(self):
for is_ref in (True, False):
with self.subTest(is_reference=is_ref):
t1 = Transcript(is_reference=is_ref)
t1.chrom, t1.start, t1.end, t1.strand, t1.id, = "Chr5", 1000, 2500, "+", "t1"
t1.add_exons([(1000, 1500), (1800, 2000), (2200, 2500)])
t1.finalize()
t2 = Transcript()
t2.chrom, t2.start, t2.end, t2.strand, t2.id, = "Chr5", 999, 2501, "+", "t2"
t2.add_exons([(999, 1500), (1800, 2000), (2200, 2501)])
t2.finalize()
t3 = Transcript()
t3.chrom, t3.start, t3.end, t3.strand, t3.id, = "Chr5", 999, 1999, "+", "t3"
t3.add_exons([(999, 1500), (1800, 1999)])
t3.finalize()
                logger = create_default_logger("test_reducing_methods_one_and_two", level="WARNING")
locus = Superlocus(t1, logger=logger)
locus.add_transcript_to_locus(t2)
locus.add_transcript_to_locus(t3)
locus._complex_limit = (1, 10**4)
locus.logger.setLevel("DEBUG")
locus.define_subloci(check_requirements=False)
if is_ref:
self.assertIn("t1", locus)
else:
self.assertNotIn("t1", locus)
self.assertIn("t2", locus)
self.assertNotIn("t3", locus)
def test_reducing_methods_two(self):
for is_ref in (True, False):
with self.subTest(is_ref=is_ref):
t1 = Transcript(is_reference=is_ref)
t1.chrom, t1.start, t1.end, t1.strand, t1.id, = "Chr5", 1000, 2500, "+", "t1"
t1.add_exons([(1000, 1500), (1800, 2000), (2200, 2500)])
t1.finalize()
t2 = Transcript()
t2.chrom, t2.start, t2.end, t2.strand, t2.id, = "Chr5", 999, 2501, "+", "t2"
t2.add_exons([(999, 1500), (1800, 2000), (2200, 2501)])
t2.finalize()
t3 = Transcript()
t3.chrom, t3.start, t3.end, t3.strand, t3.id, = "Chr5", 999, 1999, "+", "t3"
t3.add_exons([(999, 1500), (1800, 1999)])
t3.finalize()
                logger = create_default_logger("test_reducing_methods_two", level="DEBUG")
locus = Superlocus(t1, logger=logger)
locus.add_transcript_to_locus(t2)
locus.add_transcript_to_locus(t3)
graph, edges = locus.reduce_method_one(locus.define_graph())
if is_ref:
self.assertIn("t1", graph.nodes)
else:
self.assertNotIn("t1", graph.nodes)
self.assertIn("t2", locus)
self.assertIn("t3", locus)
locus = Superlocus(t1, logger=logger)
locus.add_transcript_to_locus(t2)
locus.add_transcript_to_locus(t3)
_ = locus.reduce_method_two(locus.define_graph())
if is_ref:
self.assertIn("t1", locus)
else:
self.assertNotIn("t1", locus)
self.assertIn("t2", locus)
self.assertNotIn("t3", locus)
def test_choose_best(self):
conf = MikadoConfiguration()
conf.seed = 10
conf.prepare.files.labels = ["Foo", "Bar", "Baz"]
conf.prepare.files.reference = [False, True, True]
t1 = Transcript(source="Foo")
t1.chrom, t1.strand, t1.start, t1.end, t1.id = "Chr1", "+", 101, 1000, "Foo.1"
t1.add_exons([(101, 1000)])
t1.finalize()
t2 = Transcript(source="Bar")
t2.chrom, t2.strand, t2.start, t2.end, t2.id = "Chr1", "+", 101, 1000, "Foo.2"
t2.add_exons([(101, 1000)])
t2.finalize()
t3 = Transcript(source="Baz")
t3.chrom, t3.strand, t3.start, t3.end, t3.id = "Chr1", "+", 101, 1000, "Foo.3"
t3.add_exons([(101, 1000)])
t3.source = "Baz"
t3.finalize()
reference_sources = {source for source, is_reference in
zip(conf.prepare.files.labels,
conf.prepare.files.reference) if is_reference is True}
self.assertEqual(reference_sources, {"Bar", "Baz"})
self.assertEqual(t1.original_source, "Foo")
self.assertEqual(t2.original_source, "Bar")
self.assertEqual(t3.original_source, "Baz")
self.assertTrue(t3.original_source in reference_sources and
t2.original_source in reference_sources)
for locus_type in [Sublocus, Superlocus, Locus]:
locus = locus_type(t1, configuration=conf)
locus.configuration.pick.run_options.reference_update = False
locus.configuration.pick.run_options.only_reference_update = False
locus.configuration.prepare.files.reference = [False, True, True]
locus.add_transcript_to_locus(t2, check_in_locus=False)
locus.add_transcript_to_locus(t3, check_in_locus=False)
locus["Foo.1"].score = 10
locus["Foo.2"].score = 5
locus["Foo.3"].score = 1
self.assertEqual(locus.choose_best(locus.transcripts), "Foo.1")
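            # With reference_update (or only_reference_update) enabled, the best-scoring reference
            # transcript ("Foo.2", label "Bar") should win over the higher-scoring non-reference "Foo.1".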
locus.configuration.pick.run_options.reference_update = True
self.assertTrue(locus.reference_update)
self.assertEqual(locus.choose_best(locus.transcripts), "Foo.2")
locus.configuration.pick.run_options.reference_update = False
locus.configuration.pick.run_options.only_reference_update = True
self.assertEqual(locus.choose_best(locus.transcripts), "Foo.2")
locus.configuration.prepare.files.reference = [False, False, True]
self.assertEqual(locus.choose_best(locus.transcripts), "Foo.3")
locus.configuration.prepare.files.reference = [False, False, False]
self.assertEqual(locus.choose_best(locus.transcripts), "Foo.1")
def test_find_retained_introns_monoexonic(self):
t1 = Transcript(source="Foo")
t1.chrom, t1.strand, t1.start, t1.end, t1.id = "Chr1", "+", 101, 1000, "Foo.1"
t1.add_exons([(101, 1000)])
t1.finalize()
for locus_type in [Sublocus, Superlocus, Locus]:
t1.retained_introns = []
locus = locus_type(t1)
locus.find_retained_introns(locus[t1.id])
self.assertEqual(locus[t1.id].retained_introns, tuple([]))
def test_skip_evaluate_overlap(self):
t1 = Transcript(source="Foo")
t1.chrom, t1.strand, t1.start, t1.end, t1.id = "Chr1", "+", 101, 1000, "Foo.1"
t1.add_exons([(101, 1000)])
t1.finalize()
t2 = Transcript(source="Foo")
t2.chrom, t2.strand, t2.start, t2.end, t2.id = "Chr2", "+", 101, 1000, "Foo.2"
t2.add_exons([(101, 1000)])
t2.finalize()
t3 = Transcript(source="Foo")
t3.chrom, t3.strand, t3.start, t3.end, t3.id = "Chr1", "+", 1101, 2000, "Foo.3"
t3.add_exons([(1101, 2000)])
t3.finalize()
t4 = Transcript(source="Foo")
t4.chrom, t4.strand, t4.start, t4.end, t4.id = "Chr1", "+", 999, 2000, "Foo.4"
t4.add_exons([(999, 2000)])
t4.finalize()
t1.is_reference = True
t2.is_reference = True
t3.is_reference = True
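        # t2 lies on a different chromosome, t3 does not overlap t1 at all, and t4 shares only 2 bp with t1,
        # so none can satisfy the 100% overlap thresholds; the final assertion shows that, with
        # check_references=False, a pair of reference transcripts is nonetheless treated as intersecting.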
for locus_type in [Sublocus, Superlocus, Locus]:
t4.is_reference = False
for t in (t2, t3, t4):
intersecting, reason = locus_type._evaluate_transcript_overlap(
t1, t, min_cdna_overlap=1, min_cds_overlap=1, check_references=False
)
self.assertFalse(intersecting, reason)
t4.is_reference = True
intersecting, reason = locus_type._evaluate_transcript_overlap(
t1, t4, min_cdna_overlap=1, min_cds_overlap=1, check_references=False
)
self.assertTrue(intersecting, reason)
class ASeventsTester(unittest.TestCase):
logger = create_null_logger("ASevents")
def setUp(self):
self.conf = configurator.load_and_validate_config(None)
self.conf.pick.alternative_splicing.report = True
self.conf.pick.alternative_splicing.valid_ccodes = ["j", "J", "o"]
self.conf.pick.alternative_splicing.redundant_ccodes = ["c", "=", "_", "m"]
self.conf.pick.alternative_splicing.only_confirmed_introns = False
self.conf.pick.alternative_splicing.min_score_perc = 0.5
self.conf.pick.alternative_splicing.keep_retained_introns = True
self.conf.pick.alternative_splicing.min_cdna_overlap = 0.2
self.conf.pick.alternative_splicing.min_cds_overlap = 0.2
self.conf.pick.alternative_splicing.max_isoforms = 3
self.conf.pick.alternative_splicing.cds_only = False
self.t1 = Transcript()
self.t1.chrom = "Chr1"
self.t1.strand = "+"
self.t1.score = 20
self.t1.id = "G1.1"
self.t1.parent = "G1"
self.t1.start = 101
self.t1.end = 1500
self.t1.add_exons([(101, 500), (601, 700), (1001, 1300), (1401, 1500)],
"exon")
self.t1.add_exons([(401, 500), (601, 700), (1001, 1300), (1401, 1440)],
"CDS")
self.t1.finalize()
self.locus = Locus(self.t1)
self.locus.logger = self.logger
self.locus.configuration = self.conf
def test_not_intersecting(self):
# This one is contained and should be rejected
t2 = Transcript()
t2.chrom = "Chr1"
t2.strand = "+"
t2.score = 20
t2.id = "G1.1"
t2.parent = "G1"
t2.start = 601
t2.end = 1420
t2.add_exons([(601, 700), (1001, 1300), (1401, 1420)],
"exon")
t2.add_exons([(601, 700), (1001, 1300), (1401, 1420)],
"CDS")
t2.finalize()
self.assertEqual(self.locus.is_alternative_splicing(t2)[:2], (False, "c"))
def test_non_coding_primary(self):
t1 = self.t1.copy()
t1.strip_cds()
locus = Locus(t1, configuration=self.conf, logger=self.logger)
t2 = Transcript()
t2.chrom = "Chr1"
t2.strand = "+"
t2.score = 20
t2.id = "G1.1"
t2.parent = "G1"
t2.start = 101
t2.end = 1460
t2.add_exons([(101, 500), (601, 700), (1001, 1270), (1401, 1460)],
"exon")
t2.add_exons([(401, 500), (601, 700), (1001, 1270), (1401, 1440)],
"CDS")
t2.finalize()
self.assertEqual(locus.is_alternative_splicing(t2), (False, "NA", None))
def test_valid_as(self):
t2 = Transcript()
t2.chrom = "Chr1"
t2.strand = "+"
t2.score = 20
t2.id = "G2.1"
t2.parent = "G2"
t2.start = 101
t2.end = 1600
t2.add_exons([(101, 500), (601, 700), (1001, 1300), (1401, 1460), (1501, 1600)],
"exon")
t2.add_exons([(401, 500), (601, 700), (1001, 1300), (1401, 1440)],
"CDS")
t2.finalize()
self.locus.configuration.pick.alternative_splicing.cds_only = False
self.assertEqual(self.locus.is_alternative_splicing(t2)[:2], (True, "J"))
self.locus.add_transcript_to_locus(t2)
self.assertEqual(len(self.locus.transcripts), 2, self.locus.transcripts)
def test_redundant_as(self):
t2 = Transcript()
t2.chrom = "Chr1"
t2.strand = "+"
t2.score = 20
t2.id = "G2.1"
t2.parent = "G2"
t2.start = 101
t2.end = 1600
t2.add_exons([(101, 500), (601, 700), (1001, 1300), (1401, 1460), (1501, 1600)],
"exon")
t2.add_exons([(401, 500), (601, 700), (1001, 1300), (1401, 1440)],
"CDS")
t2.finalize()
self.locus.add_transcript_to_locus(t2)
self.assertEqual(len(self.locus.transcripts), 2, self.locus.transcripts)
t3 = Transcript()
t3.chrom = "Chr1"
t3.strand = "+"
t3.score = 20
t3.id = "G3.1"
t3.parent = "G3"
t3.start = 201
t3.end = 1630
t3.add_exons([(201, 500), (601, 700), (1001, 1300), (1401, 1460), (1501, 1630)],
"exon")
t3.add_exons([(401, 500), (601, 700), (1001, 1300), (1401, 1440)],
"CDS")
t3.finalize()
self.assertEqual(self.locus.is_alternative_splicing(t3)[:2], (False, "J"))
self.locus.add_transcript_to_locus(t3)
self.assertEqual(len(self.locus.transcripts), 2, self.locus.transcripts)
def test_non_redundant_as(self):
t2 = Transcript()
t2.chrom = "Chr1"
t2.strand = "+"
t2.score = 20
t2.id = "G2.1"
t2.parent = "G2"
t2.start = 101
t2.end = 1600
# self.t1.add_exons([(101, 500), (601, 700), (1001, 1300), (1401, 1500)],
# "exon")
# self.t1.add_exons([(401, 500), (601, 700), (1001, 1300), (1401, 1440)],
# "CDS")
t2.add_exons([(101, 500), (601, 700), (1001, 1300), (1401, 1460), (1501, 1600)],
"exon")
t2.add_exons([(401, 500), (601, 700), (1001, 1300), (1401, 1440)],
"CDS")
t2.finalize()
# self.locus.add_transcript_to_locus(t2)
self.assertEqual(self.locus.is_alternative_splicing(t2)[:2], (True, "J"))
self.locus.configuration.pick.clustering.cds_only = True
self.assertEqual(self.locus.is_alternative_splicing(t2)[:2], (True, "J"))
self.locus.configuration.pick.alternative_splicing.cds_only = True
self.assertEqual(self.locus.is_alternative_splicing(t2)[:2], (False, "="))
def test_redundant_cds_non_redundant_cdna(self):
t2 = Transcript()
t2.chrom = "Chr1"
t2.strand = "+"
t2.score = 20
t2.id = "G2.1"
t2.parent = "G2"
t2.start = 101
t2.end = 1600
t2.add_exons([(101, 500), (601, 670), (1001, 1300), (1401, 1460), (1501, 1600)],
"exon")
t2.add_exons([(401, 500), (601, 670), (1001, 1300), (1401, 1440)],
"CDS")
t2.finalize()
self.locus.add_transcript_to_locus(t2)
self.assertEqual(len(self.locus.transcripts), 2, self.locus.transcripts)
t3 = Transcript()
t3.chrom = "Chr1"
t3.strand = "+"
t3.score = 20
t3.id = "G3.1"
t3.parent = "G3"
t3.start = 201
t3.end = 1630
t3.add_exons([(201, 500), (601, 670), (1001, 1300), (1401, 1460), (1601, 1630)],
"exon")
t3.add_exons([(401, 500), (601, 670), (1001, 1300), (1401, 1440)],
"CDS")
t3.logger = self.logger
t3.finalize()
self.assertEqual(self.locus.is_alternative_splicing(t3)[:2], (True, "j"))
self.locus.configuration.pick.clustering.cds_only = True
self.assertEqual(self.locus.is_alternative_splicing(t3)[:2], (True, "j"))
self.locus.configuration.pick.alternative_splicing.cds_only = True
self.assertEqual(self.locus.is_alternative_splicing(t3)[:2], (False, "j"))
self.locus.configuration.pick.clustering.cds_only = False
self.locus.configuration.pick.alternative_splicing.cds_only = False
self.locus.add_transcript_to_locus(t3)
self.assertEqual(len(self.locus.transcripts), 3, self.locus.transcripts)
self.locus.remove_transcript_from_locus(t3.id)
self.locus.configuration.pick.clustering.cds_only = True
self.locus.add_transcript_to_locus(t3)
self.assertEqual(len(self.locus.transcripts), 3, self.locus.transcripts)
self.locus.remove_transcript_from_locus(t3.id)
self.locus.configuration.pick.alternative_splicing.cds_only = True
self.locus.add_transcript_to_locus(t3)
self.assertEqual(len(self.locus.transcripts), 2, self.locus.transcripts)
def test_lowscore(self):
t2 = Transcript()
t2.chrom = "Chr1"
t2.strand = "+"
t2.score = 1
t2.id = "G2.1"
t2.parent = "G2"
t2.start = 101
t2.end = 1600
t2.add_exons([(101, 500), (601, 700), (1001, 1300), (1401, 1460), (1501, 1600)],
"exon")
t2.add_exons([(401, 500), (601, 700), (1001, 1300), (1401, 1440)],
"CDS")
t2.finalize()
self.locus.add_transcript_to_locus(t2)
self.assertEqual(len(self.locus.transcripts), 2, self.locus.transcripts)
class MonoHolderTester(unittest.TestCase):
logger = create_default_logger("MonoHolderTester")
def setUp(self):
self.conf = dict()
self.t1 = Transcript()
self.t1.chrom = "Chr1"
self.t1.strand = "+"
self.t1.score = 20
self.t1.id = "G1.1"
self.t1.parent = "G1"
self.t1.start = 101
self.t1.end = 1500
self.t1.add_exons([(101, 500), (601, 700), (1001, 1300), (1401, 1500)],
"exon")
self.t1.add_exons([(401, 500), (601, 700), (1001, 1300), (1401, 1440)],
"CDS")
self.t1.finalize()
self.assertIs(self.t1.is_coding, True)
self.assertTrue(hasattr(self.t1, "configuration"))
def testCdsOverlap(self):
t2 = Transcript()
t2.chrom = "Chr1"
t2.strand = "+"
t2.score = 1
t2.id = "G2.1"
t2.parent = "G2"
t2.start = 101
t2.end = 1600
t2.add_exons([(101, 500), (601, 700), (1001, 1300), (1401, 1460), (1501, 1600)],
"exon")
t2.add_exons([(401, 500), (601, 700), (1001, 1300), (1401, 1440)],
"CDS")
t2.finalize()
self.assertTrue(MonosublocusHolder.is_intersecting(self.t1, t2))
def test_intronMatch(self):
t2 = Transcript()
t2.chrom = "Chr1"
t2.strand = "+"
t2.score = 1
t2.id = "G2.1"
t2.parent = "G2"
t2.start = 101
t2.end = 1600
t2.add_exons([(101, 500), (601, 700), (1001, 1320), (1451, 1460), (1501, 1600)],
"exon")
t2.add_exons([(401, 500), (601, 700), (1001, 1320), (1451, 1460), (1501, 1510)],
"CDS")
t2.finalize()
self.assertTrue(self.t1.is_coding)
self.assertTrue(t2.is_coding)
self.assertTrue(MonosublocusHolder.is_intersecting(self.t1, t2, logger=self.logger))
self.assertTrue(MonosublocusHolder.is_intersecting(self.t1, t2, cds_only=True, logger=self.logger))
def test_intronOverlap(self):
self.t1.strip_cds()
t2 = Transcript()
t2.chrom = "Chr1"
t2.strand = "+"
t2.score = 1
t2.id = "G2.1"
t2.parent = "G2"
t2.start = 101
t2.end = 1470
t2.add_exons([(101, 510), (601, 700), (960, 1350), (1420, 1470)])
t2.finalize()
self.assertTrue(MonosublocusHolder.is_intersecting(self.t1, t2))
def test_intron_contained_in_exon(self):
"""Here the intron is completely contained within an exon. Returns true."""
self.t1.strip_cds()
t2 = Transcript()
t2.chrom = "Chr1"
t2.strand = "+"
t2.score = 1
t2.id = "G2.1"
t2.parent = "G2"
t2.start = 1250
t2.end = 2000
t2.add_exons([(1250, 1560), (1800, 2000)])
t2.finalize()
self.assertTrue(MonosublocusHolder.is_intersecting(self.t1, t2))
def test_intron_not_contained_in_exon(self):
self.t1.strip_cds()
t2 = Transcript()
t2.chrom = "Chr1"
t2.strand = "+"
t2.score = 1
t2.id = "G2.1"
t2.parent = "G2"
t2.start = 1400
t2.end = 3000
t2.add_exons([(1400, 1560), (2800, 3000)])
t2.finalize()
logger = create_default_logger("test_intron_not_contained_in_exon")
for min_cdna_overlap in (0.01, 1):
with self.subTest(min_cdna_overlap=min_cdna_overlap):
self.assertIs(MonosublocusHolder.is_intersecting(
self.t1, t2,
logger=logger,
cds_only=False,
min_cdna_overlap=min_cdna_overlap,
min_cds_overlap=min_cdna_overlap), (min_cdna_overlap < 0.28))
def test_noCDSOverlap(self):
self.t1.strip_cds()
self.assertEqual(self.t1.combined_cds_introns, set())
self.t1.finalized = False
self.t1.add_exons([(401, 500), (601, 700), (1001, 1100)],
"CDS")
self.t1.finalize()
t2 = Transcript()
t2.logger = self.logger
t2.chrom = "Chr1"
t2.strand = "+"
t2.score = 1
t2.id = "G2.1"
t2.parent = "G2"
t2.start = 101
t2.end = 1470
t2.add_exons([(101, 510), (601, 700), (960, 1350), (1421, 1470)])
t2.add_exons([(1201, 1350), (1421, 1450)], "CDS")
t2.finalize()
self.assertTrue(self.t1.is_coding)
self.assertTrue(t2.is_coding)
self.assertGreaterEqual(0,
overlap(
(self.t1.combined_cds_start, self.t1.combined_cds_end),
(t2.combined_cds_start, t2.combined_cds_end)),
[(self.t1.combined_cds_start, self.t1.combined_cds_end),
(t2.combined_cds_start, t2.combined_cds_end)])
self.assertTrue(MonosublocusHolder.is_intersecting(self.t1, t2, logger=self.logger))
self.assertFalse(MonosublocusHolder.is_intersecting(self.t1, t2, cds_only=True, logger=self.logger))
def test_only_CDS_overlap(self):
t2 = Transcript()
t2.chrom = "Chr1"
t2.strand = "+"
t2.score = 1
t2.id = "G2.1"
t2.parent = "G2"
t2.start = 1250
t2.end = 2000
t2.add_exons([(1250, 1560), (1801, 2000)])
t2.add_exons([(1402, 1560), (1801, 1851)], "CDS")
t2.finalize()
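        # t1 and t2 share 39 bp of CDS (1402-1440), roughly 0.19 of t2's 210 bp CDS,
        # hence the 0.19 cut-off in the assertion below.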
logger = create_default_logger(inspect.getframeinfo(inspect.currentframe())[2], level="WARNING")
for min_cds_overlap in [0.05, 0.1, 0.15, 0.2, 0.5]:
with self.subTest(min_cds_overlap=min_cds_overlap):
self.assertIs(MonosublocusHolder.is_intersecting(self.t1, t2,
cds_only=True,
logger=logger,
min_cds_overlap=min_cds_overlap,
min_cdna_overlap=0.01),
(min_cds_overlap <= 0.19),
(self.t1.internal_orfs, t2.internal_orfs))
t2.strip_cds()
t2.finalized = False
t2.add_exons([(1461, 1560), (1801, 1850)], "CDS")
t2.finalize()
self.assertGreater(len(t2.introns), 0)
self.assertGreater(len(t2.combined_cds_introns), 0)
# No CDS overlap this time, but cDNA overlap.
for cds_only in (True, False):
with self.subTest(cds_only=cds_only):
self.assertIs(MonosublocusHolder.is_intersecting(self.t1,
t2,
cds_only=cds_only,
logger=logger), not cds_only)
t2 = Transcript()
t2.chrom = "Chr1"
t2.strand = "+"
t2.score = 1
t2.id = "G2.1"
t2.parent = "G2"
t2.start = 1350
t2.end = 3850
t2.add_exons([(1350, 1560), (2801, 3850)])
t2.add_exons([(1402, 1560), (2801, 3850)], "CDS")
# logger.setLevel("DEBUG")
t2.logger = logger
t2.finalize()
self.assertTrue(t2.is_coding)
for min_overlap in [0.1, 0.2, 0.3, 0.5]:
with self.subTest(min_overlap=min_overlap):
cds_overlap = 0
for frame in range(3):
cds_overlap += len(set.intersection(
self.t1.frames[frame], t2.frames[frame]
))
self.assertIs(MonosublocusHolder.is_intersecting(self.t1, t2,
cds_only=False,
min_cds_overlap=0.07,
min_cdna_overlap=min_overlap,
logger=logger), (min_overlap <= 0.12),
((t2.selected_internal_orf_cds, self.t1.selected_internal_orf_cds),
cds_overlap))
self.assertTrue(t2.is_coding)
for min_overlap in [0.01, 0.05, 0.1, 0.2]:
with self.subTest(min_overlap=min_overlap):
self.assertIs(MonosublocusHolder.is_intersecting(self.t1,
t2,
cds_only=True,
min_cds_overlap=min_overlap,
min_cdna_overlap=min_overlap,
logger=logger), (min_overlap <= 0.07))
def test_frame_compatibility(self):
"""Check that the phase method functions"""
logger = create_default_logger(inspect.getframeinfo(inspect.currentframe())[2])
for phase in [0, 1, 2]:
with self.subTest(phase=phase):
t2 = Transcript()
t2.chrom = "Chr1"
t2.strand = "+"
t2.score = 1
t2.id = "G2.1"
t2.parent = "G2"
t2.start = 1350 + phase
t2.end = 3850 + phase
t2.add_exons([(t2.start, 1560), (2801, t2.end)])
t2.add_exons([(1402 + phase, 1560), (2801, 3850 + phase)], "CDS")
self.assertIs(t2.is_coding, True)
self.assertIsInstance(self.t1, Transcript)
self.assertIsInstance(t2, Transcript)
self.assertIs(MonosublocusHolder.is_intersecting(self.t1,
t2,
cds_only=True,
min_cds_overlap=0.05,
min_cdna_overlap=0.05,
logger=logger), (phase == 0))
self.t1.unfinalize()
self.t1.strand = "-"
self.t1.phases = {} # Necessary for the correct recalculation of phases!
self.t1.logger = logger
self.t1.finalize()
self.assertIs(self.t1.is_coding, True, "Something went wrong in finalising T1")
for phase in [0, 1, 2]:
with self.subTest(phase=phase):
t2 = Transcript()
t2.chrom = "Chr1"
t2.strand = "-"
t2.score = 1
t2.id = "G2.1"
t2.parent = "G2"
t2.start = 1350 + phase
t2.end = 3850 + phase
t2.add_exons([(t2.start, 1560), (2801, t2.end)])
t2.add_exons([(1402 + phase, 1560), (2801, 3850 + phase)], "CDS")
self.assertIs(t2.is_coding, True)
self.assertIs(MonosublocusHolder.is_intersecting(self.t1,
t2,
cds_only=True,
min_cds_overlap=0.05,
min_cdna_overlap=0.05,
logger=logger), (phase == 0))
def test_no_overlap(self):
t2 = Transcript()
t2.chrom = "Chr1"
t2.strand = "+"
t2.score = 1
t2.id = "G2.1"
t2.parent = "G2"
t2.start = 1600
t2.end = 2000
t2.add_exons([(1600, 1700), (1801, 2000)])
t2.add_exons([(1661, 1700), (1801, 1850)], "CDS")
t2.finalize()
self.assertFalse(MonosublocusHolder.is_intersecting(self.t1, t2))
def test_sameness(self):
t2 = Transcript()
t2.chrom = "Chr1"
t2.strand = "+"
t2.score = 1
t2.id = "G1.1"
t2.parent = "G1"
t2.start = 1250
t2.end = 2000
t2.add_exons([(1250, 1560), (1801, 2000)])
t2.add_exons([(1401, 1560), (1801, 1850)], "CDS")
t2.finalize()
# This fails because they have the same ID
self.assertFalse(MonosublocusHolder.is_intersecting(self.t1, t2))
def test_holder_clustering(self):
"""This test has been added starting from the annotation of IWGSC.
It verifies that in a complex locus we create the holders correctly."""
chrom, strand = "chr7A", "+"
transcripts = dict()
transcripts["TA_PGSB_v1_dez2016_mRNA_662095"] = [
[(711041145, 711041431), (711041641, 711042803), (711059154, 711059935)],
36.17, "sublocus:chr3A+:711041145-711059935.multi"]
transcripts["TA_PGSB_v1_dez2016_mRNA_662100"] = [
[(711056723, 711056806), (711056870, 711057549), (711057994, 711059935)],
49.8, "sublocus:chr3A+:711040605-711059935.multi"]
transcripts["TA_PGSB_v1_dez2016_mRNA_662101"] = [
[(711056723, 711057549), (711057991, 711059935)],
48.02, "sublocus:chr3A+:711056723-711059935.multi"]
transcripts["TA_PGSB_v1_dez2016_mRNA_662106"] = [
[(711056723, 711057549), (711057995, 711058007)],
39.51, "sublocus:chr3A+:711056723-711058007.multi"]
transcripts["TA_PGSB_v1_dez2016_mRNA_662109"] = [
[(711056723, 711057141), (711057213, 711057237)],
35.85, "sublocus:chr3A+:711056723-711057237.multi"]
transcripts["TA_PGSB_v1_dez2016_mRNA_662111"] = [
[(711056723, 711057549), (711057994, 711059935)],
49.97, "sublocus:chr3A+:711040605-711059935.multi"]
transcripts["TA_PGSB_v1_dez2016_mRNA_662116"] = [
[(711056723, 711057610)],
30.7, "sublocus:chr3A+:711056723-711057610.mono"
]
transcripts["TA_PGSB_v1_dez2016_mRNA_662121"] = [
[(711058325, 711059913), (711060068, 711060089)],
36.48, "sublocus:chr3A+:711058325-711060089.multi"
]
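        # The eight models form a single overlap chain between 711,041,145 and 711,060,089,
        # so calculate_mono_metrics is expected to cluster them into one holder.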
superlocus = None
configuration = configurator.load_and_validate_config(None)
for transcript in transcripts:
tr = Transcript()
exons, score, sublocus = transcripts[transcript]
tr.chrom, tr.strand, tr.id = chrom, strand, transcript
tr.add_exons(exons, features="exon")
tr.add_exons(exons, features="CDS")
tr.finalize()
subl = Sublocus(tr, configuration=configuration)
if superlocus is None:
superlocus = Superlocus(tr, configuration=configuration)
else:
superlocus.add_transcript_to_locus(tr, check_in_locus=False)
superlocus.subloci.append(subl)
superlocus.scores[transcript] = score
superlocus.subloci_defined = True
self.assertEqual(len(superlocus.subloci), len(transcripts))
superlocus.logger = create_default_logger("test_holder_clustering", level="WARNING")
self.assertFalse(superlocus.configuration.pick.clustering.simple_overlap_for_monoexonic)
superlocus.configuration.scoring.requirements = None
superlocus.calculate_mono_metrics()
self.assertEqual(len(superlocus.monoholders), 1,
"\n".join([", ".join(list(_.transcripts.keys())) for _ in superlocus.monoholders]))
def test_alternative_splicing_monoexonic_not_enough_overlap(self):
"""This test verifies that while we can cluster together the transcripts at the holder stage,
if the overlap is not enough they will fail to be recognised as valid AS events."""
jconf = configurator.load_and_validate_config(None)
t1, t2 = Transcript(), Transcript()
t1.chrom, t2.chrom = "1", "1"
t1.strand, t2.strand = "+", "+"
t1.add_exons([(1260208, 1260482), (1262216, 1262412), (1262621, 1263851)])
t1.add_exons([(1262291, 1262412), (1262621, 1263143)], features="CDS")
t1.id = "cls-0-sta-combined-0_1.27.12"
t2.add_exons([(1262486, 1264276)])
t2.add_exons([(1263571, 1264236)], features="CDS")
t2.id = "trn-0-sta-combined-0_1_TRINITY_GG_1373_c0_g2_i1.path1"
self.assertTrue(MonosublocusHolder.is_intersecting(
t1, t2, cds_only=False,
min_cdna_overlap=jconf.pick.alternative_splicing.min_cdna_overlap,
min_cds_overlap=jconf.pick.alternative_splicing.min_cds_overlap,
simple_overlap_for_monoexonic=True))
self.assertFalse(MonosublocusHolder.is_intersecting(
t1, t2, cds_only=False,
min_cdna_overlap=jconf.pick.clustering.min_cdna_overlap,
min_cds_overlap=jconf.pick.clustering.min_cds_overlap,
simple_overlap_for_monoexonic=False))
for simple in (True, False):
with self.subTest(simple=simple):
jconf.pick.clustering.simple_overlap_for_monoexonic = simple
self.logger.setLevel("DEBUG")
slocus = Superlocus(t1, configuration=jconf, logger=self.logger)
slocus.add_transcript_to_locus(t2)
locus = Locus(t1, configuration=jconf)
slocus.loci[locus.id] = locus
slocus.define_alternative_splicing()
self.assertEqual(len(slocus.loci[locus.id].transcripts), 1)
class TestLocus(unittest.TestCase):
"""
    This unit test focuses on locus definition and alternative splicing.
"""
logger = create_default_logger("tester")
@classmethod
def setUpClass(cls):
cls.fai = pysam.FastaFile(pkg_resources.resource_filename("Mikado.tests", "chr5.fas.gz"))
def setUp(self):
"""Set up for the unit test."""
# Mock dictionary to be used for the alternative splicing checks
self.configuration = configurator.load_and_validate_config(None)
self.configuration.pick.alternative_splicing.report = True
self.configuration.pick.alternative_splicing.pad = True
self.configuration.pick.alternative_splicing.max_isoforms = 3
self.configuration.pick.alternative_splicing.keep_retained_introns = False
self.configuration.pick.alternative_splicing.min_cds_overlap = 0
self.configuration.pick.alternative_splicing.min_cdna_overlap = 0
self.configuration.pick.alternative_splicing.min_score_perc = 0.1
self.configuration.pick.alternative_splicing.valid_ccodes = ["j", "G", "g"]
self.configuration.pick.alternative_splicing.redundant_ccodes = ["c", "=", "_", "m", "n"]
self.configuration.pick.alternative_splicing.only_confirmed_introns = False
# self.configuration = configurator.check_json(self.configuration)
t1 = """Chr1\tfoo\ttranscript\t1001\t3000\t.\t+\t.\tgene_id "Chr1.1"; transcript_id "Chr1.1.1";
Chr1\tfoo\texon\t1001\t1300\t.\t+\t.\tgene_id "Chr1.1"; transcript_id "Chr1.1.1";
Chr1\tfoo\tCDS\t1101\t1300\t.\t+\t.\tgene_id "Chr1.1"; transcript_id "Chr1.1.1";
Chr1\tfoo\texon\t1701\t2000\t.\t+\t.\tgene_id "Chr1.1"; transcript_id "Chr1.1.1";
Chr1\tfoo\tCDS\t1701\t2000\t.\t+\t.\tgene_id "Chr1.1"; transcript_id "Chr1.1.1";
Chr1\tfoo\texon\t2101\t2500\t.\t+\t.\tgene_id "Chr1.1"; transcript_id "Chr1.1.1";
Chr1\tfoo\tCDS\t2101\t2500\t.\t+\t.\tgene_id "Chr1.1"; transcript_id "Chr1.1.1";
Chr1\tfoo\texon\t2801\t3000\t.\t+\t.\tgene_id "Chr1.1"; transcript_id "Chr1.1.1";
Chr1\tfoo\tCDS\t2801\t2902\t.\t+\t.\tgene_id "Chr1.1"; transcript_id "Chr1.1.1";
"""
t1lines = [GtfLine(line) for line in t1.split("\n") if line]
self.assertEqual(t1lines[0].transcript, "Chr1.1.1", t1lines[0].transcript)
self.assertEqual(t1lines[0].parent, ["Chr1.1"], t1lines[0].parent)
self.assertEqual(t1lines[0].gene, "Chr1.1", t1lines[0].parent)
self.assertEqual(t1lines[0].id, "Chr1.1.1", t1lines[0].id)
self.t1 = loci.Transcript(t1lines[0])
for exon in t1lines[1:]:
if exon.header:
continue
self.t1.add_exon(exon)
self.t1.score = 20
self.t1.finalize()
        # Just a fragment of the best transcript
t1_contained = """Chr1\tfoo\ttranscript\t1001\t2000\t.\t+\t.\tgene_id "Chr1.1"; transcript_id "Chr1.1.2";
Chr1\tfoo\texon\t1001\t1300\t.\t+\t.\tgene_id "Chr1.1"; transcript_id "Chr1.1.2";
Chr1\tfoo\tCDS\t1101\t1300\t.\t+\t.\tgene_id "Chr1.1"; transcript_id "Chr1.1.2";
Chr1\tfoo\texon\t1701\t2000\t.\t+\t.\tgene_id "Chr1.1"; transcript_id "Chr1.1.2";
Chr1\tfoo\tCDS\t1701\t1902\t.\t+\t.\tgene_id "Chr1.1"; transcript_id "Chr1.1.2";
"""
t1_contained_lines = [GtfLine(line) for line in t1_contained.split("\n") if line]
self.t1_contained = loci.Transcript(t1_contained_lines[0])
for exon in t1_contained_lines[1:]:
if exon.header:
continue
self.t1_contained.add_exon(exon)
self.t1_contained.score = 15
self.t1_contained.finalize()
# Valid AS event
t1_as = """Chr1\tfoo\ttranscript\t1001\t3000\t.\t+\t.\tgene_id "Chr1.1"; transcript_id "Chr1.1.3";
Chr1\tfoo\texon\t1001\t1300\t.\t+\t.\tgene_id "Chr1.1"; transcript_id "Chr1.1.3";
Chr1\tfoo\tCDS\t1101\t1300\t.\t+\t.\tgene_id "Chr1.1"; transcript_id "Chr1.1.3";
Chr1\tfoo\texon\t1701\t2000\t.\t+\t.\tgene_id "Chr1.1"; transcript_id "Chr1.1.3";
Chr1\tfoo\tCDS\t1701\t2000\t.\t+\t.\tgene_id "Chr1.1"; transcript_id "Chr1.1.3";
Chr1\tfoo\texon\t2101\t2400\t.\t+\t.\tgene_id "Chr1.1"; transcript_id "Chr1.1.3";
Chr1\tfoo\tCDS\t2101\t2400\t.\t+\t.\tgene_id "Chr1.1"; transcript_id "Chr1.1.3";
Chr1\tfoo\texon\t2801\t3000\t.\t+\t.\tgene_id "Chr1.1"; transcript_id "Chr1.1.3";
Chr1\tfoo\tCDS\t2801\t2900\t.\t+\t.\tgene_id "Chr1.1"; transcript_id "Chr1.1.3";
"""
t1_as_lines = [GtfLine(line) for line in t1_as.split("\n") if line]
self.t1_as = loci.Transcript(t1_as_lines[0])
for exon in t1_as_lines[1:]:
if exon.header:
continue
self.t1_as.add_exon(exon)
self.t1_as.score = 19
self.t1_as.finalize()
# Retained intron AS event
t1_retained = """Chr1\tfoo\ttranscript\t1001\t2900\t.\t+\t.\tgene_id "Chr1.1"; transcript_id "Chr1.1.4";
Chr1\tfoo\texon\t1001\t1300\t.\t+\t.\tgene_id "Chr1.1"; transcript_id "Chr1.1.4";
Chr1\tfoo\tCDS\t1101\t1300\t.\t+\t.\tgene_id "Chr1.1"; transcript_id "Chr1.1.4";
Chr1\tfoo\texon\t1701\t2000\t.\t+\t.\tgene_id "Chr1.1"; transcript_id "Chr1.1.4";
Chr1\tfoo\tCDS\t1701\t2000\t.\t+\t.\tgene_id "Chr1.1"; transcript_id "Chr1.1.4";
Chr1\tfoo\texon\t2101\t2900\t.\t+\t.\tgene_id "Chr1.1"; transcript_id "Chr1.1.4";
Chr1\tfoo\tCDS\t2101\t2472\t.\t+\t.\tgene_id "Chr1.1"; transcript_id "Chr1.1.4";
"""
t1_retained_lines = [GtfLine(line) for line in t1_retained.split("\n") if line]
self.t1_retained = loci.Transcript(t1_retained_lines[0])
for exon in t1_retained_lines[1:]:
if exon.header:
continue
self.t1_retained.add_exon(exon)
self.t1_retained.score = 10
self.t1_retained.finalize()
self.logger.setLevel(logging.WARNING)
self.configuration.reference.genome = self.fai.filename.decode()
def test_validity(self):
"""
First test of validity to ensure the CCodes are as expected.
:return:
"""
# The fragment should have a c assigned
result, _ = Assigner.compare(self.t1_contained, self.t1)
self.assertEqual(result.ccode[0], "c")
# The valid AS should have a j assigned
result, _ = Assigner.compare(self.t1_as, self.t1)
self.assertEqual(result.ccode[0], "j")
# The retained intron AS should have a j assigned
result, _ = Assigner.compare(self.t1_retained, self.t1)
self.assertEqual(result.ccode[0], "j", result.ccode)
def testCreate(self):
"""
Test the creation of the locus
:return:
"""
locus = loci.Locus(self.t1, logger=self.logger)
locus.configuration = self.configuration
self.assertEqual(len(locus.transcripts), 1)
def test_exclude_contained(self):
"""Test that we exclude a transcript with a contained class code (c)"""
locus = loci.Locus(self.t1, logger=self.logger)
locus.configuration = self.configuration
self.assertEqual(len(locus.transcripts), 1)
locus.add_transcript_to_locus(self.t1_contained)
self.assertEqual(len(locus.transcripts), 1)
def test_add_contained(self):
"""Test that we add a transcript with a contained class code (c) if
we explicitly ask for it"""
locus = loci.Locus(self.t1, logger=self.logger)
locus.configuration = self.configuration.copy()
locus._add_to_alternative_splicing_codes("c")
self.assertEqual(len(locus.transcripts), 1)
locus.add_transcript_to_locus(self.t1_contained)
self.assertEqual(len(locus.transcripts), 2)
def test_addValid(self):
"""Test that we can successfully add a transcript to the locus if
        it passes muster."""
locus = loci.Locus(self.t1, logger=self.logger)
locus.configuration = self.configuration
self.assertEqual(len(locus.transcripts), 1)
locus.add_transcript_to_locus(self.t1_as)
self.assertEqual(len(locus.transcripts), 2)
def test_excludeValid(self):
"""Test that a usually valid AS is excluded when:
- we ask for no more than one AS event
- we exclude its ccode (j)
- we ask for perfect (100%) CDS overlap
"""
configuration = self.configuration.copy()
configuration.pick.alternative_splicing.max_isoforms = 3
configuration.pick.alternative_splicing.valid_ccodes = ["n", "O", "h"]
locus = loci.Locus(self.t1, logger=self.logger, configuration=configuration)
self.assertEqual(len(locus.transcripts), 1)
locus.add_transcript_to_locus(self.t1_as)
self.assertEqual(len(locus.transcripts), 1)
locus._add_to_alternative_splicing_codes("j")
locus.configuration.pick.alternative_splicing.min_cds_overlap = 1
locus.add_transcript_to_locus(self.t1_as)
self.assertEqual(len(locus.transcripts), 1)
def test_exclude_opposite_strand(self):
candidate = self.t1_as
candidate.reverse_strand()
logger = self.logger
# logger.setLevel(logging.DEBUG)
locus = loci.Locus(self.t1, logger=logger)
locus.configuration = self.configuration.copy()
self.assertEqual(len(locus.transcripts), 1)
locus.add_transcript_to_locus(candidate)
self.assertEqual(len(locus.transcripts), 1)
def test_serialisation(self):
"""Check that the main types can be serialised correctly."""
candidate = self.t1
pickle.dumps(candidate)
configuration = configurator.load_and_validate_config(None)
for obj in Superlocus, Sublocus, Locus:
with self.subTest(obj=obj):
locus = obj(candidate, configuration=configuration)
dumped = pickle.dumps(locus)
recovered = pickle.loads(dumped)
self.assertIsInstance(recovered, obj)
self.assertEqual(recovered, locus)
def test_double_orf(self):
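        # Build a transcript carrying two non-overlapping internal ORFs and verify that
        # scoring still produces a single row for the transcript.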
t = Transcript()
t.add_exons([(101, 1000), (1101, 1200), (2001, 2900)])
t.id = "t1"
t.strand = "+"
orf1 = BED12()
orf1.transcriptomic = True
orf1.chrom = t.id
orf1.start = 1
orf1.end = sum([_[1] - _[0] + 1 for _ in t.exons])
orf1.strand = "+"
orf1.name = "t1.orf1"
orf1.block_sizes = (900,)
orf1.thick_start = 1
orf1.thick_end = 900
orf1.block_starts = (1,)
orf1.block_count = 1
orf2 = BED12()
orf2.transcriptomic = True
orf2.strand = "+"
orf2.chrom = t.id
orf2.start = 1
orf2.end = sum([_[1] - _[0] + 1 for _ in t.exons])
orf2.name = "t1.orf2"
orf2.block_sizes = (900,)
orf2.thick_start = 1001
orf2.thick_end = 1900
orf2.block_starts = (1,)
orf2.block_count = 1
self.assertFalse(orf1.invalid)
self.assertFalse(orf2.invalid)
t.load_orfs([orf1, orf2])
self.assertEqual(t.number_internal_orfs, 2)
locus = Locus(t)
locus.filter_and_calculate_scores()
        self.assertEqual(list(locus.scores.keys()), [t.id])
rows = list(locus.print_scores())
self.assertEqual(len(rows), 1, rows)
self.assertEqual(rows[0]["tid"], t.id, rows[0])
def test_remove_AS_overlapping(self):
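        # t1/t1_1 and t2/t2_1 define two neighbouring loci on the same strand. Each AS
        # candidate is accepted when its pair is considered in isolation, but once both loci
        # live in the same superlocus the candidates, which overlap each other across the
        # boundary between the loci, must be dropped, leaving one transcript per locus.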
logger = create_null_logger(inspect.getframeinfo(inspect.currentframe())[2],
level="WARNING")
t1, t2, t1_1, t2_1 = Transcript(), Transcript(), Transcript(), Transcript()
t1.chrom = t2.chrom = t1_1.chrom = t2_1.chrom = "1"
t1.id, t2.id, t1_1.id, t2_1.id = "t1", "t2", "t1_1", "t2_1"
t1.strand = t2.strand = t1_1.strand = t2_1.strand = "+"
t1.add_exons([(101, 500), (801, 1000)])
t1.add_exons([(101, 500), (801, 1000)], features="CDS")
t1_1.add_exons([(101, 500), (903, 1100), (1301, 1550)])
t1_1.add_exons([(101, 500), (903, 1100), (1301, 1550)], features="CDS")
t2.add_exons([(1601, 1800), (1901, 2000)])
t2.add_exons([(1601, 1800), (1901, 2000)], features="CDS")
t2_1.add_exons([(1351, 1550), (1651, 1851), (1901, 2000)])
t2_1.add_exons([(1351, 1550), (1651, 1851), (1901, 2000)], features="CDS")
for tr in [t1, t2, t1_1, t2_1]:
with self.subTest(tr=tr):
tr.finalize()
self.assertGreater(tr.combined_cds_length, 0, tr.id)
conf = configurator.load_and_validate_config(None)
conf.pick.alternative_splicing.valid_ccodes = ["j", "J", "g", "G"]
conf.pick.alternative_splicing.only_confirmed_introns = False
conf.as_requirements = {"_expression": "cdna_length",
"expression": "evaluated['cdna_length']",
"parameters": {
"cdna_length": {"operator": "gt", "value": 0, "name": "cdna_length"}
}}
conf.pick.alternative_splicing.pad = False
conf.pick.alternative_splicing.min_cds_overlap = 0.75
with self.subTest():
superlocus_one = Superlocus(t1, configuration=conf)
superlocus_one.add_transcript_to_locus(t1_1)
locus_one = Locus(t1, configuration=conf)
locus_one.logger = logger
superlocus_one.loci[locus_one.id] = locus_one
superlocus_one.loci_defined = True
with self.assertLogs(logger=logger, level="DEBUG") as cm:
superlocus_one.logger = logger
superlocus_one.define_alternative_splicing()
self.assertEqual(len(superlocus_one.loci), 1)
locus_id = [_ for _ in superlocus_one.loci.keys() if
t1.id in superlocus_one.loci[_].transcripts][0]
self.assertEqual(len(superlocus_one.loci[locus_id].transcripts), 2,
cm.output)
with self.subTest():
superlocus_two = Superlocus(t2, configuration=conf)
superlocus_two.add_transcript_to_locus(t2_1)
locus_two = Locus(t2, configuration=conf)
superlocus_two.loci[locus_two.id] = locus_two
superlocus_two.loci_defined = True
superlocus_two.logger = logger
superlocus_two.define_alternative_splicing()
self.assertEqual(len(superlocus_two.loci), 1)
locus_id = [_ for _ in superlocus_two.loci.keys() if
t2.id in superlocus_two.loci[_].transcripts][0]
self.assertEqual(len(superlocus_two.loci[locus_id].transcripts), 2)
with self.subTest():
superlocus = Superlocus(t1, configuration=conf)
superlocus.add_transcript_to_locus(t2, check_in_locus=False)
superlocus.add_transcript_to_locus(t1_1)
superlocus.add_transcript_to_locus(t2_1)
locus_one = Locus(t1_1, configuration=conf, logger=logger)
locus_two = Locus(t2, configuration=conf, logger=logger)
superlocus.loci[locus_one.id] = locus_one
superlocus.loci[locus_two.id] = locus_two
self.assertEqual(len(superlocus.loci[locus_one.id].transcripts), 1)
self.assertEqual(len(superlocus.loci[locus_two.id].transcripts), 1)
superlocus.loci_defined = True
with self.assertLogs(logger=logger, level="DEBUG") as cm:
self.assertEqual(len(superlocus.loci), 2)
superlocus.define_alternative_splicing()
locus_one_id = [_ for _ in superlocus.loci.keys() if
t1_1.id in superlocus.loci[_].transcripts][0]
locus_two_id = [_ for _ in superlocus.loci.keys() if
t2.id in superlocus.loci[_].transcripts][0]
self.assertNotEqual(locus_one_id, locus_two_id)
self.assertEqual(len(superlocus.loci), 2)
self.assertEqual(len(superlocus.loci[locus_two_id].transcripts), 1,
(cm.output, superlocus.loci[locus_one_id].transcripts.keys()))
self.assertEqual(len(superlocus.loci[locus_one_id].transcripts), 1,
(cm.output, superlocus.loci[locus_one_id].transcripts.keys()))
class EmptySuperlocus(unittest.TestCase):
def test_empty(self):
logger = create_null_logger()
logger.setLevel("WARNING")
with self.assertLogs(logger=logger, level="WARNING"):
_ = Superlocus(transcript_instance=None)
class WrongSplitting(unittest.TestCase):
def test_split(self):
t1 = Transcript(BED12("Chr1\t100\t1000\tID=t1;coding=False\t0\t+\t100\t1000\t0\t1\t900\t0"))
t2 = Transcript(BED12("Chr1\t100\t1000\tID=t2;coding=False\t0\t-\t100\t1000\t0\t1\t900\t0"))
sl = Superlocus(t1, stranded=False)
sl.add_transcript_to_locus(t2)
splitted = list(sl.split_strands())
self.assertEqual(len(splitted), 2)
self.assertIsInstance(splitted[0], Superlocus)
self.assertIsInstance(splitted[1], Superlocus)
self.assertTrue(splitted[0].stranded)
self.assertTrue(splitted[1].stranded)
def test_invalid_split(self):
t1 = Transcript(BED12("Chr1\t100\t1000\tID=t1;coding=False\t0\t+\t100\t1000\t0\t1\t900\t0"))
t2 = Transcript(BED12("Chr1\t100\t1000\tID=t2;coding=False\t0\t+\t100\t1000\t0\t1\t900\t0"))
logger = create_default_logger("test_invalid_split", level="WARNING")
with self.assertLogs(logger=logger, level="WARNING") as cm:
sl = Superlocus(t1, stranded=True, logger=logger)
sl.add_transcript_to_locus(t2)
splitted = list(sl.split_strands())
self.assertEqual(splitted[0], sl)
self.assertEqual(len(splitted), 1)
self.assertIn("WARNING:test_invalid_split:Trying to split by strand a stranded Locus, {}!".format(sl.id),
cm.output, cm.output)
class WrongLoadingAndIntersecting(unittest.TestCase):
def test_wrong_intersecting(self):
t1 = Transcript(BED12("Chr1\t100\t1000\tID=t1;coding=False\t0\t+\t100\t1000\t0\t1\t900\t0"))
sl = Superlocus(t1, stranded=True)
with self.subTest():
self.assertFalse(sl.is_intersecting(t1, t1))
t2 = Transcript(BED12("Chr1\t100\t1000\tID=t1;coding=False\t0\t-\t100\t1000\t0\t1\t900\t0"))
with self.subTest():
self.assertTrue(sl.is_intersecting(t1, t2))
def test_coding_intersecting(self):
t1 = Transcript(BED12("Chr1\t100\t1000\tID=t1;coding=True\t0\t+\t200\t500\t0\t1\t900\t0"))
sl = Superlocus(t1, stranded=True)
t2 = Transcript(BED12("Chr1\t100\t1000\tID=t2;coding=True\t0\t+\t600\t900\t0\t1\t900\t0"))
t3 = Transcript(BED12("Chr1\t100\t1000\tID=t3;coding=True\t0\t+\t300\t600\t0\t1\t900\t0"))
t1.finalize()
t2.finalize()
t3.finalize()
self.assertTrue(t1.is_coding)
self.assertTrue(t2.is_coding)
self.assertTrue(t3.is_coding)
self.assertNotEqual(t1, t2)
self.assertNotEqual(t1, t3)
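        # t1/t2: the cDNAs overlap but their CDS intervals (201-500 vs 601-900) do not, so
        # they intersect only with cds_only=False. t1/t3: the CDS intervals (201-500 vs
        # 301-600) overlap, so they intersect either way.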
with self.subTest():
self.assertTrue(sl.is_intersecting(t1, t2, cds_only=False))
self.assertFalse(sl.is_intersecting(t1, t2, cds_only=True))
with self.subTest():
self.assertTrue(sl.is_intersecting(t1, t3, cds_only=False))
self.assertTrue(sl.is_intersecting(t1, t3, cds_only=True))
class RetainedIntronTester(unittest.TestCase):
def setUp(self):
self.my_json = os.path.join(os.path.dirname(__file__), "configuration.yaml")
self.my_json = configurator.load_and_validate_config(self.my_json)
def test_5utr_vs_3utr(self):
t1 = Transcript()
t1.chrom, t1.id = "Chr1", "t1"
t1.add_exons([(101, 300), (501, 700), (901, 1000), (1301, 1500), (1801, 2000)])
# 100 200 100 200 60
t1.add_exons([(201, 300), (501, 700), (901, 1000), (1301, 1500), (1801, 1860)], features="CDS")
t2 = Transcript()
t2.chrom, t2.id = "Chr1", "t2"
t2.add_exons([(101, 700), (901, 1000), (1301, 1500), (1801, 2000)])
# 40 200 60
t2.add_exons([(961, 1000), (1301, 1500), (1801, 1860)], features="CDS")
for strand, cds_disr in zip(["+", "-"], [True, False]):
with self.subTest(strand=strand, cds_disr=cds_disr):
t1.unfinalize()
t2.unfinalize()
t1.strand, t2.strand = strand, strand
t1.finalize()
t2.finalize()
s = Superlocus(t1)
s.add_transcript_to_locus(t2, check_in_locus=False)
s.find_retained_introns(s["t1"])
s.find_retained_introns(s["t2"])
self.assertTrue(s["t2"].retained_introns, [(101, 700)])
self.assertEqual(s["t2"].cds_disrupted_by_ri, cds_disr)
def test_ri_in_monoexonic(self):
for strand in ("+", "-"):
with self.subTest(strand=strand):
logger = create_default_logger("test_ri_in_monoexonic_{strand}".format(**locals()),
level="INFO")
t = Transcript()
t.chrom, t.strand, t.id, t.parent = "Chr1", strand, "t1", "gene"
t.add_exons([(101, 200), (301, 600), (901, 1200), (1501, 1800)])
t.add_exons([(171, 200), (301, 600), (901, 1200), (1501, 1530)], features="CDS")
t2 = Transcript()
t2.chrom, t2.strand, t2.id, t2.parent = "Chr1", strand, "t2", "gene"
t2.add_exons([(101, 650)])
t2.add_exons([(251, 580)], features="CDS")
t3 = Transcript()
t3.chrom, t3.strand, t3.id, t3.parent = "Chr1", strand, "t3", "gene"
t3.add_exons([(151, 470)])
t3.add_exons([(251, 470)], features="CDS")
t.finalize()
t2.finalize()
t3.finalize()
sl = Superlocus(t, logger=logger)
sl.add_transcript_to_locus(t2)
sl.add_transcript_to_locus(t3)
sl.logger.setLevel("DEBUG")
sl.find_retained_introns(t2)
sl.find_retained_introns(t3)
self.assertEqual(t2.retained_introns, (t2.exons[0],))
self.assertEqual(t3.retained_introns, (t3.exons[0],))
self.assertTrue(t2.cds_disrupted_by_ri)
self.assertTrue(t3.cds_disrupted_by_ri)
def test_cds_disr_five_utr(self):
lines = """Chr1 100 2682 ID=test_0;coding=True;phase=0 0 + 497 2474 0 7 208,201,41,164,106,170,715 0,351,780,1075,1439,1616,1867
Chr1 100 2682 ID=test_1;coding=True;phase=0 0 + 497 2474 0 6 552,41,164,106,170,715 0,780,1075,1439,1616,1867
Chr1 100 2682 ID=test_2;coding=True;phase=0 0 + 1228 2474 0 5 821,164,106,170,715 0,1075,1439,1616,1867
Chr1 100 2682 ID=test_3;coding=True;phase=0 0 + 497 2474 0 7 234,201,41,164,106,170,715 0,351,780,1075,1439,1616,1867
"""
beds = [BED12(line) for line in lines.split("\n")]
transcripts = dict((bed.id, Transcript(bed)) for bed in beds if not bed.header)
sl = Superlocus(transcripts["test_0"])
for tid in transcripts:
sl.add_transcript_to_locus(transcripts[tid], check_in_locus=False)
sl.filter_and_calculate_scores()
for tid in sl:
if tid == "test_2":
self.assertTrue(sl[tid].cds_disrupted_by_ri)
self.assertEqual(sl[tid].retained_introns, ((101, 921),))
else:
self.assertFalse(sl[tid].cds_disrupted_by_ri)
def test_real_retained_pos(self):
"""Here we verify that a real retained intron is called as such"""
t1 = Transcript()
t1.chrom, t1.strand, t1.id = 1, "+", "t1"
t1.add_exons([(101, 500), (801, 1000), (1201, 1300), (1501, 1800)])
t1.add_exons([(201, 500), # 300
(801, 1000), # 200
(1201, 1300), #100
(1501, 1530) # 30
], features="CDS")
t1.finalize()
t2 = Transcript()
t2.chrom, t2.strand, t2.id = 1, "+", "t2"
t2.add_exons([(101, 500), (801, 1000), (1201, 1600)])
t2.add_exons([(201, 500), # 300
(801, 1000), # 200
(1201, 1420), # 220
], features="CDS")
t2.finalize()
t3 = Transcript()
t3.chrom, t3.strand, t3.id = 1, "+", "t3"
t3.add_exons([(101, 500), (801, 970), (1100, 1180)])
t3.add_exons([(101, 500), (801, 970), (1100, 1130)], features="CDS")
t3.finalize()
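        # t2's final exon (1201, 1600) runs through t1's (1301, 1500) intron and should be
        # flagged as a retained intron; t3 has a different, shorter 3' structure and should not.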
for pred, retained in [(t2, True), (t3, False)]:
with self.subTest(pred=pred, retained=retained):
sup = Superlocus(t1, configuration=self.my_json)
sup.add_transcript_to_locus(pred)
sup.find_retained_introns(pred)
self.assertEqual((len(sup.transcripts[pred.id].retained_introns) > 0),
retained, (pred.id, retained))
def test_false_ri(self):
t1 = [[(11, 100), (301, 400), (451, 500)],
[(71, 100), (301, 400), (451, 470)]]
t2 = [[(11, 150), (301, 400), (451, 500)],
[(121, 150), (301, 400), (451, 470)]]
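        # tr1 and tr2 differ only in the end of their first exon (100 vs 150): tr2 dips into
        # tr1's first intron without running through it, so neither transcript should be
        # reported as having a retained intron or a disrupted CDS.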
for strand in ("+", "-"):
with self.subTest(strand=strand):
tr1 = Transcript()
tr1.add_exons(t1[0])
tr1.add_exons(t1[0], features="CDS")
tr1.strand = strand
tr1.id = "t1"
tr1.finalize()
self.assertTrue(tr1.is_coding)
tr2 = Transcript()
tr2.add_exons(t2[0])
tr2.add_exons(t2[0], features="CDS")
tr2.strand = strand
tr2.id = "t2"
self.assertTrue(tr2.is_coding)
logger = create_default_logger("test_false_ri", level="DEBUG")
sup = Superlocus(tr1, logger=logger)
sup.add_transcript_to_locus(tr2)
sup.filter_and_calculate_scores(check_requirements=False)
self.assertFalse(sup["t1"].cds_disrupted_by_ri)
self.assertEqual(sup["t1"].retained_intron_num, 0)
self.assertFalse(sup["t2"].cds_disrupted_by_ri)
self.assertEqual(sup["t2"].retained_intron_num, 0)
def test_false_positive_retained_introns(self):
bed1 = "\t".join(str(_) for _ in
["Chr1", 33677, 37762,
"ID=mikado.Chr1G2.1;coding=True;phase=0;alias=trinity_c0_g1_i1.mrna1.2",
19.0, "-", 33991, 37061, 0, 9,
"650,1074,81,234,62,112,181,26,194", "0,723,1889,2052,2946,3132,3345,3695,3891"])
bed2 = "\t".join(str(_) for _ in
["Chr1", 33677, 37762,
"ID=mikado.Chr1G2.2;coding=True;phase=0;alias=stringtie_Stringtie_TH.27.2",
19.0, "-", 33991, 37061, 0, 9,
"650,1071,81,234,62,112,181,26,194", "0,723,1889,2052,2946,3132,3345,3695,3891"])
bed3 = "\t".join(str(_) for _ in
["Chr1", 33677, 37762,
"ID=mikado.Chr1G2.3;coding=True;phase=0;alias=stringtie_Stringtie_TH.27.5",
11.0, "-", 34833, 37061, 0, 10,
"650,455,545,81,234,62,112,181,26,194", "0,723,1252,1889,2052,2946,3132,3345,3695,3891"])
bed4 = "\t".join(str(_) for _ in
["Chr1", 33677, 37762,
"ID=mikado.Chr1G1.2;coding=True;phase=0;alias=stringtie_Stringtie_TH.27.6",
15.15,
"-", 34833, 37061, 0, 11,
"181,347,455,545,81,234,62,112,181,26,194",
"0,303,723,1252,1889,2052,2946,3132,3345,3695,3891"])
beds = [BED12(line) for line in [bed1, bed2, bed3, bed4]]
transcripts = [Transcript(bed) for bed in beds]
[transcript.finalize() for transcript in transcripts]
sstart = 0
for transcript in transcripts:
print(transcript.id, end="\t")
print(transcript.start - sstart, transcript.end - sstart, transcript.combined_cds_start - sstart,
transcript.combined_cds_end - sstart, end="\t", sep="\t")
[print((_[0] - sstart, _[1] - sstart), end="\t") for _ in sorted(transcript.introns)]
print("\n")
self.assertTrue([transcript.is_coding for transcript in transcripts])
sup = Superlocus(transcripts[0], stranded=True)
for transcript in transcripts[1:]:
sup.add_transcript_to_locus(transcript, check_in_locus=False)
[sup.find_retained_introns(transcript) for transcript in sup.transcripts.values()]
for transcript in sup.transcripts.values():
self.assertFalse(transcript.cds_disrupted_by_ri, (transcript.id,
transcript.retained_introns,
transcript.cds_disrupted_by_ri))
def test_retained_pos_truncated(self):
"""Here we verify that a real retained intron is called as such,
even when the transcript is truncated."""
t1 = Transcript()
t1.chrom, t1.strand, t1.id = 1, "+", "t1"
t1.add_exons([(101, 500), (801, 1000), (1201, 1300), (1501, 1800)])
t1.add_exons([(201, 500), # 300
(801, 1000), # 200
(1201, 1300), #100
(1501, 1530) # 30
], features="CDS")
t1.finalize()
t2 = Transcript()
t2.chrom, t2.strand, t2.id = 1, "+", "t2"
t2.add_exons([(101, 500), (801, 1000), (1201, 1420)])
t2.add_exons([(201, 500), # 300
(801, 1000), # 200
(1201, 1420), # 220
], features="CDS")
t2.finalize()
self.assertEqual(t2.combined_cds_end, 1420)
t3 = Transcript()
t3.chrom, t3.strand, t3.id = 1, "+", "t3"
t3.add_exons([(101, 500), (801, 970), (1100, 1130)])
t3.add_exons([(101, 500), (801, 970), (1100, 1130)], features="CDS")
t3.finalize()
logger = create_default_logger("test_retained_pos_truncated")
for pred, retained in [(t2, True), (t3, False)]:
with self.subTest(pred=pred, retained=retained):
logger.setLevel("WARNING")
sup = Superlocus(t1, configuration=self.my_json, logger=logger)
sup.add_transcript_to_locus(pred)
sup.find_retained_introns(pred)
self.assertEqual((len(sup.transcripts[pred.id].retained_introns) > 0),
retained, (pred.id, retained, pred.retained_introns))
# Now check that things function also after unpickling
unpickled_t1 = pickle.loads(pickle.dumps(t1))
unpickled_other = pickle.loads(pickle.dumps(pred))
logger.setLevel("WARNING")
sup = Superlocus(unpickled_t1, configuration=self.my_json, logger=logger)
sup.add_transcript_to_locus(unpickled_other)
sup.find_retained_introns(pred)
self.assertEqual((len(sup.transcripts[pred.id].retained_introns) > 0),
retained)
def test_real_retained_pos_truncated(self):
"""Here we verify that a real retained intron is *NOT* called as such when
the transcript is truncated and we elect not to investigate the 3' end."""
t1 = Transcript()
t1.chrom, t1.strand, t1.id = 1, "+", "t1"
t1.add_exons([(101, 500), (801, 1000), (1201, 1300), (1501, 1800)])
t1.add_exons([(201, 500), # 300
(801, 1000), # 200
(1201, 1300), #100
(1501, 1530) # 30
], features="CDS")
t1.finalize()
t2 = Transcript()
t2.chrom, t2.strand, t2.id = 1, "+", "t2"
t2.add_exons([(101, 500), (801, 1000), (1201, 1420)])
t2.add_exons([(201, 500), # 300
(801, 1000), # 200
(1201, 1420), # 220
], features="CDS")
t2.finalize()
self.assertEqual(t2.combined_cds_end, 1420)
logger = create_default_logger("test_real_retained_pos_truncated")
sup = Superlocus(t1, configuration=self.my_json, logger=logger)
sup.add_transcript_to_locus(t2)
sup.logger.setLevel("DEBUG")
sup.find_retained_introns(t2)
self.assertEqual(sup.transcripts["t2"].retained_introns, ((1201, 1420),))
self.assertTrue(sup.transcripts["t2"].cds_disrupted_by_ri)
def test_real_retained_neg_truncated(self):
"""Here we verify that a real retained intron is called as such,
even when the transcript is truncated."""
t1 = Transcript()
t1.chrom, t1.strand, t1.id = 1, "-", "t1"
t1.add_exons([(101, 500), (801, 1000), (1201, 1300), (1501, 1800)])
t1.add_exons([(201, 500), # 300
(801, 1000), # 200
(1201, 1300), #100
(1501, 1530) # 30
], features="CDS")
t1.finalize()
t2 = Transcript()
t2.chrom, t2.strand, t2.id = 1, "-", "t2"
t2.add_exons([(601, 1000), (1201, 1300), (1501, 1800)])
t2.add_exons([(601, 1000), # 200
(1201, 1300), #100
(1501, 1530) # 30
], features="CDS")
t2.finalize()
self.assertEqual(t2.combined_cds_end, 601)
t3 = Transcript()
t3.chrom, t3.strand, t3.id = 1, "-", "t3"
t3.add_exons([(551, 580), (801, 1000), (1201, 1300), (1501, 1800)])
t3.add_exons([(551, 580),
(801, 1000), # 200
(1201, 1300), #100
(1501, 1530) # 30
], features="CDS")
t3.finalize()
self.assertEqual(t3.combined_cds_end, 551)
for pred, retained in [(t2, True), (t3, False)]:
with self.subTest(pred=pred, retained=retained):
sup = Superlocus(t1, configuration=self.my_json)
sup.add_transcript_to_locus(pred)
sup.find_retained_introns(pred)
self.assertEqual((len(sup.transcripts[pred.id].retained_introns) > 0),
retained, (pred.id, pred.retained_introns))
def test_real_retained_neg_truncated_2(self):
"""Here we verify that a real retained intron is *NOT* called as such when
the transcript is truncated and we elect not to investigate the 3' end."""
t1 = Transcript()
t1.chrom, t1.strand, t1.id = 1, "-", "t1"
t1.add_exons([(101, 500), (801, 1000), (1201, 1300), (1501, 1800)])
t1.add_exons([(201, 500), # 300
(801, 1000), # 200
(1201, 1300), #100
(1501, 1530) # 30
], features="CDS")
t1.finalize()
t2 = Transcript()
t2.chrom, t2.strand, t2.id = 1, "-", "t2"
t2.add_exons([(601, 1000), (1201, 1300), (1501, 1800)])
t2.add_exons([(601, 1000), # 200
(1201, 1300), #100
(1501, 1530) # 30
], features="CDS")
t2.finalize()
self.assertEqual(t2.combined_cds_end, 601)
sup = Superlocus(t1, configuration=self.my_json)
sup.add_transcript_to_locus(t2)
sup.find_retained_introns(t2)
self.assertEqual(sup.transcripts["t2"].retained_introns, ((601, 1000),))
self.assertTrue(sup.transcripts["t2"].cds_disrupted_by_ri)
def test_real_retained_pos_noCDS(self):
"""Here we verify that a real retained intron is called as such, even when the transcript lacks a CDS"""
t1 = Transcript()
t1.chrom, t1.strand, t1.id = 1, "+", "t1"
t1.add_exons([(101, 500), (801, 1000), (1201, 1300), (1501, 1800)])
t1.add_exons([(201, 500), # 300
(801, 1000), # 200
(1201, 1300), # 100
(1501, 1530) # 30
], features="CDS")
t1.finalize()
t2 = Transcript()
t2.chrom, t2.strand, t2.id = 1, "+", "t2"
t2.add_exons([(101, 500), (801, 1000), (1201, 1600)])
t2.finalize()
sup = Superlocus(t1, configuration=self.my_json)
sup.add_transcript_to_locus(t2)
sup.logger = create_default_logger("test_real_retained_pos_noCDS", level="DEBUG")
# sup.logger.setLevel("DEBUG")
sup.find_retained_introns(t2)
self.assertEqual(sup.transcripts["t2"].retained_introns, ((1201, 1600),))
def test_not_retained_pos(self):
"""Here we verify that a false retained intron is not called as such"""
t1 = Transcript()
t1.chrom, t1.strand, t1.id = 1, "+", "t1"
t1.add_exons([(101, 500), (801, 1000), (1201, 1300), (1501, 1800)])
t1.add_exons([(201, 500), # 300
(801, 1000), # 200
(1201, 1300), # 100
(1501, 1530) # 30
], features="CDS")
t1.finalize()
t2 = Transcript()
t2.chrom, t2.strand, t2.id = 1, "+", "t2"
t2.add_exons([(101, 500), (801, 1000), (1201, 1600)])
t2.add_exons([(201, 500), # 300
(801, 1000), # 200
(1201, 1540), # 340
], features="CDS")
t2.finalize()
t3 = Transcript()
t3.chrom, t3.strand, t3.id = 1, "+", "t3"
t3.add_exons([(101, 500), (801, 970), (1100, 1130)])
t3.add_exons([(101, 500), (801, 970), (1100, 1130)], features="CDS")
t3.finalize()
for pred in [t2, t3]:
with self.subTest(pred=pred):
sup = Superlocus(t1, configuration=self.my_json)
sup.add_transcript_to_locus(pred)
sup.find_retained_introns(pred)
if pred == t2:
self.assertEqual(sup.transcripts[pred.id].retained_intron_num, 1)
else:
self.assertEqual(sup.transcripts[pred.id].retained_intron_num, 0)
self.assertFalse(sup.transcripts[pred.id].cds_disrupted_by_ri)
unpickled_t1 = pickle.loads(pickle.dumps(t1))
unpickled_other = pickle.loads(pickle.dumps(pred))
sup = Superlocus(unpickled_t1, configuration=self.my_json)
sup.add_transcript_to_locus(unpickled_other)
sup.find_retained_introns(unpickled_other)
if pred == t2:
self.assertEqual(sup.transcripts[unpickled_other.id].retained_intron_num, 1)
else:
self.assertEqual(sup.transcripts[unpickled_other.id].retained_intron_num, 0)
self.assertFalse(sup.transcripts[unpickled_other.id].cds_disrupted_by_ri)
def test_neg_retained_example(self):
t1 = Transcript()
t1.chrom = "Chr1"
t1.id = "t1"
t1.add_exons([(3168512, 3168869),(3168954, 3169234),(3169327, 3169471),
(3169589, 3170045),(3170575, 3170687),(3170753, 3170803)])
t1.strand = "-"
t1.add_exons(
[(3168568, 3168869), (3168954, 3169234), (3169327, 3169471), (3169589, 3170045), (3170575, 3170682)],
features="CDS"
)
t1.finalize()
t2 = Transcript()
t2.chrom = "Chr1"
t2.id = "t2"
t2.strand = "-"
t2.add_exons(
[(3168483, 3168869),(3168954, 3169234),(3169327, 3169471),(3169589, 3170816)]
)
t2.add_exons(
[(3168568, 3168869),(3168954, 3169234),(3169327, 3169471),(3169589, 3170192)],
features="CDS")
sup = Superlocus(t1, configuration=self.my_json)
sup.add_transcript_to_locus(t2)
sup.find_retained_introns(t2)
self.assertGreater(sup.transcripts[t2.id].retained_intron_num, 0)
def test_real_retained_neg(self):
"""Here we verify that a real retained intron is called as such"""
t1 = Transcript()
t1.chrom, t1.strand, t1.id = 1, "-", "t1"
t1.add_exons([(101, 500), (801, 1000), (1201, 1300), (1501, 1800)])
t1.add_exons([(201, 500), # 300
(801, 1000), # 200
(1201, 1300), # 100
(1501, 1530) # 30
], features="CDS")
t1.finalize()
t2 = Transcript()
t2.chrom, t2.strand, t2.id = 1, "-", "t2"
t2.add_exons([(401, 1000), (1201, 1300), (1501, 1800)])
t2.add_exons([(1501, 1530), # 30
(1201, 1300), # 100
(771, 1000) # 230
], features="CDS")
t2.finalize()
with self.subTest():
sup = Superlocus(t1, configuration=self.my_json)
sup.add_transcript_to_locus(t2)
sup.logger = create_default_logger("test_real_retained_neg",
level="DEBUG")
sup.find_retained_introns(t2)
self.assertEqual(sup.transcripts["t2"].retained_introns, ((401, 1000),))
with self.subTest():
unpickled_t1 = pickle.loads(pickle.dumps(t1))
unpickled_other = pickle.loads(pickle.dumps(t2))
sup = Superlocus(unpickled_t1, configuration=self.my_json)
sup.add_transcript_to_locus(unpickled_other)
sup.find_retained_introns(unpickled_other)
self.assertEqual(sup.transcripts["t2"].retained_introns, ((401, 1000),))
    def test_real_retained_neg_2(self):
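        # Exercise the low-level Abstractlocus helpers (_calculate_graph,
        # _calculate_segment_tree, _is_exon_retained) directly on the three models, then
        # check that find_retained_introns reports consistent results for t2 and t3.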
t1 = Transcript()
t1.chrom, t1.strand, t1.id = 1, "+", "t1"
t1.add_exons([(101, 500), (801, 1000), (1201, 1300), (1501, 1800)])
t1.add_exons([(201, 500), # 300
(801, 1000), # 200
(1201, 1300), # 100
(1501, 1530) # 30
], features="CDS")
t1.finalize()
t2 = Transcript()
t2.chrom, t2.strand, t2.id = 1, "+", "t2"
t2.add_exons([(601, 1000), (1201, 1300), (1501, 1800)])
t2.add_exons([(1501, 1530), # 30
(1201, 1300), # 100
(771, 1000) # 230
], features="CDS")
t2.finalize()
t3 = Transcript()
t3.chrom, t3.strand, t3.id = 1, "+", "t3"
t3.add_exons([(401, 1000), (1201, 1300), (1501, 1800)])
t3.add_exons([(831, 1000), # 200
(1201, 1300),
(1501, 1530)
], features="CDS")
t3.finalize()
graph = Abstractlocus._calculate_graph([t1, t2, t3])
exons = set.union(*[set(_.exons) for _ in [t1, t2, t3]])
introns = set.union(*[_.introns for _ in [t1, t2, t3]])
segmenttree = Abstractlocus._calculate_segment_tree(exons, introns)
logger=create_default_logger("test_real_retained_neg", level="WARNING")
# logger.setLevel("DEBUG")
self.assertTrue(
Abstractlocus._is_exon_retained((401, 1000),
"-",
segmenttree,
graph,
[Interval(401, 830)],
introns=set.union(
t1.introns, t2.introns, t3.introns
),
cds_introns=set.union(
t1.combined_cds_introns,
t2.combined_cds_introns,
t3.combined_cds_introns
),
internal_splices={1000},
logger=logger))
for alt, num_retained, cds_disrupted in zip([t2, t3], [1, 1], [True, True]):
unpickled_t1 = pickle.loads(pickle.dumps(t1))
unpickled_alt = pickle.loads(pickle.dumps(alt))
with self.subTest(alt=alt):
sup = Superlocus(t1, configuration=self.my_json, logger=logger)
logger.setLevel("DEBUG")
sup.find_retained_introns(alt)
self.assertEqual(alt.retained_intron_num, num_retained,
(alt.id, alt.retained_introns))
self.assertEqual(alt.cds_disrupted_by_ri, cds_disrupted,
(alt.id, alt.retained_introns))
logger.setLevel("WARNING")
with self.subTest(alt=alt):
sup = Superlocus(unpickled_t1, configuration=self.my_json)
sup.find_retained_introns(unpickled_alt)
self.assertEqual(unpickled_alt.retained_intron_num, num_retained,
unpickled_alt.retained_introns)
def test_consider_cds_only(self):
t1 = Transcript()
t1.chrom, t1.strand, t1.id = 1, "-", "t1"
t1.add_exons([(101, 500), (801, 1000), (1201, 1300), (1501, 1800)])
t1.add_exons([(831, 1000), # 200
(1201, 1300), # 100
(1501, 1530) # 30
], features="CDS")
t1.finalize()
t3 = Transcript()
t3.chrom, t3.strand, t3.id = 1, "-", "t3"
t3.add_exons([(401, 1000), (1201, 1300), (1501, 1800)])
t3.add_exons([(831, 1000), # 200
(1201, 1300),
(1501, 1530)
], features="CDS")
t3.finalize()
graph = Abstractlocus._calculate_graph([t1, t3])
exons = set.union(*[set(_.combined_cds) for _ in [t1, t3]])
introns = set.union(*[_.combined_cds_introns for _ in [t1, t3]])
logger = create_default_logger("test_consider_cds_only", level="DEBUG")
logger.debug("Exons: %s", exons)
logger.debug("Introns: %s", introns)
segmenttree = Abstractlocus._calculate_segment_tree(exons, introns)
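        # The segment tree here is built from CDS exons and CDS introns only: the (401, 1000)
        # exon does not run through any CDS intron of t1 or t3, so it must not be reported
        # as retained.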
logger.setLevel("DEBUG")
self.assertEqual((False, False),
Abstractlocus._is_exon_retained(
(401, 1000), "-", segmenttree, graph, [Interval(401, 830)],
introns=set.union(t1.introns, t3.introns),
cds_introns=set.union(t1.combined_cds_introns, t3.combined_cds_introns),
internal_splices={1000},
logger=logger))
def test_issue_255(self):
beds = ['Calendula_officinalis_EIV1.1_Contig_0000010\t81356\t84036\tID=CoDisc.hq_lq_UnnamedSample_HQ_transcript/78575.mrna1;coding=True;phase=0\t20.82\t-\t81605\t83927\t0\t12\t315,15,211,44,178,185,62,88,93,96,44,179\t0,391,518,818,960,1216,1486,1640,1819,2169,2365,2501',
'Calendula_officinalis_EIV1.1_Contig_0000010\t81356\t84036\tID=CoDisc_scallop_CoDisc_sca.200.0.3;coding=True;phase=1\t7.99\t-\t82161\t84036\t0\t9\t862,178,185,62,88,93,96,44,179\t0,960,1216,1486,1640,1819,2169,2365,2501',
'Calendula_officinalis_EIV1.1_Contig_0000010\t81356\t84036\tID=CoDisc_scallop_CoDisc_sca.200.0.0;coding=True;phase=0\t15.07\t-\t81605\t83927\t0\t11\t315,211,44,178,185,62,88,93,96,44,179\t0,518,818,960,1216,1486,1640,1819,2169,2365,2501',
'Calendula_officinalis_EIV1.1_Contig_0000010\t81356\t84036\tID=CoRay_scallop_CoRay_sca.180.0.2;coding=True;phase=0\t16.010000228881836\t-\t81714\t83927\t0\t11\t406,211,44,178,185,62,88,93,96,44,179\t0,518,818,960,1216,1486,1640,1819,2169,2365,2501',
'Calendula_officinalis_EIV1.1_Contig_0000010\t81356\t84036\tID=CoDisc_scallop_CoDisc_sca.200.0.4;coding=True;phase=0\t9.46\t-\t81838\t83927\t0\t10\t729,44,178,185,62,88,93,96,44,179\t0,818,960,1216,1486,1640,1819,2169,2365,2501']
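        # The two shorter scallop models (sca.200.0.3 and sca.200.0.4) are expected to be
        # flagged as carrying retained introns that disrupt their CDS.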
beds = [BED12(bed) for bed in beds]
assert all([bed.header is False for bed in beds])
transcripts = dict((bed.id, Transcript(bed)) for bed in beds)
[transcripts[tid].finalize() for tid in transcripts]
self.assertEqual(len(transcripts), 5)
conf = configurator.load_and_validate_config(None)
conf.pick.alternative_splicing.keep_cds_disrupted_by_ri = False
conf.pick.alternative_splicing.pad = False
logger = create_default_logger("test_issue_255", level="WARNING")
scores = dict((tid, transcripts[tid].score) for tid in transcripts)
scores = list(sorted(scores.items(), key=operator.itemgetter(1), reverse=True))
self.assertEqual(len(scores), 5)
locus = Locus(transcripts[scores[0][0]], configuration=conf, logger=logger)
for tid, score in scores[1:]:
locus.add_transcript_to_locus(transcripts[tid], check_in_locus=False)
self.assertEqual(len(locus.transcripts), len(beds), scores)
locus.logger.setLevel("DEBUG")
locus.find_retained_introns(locus["CoDisc_scallop_CoDisc_sca.200.0.4"])
self.assertTrue(locus["CoDisc_scallop_CoDisc_sca.200.0.4"].retained_intron_num > 0)
self.assertTrue(locus["CoDisc_scallop_CoDisc_sca.200.0.4"].cds_disrupted_by_ri)
locus.find_retained_introns(locus["CoDisc_scallop_CoDisc_sca.200.0.3"])
self.assertTrue(locus["CoDisc_scallop_CoDisc_sca.200.0.3"].retained_intron_num > 0)
self.assertTrue(locus["CoDisc_scallop_CoDisc_sca.200.0.3"].cds_disrupted_by_ri)
def test_not_retained_neg(self):
"""Here we verify that a false retained intron is not called as such"""
t1 = Transcript()
t1.chrom, t1.strand, t1.id = 1, "-", "t1"
t1.add_exons([(101, 500), (801, 1000), (1201, 1300), (1501, 1800)])
t1.add_exons([(201, 500), # 300
(801, 1000), # 200
(1201, 1300), # 100
(1501, 1530) # 30
], features="CDS")
t1.finalize()
t2 = Transcript()
t2.chrom, t2.strand, t2.id = 1, "-", "t2"
t2.add_exons([(301, 1000), (1201, 1300), (1501, 1800)])
t2.add_exons([(1501, 1530), # 30
(1201, 1300), # 100
(471, 1000) # 230
], features="CDS")
t2.finalize()
sup = Superlocus(t1, configuration=self.my_json)
sup.add_transcript_to_locus(t2)
self.assertEqual(t2.cds_tree.find(301, 1000),
[Interval(471, 1000)])
self.assertEqual(Abstractlocus._exon_to_be_considered((301, 1000), t2),
(True, [(301, 470)], {1000}),
Abstractlocus._exon_to_be_considered((301, 1000), t2))
graph = Abstractlocus._calculate_graph([t1, t2])
segmenttree = Abstractlocus._calculate_segment_tree(set.union(set(t1.exons), set(t2.exons)),
set.union(t1.introns, t2.introns))
self.assertEqual(
(True, False),
Abstractlocus._is_exon_retained((301, 1000),
"-",
segmenttree, graph, [(301, 470)],
introns=set.union(t1.introns, t2.introns),
internal_splices={1000},
cds_introns=set.union(t1.combined_cds_introns, t2.combined_cds_introns),
logger=create_default_logger("test_not_retained_neg",
level="DEBUG")))
sup.find_retained_introns(t2)
self.assertEqual(sup.transcripts["t2"].retained_intron_num, 1,
sup.transcripts["t2"].retained_introns)
self.assertEqual(sup.transcripts["t2"].retained_introns, ((301, 1000),),
sup.transcripts["t2"].retained_introns)
self.assertFalse(sup.transcripts["t2"].cds_disrupted_by_ri)
def test_exon_switching_pos(self):
"""Checking that an exon switching is treated correctly as a NON-retained intron. Positive strand case"""
t1 = Transcript()
t1.chrom, t1.strand, t1.id = 1, "+", "t1"
t1.add_exons([(101, 500), (801, 1000), (1201, 1300), (2501, 2800)])
t1.add_exons([(201, 500), # 300
(801, 1000), # 200
(1201, 1300), # 100
(2501, 2530) # 30
], features="CDS")
t1.finalize()
t2 = Transcript()
t2.chrom, t2.strand, t2.id = 1, "+", "t2"
t2.add_exons([(101, 500), (801, 1000), (1201, 1300), (1501, 1800)])
t2.add_exons([(201, 500), # 300
(801, 1000), # 200
(1201, 1300), # 100
(1501, 1530) # 30
], features="CDS")
t2.finalize()
sup = Superlocus(t1, configuration=self.my_json)
sup.add_transcript_to_locus(t2)
sup.find_retained_introns(t2)
self.assertEqual(sup.transcripts["t2"].retained_intron_num, 0)
def test_exon_switching_pos_noCDS(self):
"""Checking that an exon switching is treated correctly as a NON-retained intron even when the CDS is absent.
Positive strand case"""
t1 = Transcript()
t1.chrom, t1.strand, t1.id = 1, "+", "t1"
t1.add_exons([(101, 500), (801, 1000), (1201, 1300), (2501, 2800)])
t1.add_exons([(201, 500), # 300
(801, 1000), # 200
(1201, 1300), # 100
(2501, 2530) # 30
], features="CDS")
t1.finalize()
t2 = Transcript()
t2.chrom, t2.strand, t2.id = 1, "+", "t2"
t2.add_exons([(101, 500), (801, 1000), (1201, 1300), (1501, 1800)])
# t2.add_exons([(201, 500), # 300
# (801, 1000), # 200
# (1201, 1300), # 100
# (1501, 1530) # 30
# ], features="CDS")
t2.finalize()
sup = Superlocus(t1, configuration=self.my_json)
sup.add_transcript_to_locus(t2)
sup.find_retained_introns(t2)
self.assertEqual(sup.transcripts["t2"].retained_intron_num, 0,
sup.transcripts["t2"].retained_introns)
def test_exon_switching_neg(self):
"""Checking that an exon switching is treated correctly as a NON-retained intron. Positive strand case"""
t1 = Transcript()
t1.chrom, t1.strand, t1.id = 1, "-", "t1"
t1.add_exons([(101, 500), (801, 1000), (2201, 2300), (2501, 2800)])
t1.add_exons([(201, 500), # 300
(801, 1000), # 200
(2201, 2300), # 100
(2501, 2530) # 30
], features="CDS")
t1.finalize()
t2 = Transcript()
t2.chrom, t2.strand, t2.id = 1, "-", "t2"
t2.add_exons([(101, 500), (1701, 2000), (2201, 2300), (2501, 2800)])
t2.add_exons([
(1801, 2000), # 200
(2201, 2300), # 100
(2501, 2530) # 30
], features="CDS")
t2.finalize()
self.assertEqual(len(t2.cds_tree), len(t2.combined_cds) + len(t2.combined_cds_introns))
self.assertEqual(len(t2.cds_tree), 5)
sup = Superlocus(t1, configuration=self.my_json)
sup.add_transcript_to_locus(t2)
sup.find_retained_introns(t2)
self.assertEqual(sup.transcripts["t2"].retained_intron_num, 0)
def test_exon_switching_neg_noCDS(self):
"""Checking that an exon switching is treated correctly as a NON-retained intron even when the CDS is absent.
Positive strand case"""
t1 = Transcript()
t1.chrom, t1.strand, t1.id = 1, "-", "t1"
t1.add_exons([(101, 500), (801, 1000), (1201, 1300), (2501, 2800)])
t1.add_exons([(201, 500), # 300
(801, 1000), # 200
(1201, 1300), # 100
(2501, 2530) # 30
], features="CDS")
t1.finalize()
t2 = Transcript()
t2.chrom, t2.strand, t2.id = 1, "-", "t2"
t2.add_exons([(101, 500), (801, 1000), (1201, 1300), (1501, 1800)])
# t2.add_exons([(201, 500), # 300
# (801, 1000), # 200
# (1201, 1300), # 100
# (1501, 1530) # 30
# ], features="CDS")
t2.finalize()
sup = Superlocus(t1, configuration=self.my_json)
sup.add_transcript_to_locus(t2)
sup.find_retained_introns(t2)
self.assertEqual(sup.transcripts["t2"].retained_intron_num, 0)
def test_neg_delayed_cds(self):
t1 = Transcript()
t1.chrom = "Chr1"
t1.start, t1.end, t1.strand, t1.id = 47498, 49247, "-", "cls-0-hst-combined-0_Chr1.7.0"
t1.add_exons([(47498, 47982), (48075, 48852), (48936, 49247)])
t1.add_exons([(47705, 47982), (48075, 48852), (48936, 49166)], features="CDS")
t1.finalize()
t2 = Transcript()
t2.chrom = "Chr1"
t2.start, t2.end, t2.strand, t2.id = 47485, 49285, "-", "scl-1-hst-combined-0_gene.13.0.1"
t2.add_exons([(47485, 47982), (48075, 49285)])
t2.add_exons([(47705, 47982), (48075, 48813)], features="CDS")
t2.finalize()
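        # t2's second exon (48075, 49285) runs through t1's (48853, 48935) intron, so exactly
        # one retained intron is expected for t2.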
logger = create_default_logger("test_neg_delayed_cds", level="WARNING")
sup = Superlocus(t1, configuration=self.my_json, logger=logger)
sup.add_transcript_to_locus(t2)
sup.find_retained_introns(t2)
self.assertEqual(sup.transcripts[t2.id].retained_intron_num, 1, sup.combined_cds_exons)
def test_mixed_strands(self):
"""Verify that no retained intron is called if the strands are mixed."""
t1 = Transcript()
t1.chrom, t1.strand, t1.id = 1, "+", "t1"
t1.add_exons([(101, 500), (801, 1000), (1201, 1300), (1501, 1800)])
t1.add_exons([(201, 500), # 300
(801, 1000), # 200
(1201, 1300), # 100
(1501, 1530) # 30
], features="CDS")
t1.finalize()
t2 = Transcript()
t2.chrom, t2.strand, t2.id = 1, "-", "t2"
t2.add_exons([(601, 1000), (1201, 1300), (1501, 1800)])
t2.add_exons([(1501, 1530), # 30
(1201, 1300), # 100
(771, 1000) # 230
], features="CDS")
t2.finalize()
sup = Superlocus(t1, configuration=self.my_json, stranded=False)
sup.add_transcript_to_locus(t2)
sup.find_retained_introns(t2)
self.assertEqual(sup.transcripts["t2"].retained_intron_num, 0)
def test_extensive_cases(self):
"""This method implements the case described by D. Swarbreck in issue #255"""
basis = {
"t1": [[(11, 100), (301, 400), (451, 600), (751, 900), (1041, 1200), (1501, 1600)],
[(551, 600), (751, 900), (1041, 1080)]],
# Difference: partially within 5'UTR intron. NOT RI
"t2": [[(11, 180), (301, 400), (451, 600), (751, 900), (1041, 1200), (1501, 1600)],
[(551, 600), (751, 900), (1041, 1080)]],
# Difference: RI in the UTR. NOT CDS disrupted. Same CDS start and end
"t3": [[(11, 400), (451, 600), (751, 900), (1041, 1200), (1501, 1600)],
[(551, 600), (751, 900), (1041, 1080)]],
# Difference: RI in the CDS. NOT CDS disrupted. Different CDS start and end
"t4": [[(11, 100), (301, 400), (451, 600), (751, 1200), (1501, 1600)],
[(551, 600), (751, 1200), (1540, 1600)]],
# Difference: RI in the first CDS exon, leading to premature stop
"t5": [[(11, 100), (301, 400), (451, 680)],
[(551, 640)]],
# Difference: Start of the transcript within first intron. Same CDS
"t6": [[(201, 400), (451, 600), (751, 900), (1041, 1200), (1501, 1600)],
[(551, 600), (751, 900), (1041, 1080)]],
# Difference: novel exon within first intron
"t7": [[(201, 250), (301, 400), (451, 600), (751, 900), (1041, 1200), (1501, 1600)],
[(551, 600), (751, 900), (1041, 1080)]],
# Difference: same UTR, last exon ending within a CDS intron. CDS disrupted
"t8": [[(11, 100), (301, 400), (451, 600), (751, 950)],
[(551, 600), (751, 940)]],
# Difference: AS in the second-to-last exon
"t9": [[(11, 100), (301, 400), (451, 600), (751, 980), (1501, 1600)],
[(551, 600), (751, 970)]],
# Difference: AS in the last exon
"t10": [[(11, 100), (301, 400), (451, 600), (751, 900), (1041, 1200), (1401, 1600)],
[(551, 600), (751, 900), (1041, 1080)]]
}
res = {"t1": ((), False), "t2": ((), False), "t3": (((11, 400),), False), "t4": ((), False),
"t5": (((451, 680),), True), "t6": (((201, 400),), False), "t7": ((), False), "t8": (((751, 950),), True),
"t9": ((), False), "t10": ((), False)}
logger = create_default_logger("test_extensive_cases", "WARNING")
for strand in ("+", "-"):
with self.subTest(strand=strand):
transcripts = dict()
for tid in basis:
transcripts[tid] = Transcript()
transcripts[tid].chrom, transcripts[tid].strand = "Chr1", strand
transcripts[tid].add_exons(basis[tid][0])
transcripts[tid].add_exons(basis[tid][1], features="CDS")
transcripts[tid].id = tid
transcripts[tid].finalize()
self.assertTrue(transcripts[tid].is_coding, tid)
sup = Superlocus(transcripts["t1"])
sup.logger = logger
sup.configuration.pick.clustering.purge = False
self.assertIn("t1", sup.transcripts.keys())
for tid in transcripts:
if tid == "t1":
continue
elif tid == "t5" and strand == "-":
sup.logger.setLevel("DEBUG")
else:
logger.setLevel("WARNING")
sup.add_transcript_to_locus(transcripts[tid], check_in_locus=True)
self.assertIn(tid, sup.transcripts.keys())
sup.filter_and_calculate_scores()
sup.logger.setLevel("WARNING")
for tid in sorted(res.keys()):
self.assertIn(tid, sup.transcripts.keys())
err_message = [strand,
tid,
res[tid],
(sup.transcripts[tid].retained_introns, sup.transcripts[tid].cds_disrupted_by_ri)]
self.assertEqual(
(sup.transcripts[tid].retained_introns, sup.transcripts[tid].cds_disrupted_by_ri),
res[tid],
err_message)
def test_mixed_intron(self):
"""This test verifies that a retained intron with one boundary from one transcript and another from a second
is correctly marked."""
basis = {
"t1": [[(101, 300), (701, 900), (1101, 1300), (1801, 2000)],
                   [(271, 300), (701, 900), (1101, 1300), (1801, 1850)]],  # 30 + 200 + 200 + 50 = 480
"t2": [[(401, 500), (661, 900), (1101, 1500), (1800, 2000)],
                   [(451, 500), (661, 900), (1101, 1410)]],  # 50 + 240 + 310 = 600
"t3": [[(401, 500), (661, 1300), (1800, 2000)],
                   [(451, 500), (661, 1030)]]  # 50 + 370 = 420
}
res = {"t1": ((), False), "t2": ((), False), "t3": (((661, 1300),), True)}
logger = create_default_logger("test_mixed_intron", "WARNING")
for strand in ("+", "-"):
transcripts = dict()
for tid in basis:
transcripts[tid] = Transcript()
transcripts[tid].chrom, transcripts[tid].strand = "Chr1", strand
transcripts[tid].add_exons(basis[tid][0])
transcripts[tid].add_exons(basis[tid][1], features="CDS")
transcripts[tid].id = tid
transcripts[tid].finalize()
self.assertTrue(transcripts[tid].is_coding, tid)
sup = Superlocus(transcripts["t1"])
self.assertIn("t1", sup.transcripts.keys())
[sup.add_transcript_to_locus(transcripts[tid]) for tid in transcripts if tid != "t1"]
sup.logger = logger
sup.filter_and_calculate_scores()
for tid in sorted(res.keys()):
self.assertEqual((sup.transcripts[tid].retained_introns, sup.transcripts[tid].cds_disrupted_by_ri),
res[tid],
[tid, res[tid],
(sup.transcripts[tid].retained_introns, sup.transcripts[tid].cds_disrupted_by_ri)])
class PicklingTest(unittest.TestCase):
def setUp(self):
t1 = Transcript()
t1.chrom, t1.strand, t1.id = 1, "+", "t1"
t1.add_exons([(101, 500), (801, 1000), (1201, 1300), (1501, 1800)])
t1.add_exons([(201, 500), # 300
(801, 1000), # 200
(1201, 1300), # 100
(1501, 1530) # 30
], features="CDS")
t1.finalize()
t2 = Transcript()
t2.chrom, t2.strand, t2.id = 1, "+", "t2"
t2.add_exons([(101, 500), (801, 1000), (1201, 1600)])
t2.add_exons([(201, 500), # 300
(801, 1000), # 200
(1201, 1420), # 220
], features="CDS")
t2.finalize()
t3 = Transcript()
t3.chrom, t3.strand, t3.id = 1, "+", "t3"
t3.add_exons([(101, 500), (801, 970), (1100, 1180)])
t3.add_exons([(101, 500), (801, 970), (1100, 1130)], features="CDS")
t3.finalize()
self.t1, self.t2, self.t3 = t1, t2, t3
self.configuration = configurator.load_and_validate_config(None)
def test_transcript_pickling(self):
for transcript in [self.t1, self.t2, self.t3]:
with self.subTest(transcript=transcript):
pickled = pickle.dumps(transcript)
unpickled = pickle.loads(pickled)
self.assertEqual(transcript, unpickled)
self.assertEqual(len(transcript.combined_cds) + len(transcript.combined_cds_introns),
len(unpickled.cds_tree))
self.assertEqual(len(transcript.segmenttree), len(unpickled.segmenttree))
def test_locus_unpickling(self):
for transcript in [self.t1, self.t2, self.t3]:
for (loc_type, loc_name) in [(_, _.__name__) for _ in (Superlocus, Sublocus, Monosublocus, Locus)]:
with self.subTest(transcript=transcript, loc_type=loc_type, loc_name=loc_name):
loc = loc_type(transcript, configuration=self.configuration)
pickled = pickle.dumps(transcript)
unpickled = pickle.loads(pickled)
self.assertEqual(transcript, unpickled)
class PaddingTester(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.fai = pysam.FastaFile(pkg_resources.resource_filename("Mikado.tests", "chr5.fas.gz"))
@staticmethod
def load_from_bed(manager, resource):
transcripts = dict()
with pkg_resources.resource_stream(manager, resource) as bed:
for line in bed:
line = line.decode()
line = BED12(line, coding=True)
line.coding = True
transcript = Transcript(line)
assert transcript.start > 0
assert transcript.end > 0
assert transcript.is_coding, transcript.format("bed12")
transcript.finalize()
transcript.verified_introns = transcript.introns
transcript.parent = "{}.gene".format(transcript.id)
transcripts[transcript.id] = transcript
return transcripts
def test_pad_utr(self):
logger = create_default_logger(inspect.getframeinfo(inspect.currentframe())[2], level="WARNING")
transcripts = self.load_from_bed("Mikado.tests", "pad_utr.bed12")
locus = Locus(transcripts["mikado.Chr5G2.1"], logger=logger)
# [locus._add_to_alternative_splicing_codes(code) for code in ("J", "j", "G", "h")]
self.assertIn("J", locus.configuration.pick.alternative_splicing.valid_ccodes)
locus.configuration.reference.genome = self.fai.filename.decode()
# We need to pad
locus.configuration.pick.alternative_splicing.pad = True
        locus.configuration.pick.alternative_splicing.ts_distance = 1000
        locus.configuration.pick.alternative_splicing.ts_max_splices = 10
locus.configuration.pick.alternative_splicing.only_confirmed_introns = False
locus.configuration.pick.alternative_splicing.min_cdna_overlap = 0.1
locus.logger = create_default_logger("test_pad_utr", level="DEBUG")
with self.assertLogs("test_pad_utr", "DEBUG") as cmo:
locus.add_transcript_to_locus(transcripts["mikado.Chr5G2.2"])
self.assertIn("mikado.Chr5G2.1", locus.transcripts, cmo.output)
self.assertIn("mikado.Chr5G2.2", locus.transcripts, cmo.output)
locus.pad_transcripts()
for tid in locus:
self.assertEqual(locus[tid].end, locus.end, tid)
def test_remove_redundant(self):
"""Test to verify that the routine to remove redundant transcripts after padding functions as desired"""
tmult = Transcript()
tmult.chrom, tmult.start, tmult.end, tmult.strand, tmult.id = "Chr1", 101, 2000, "+", "foo.1"
tmult.add_exons([(101, 500), (801, 1200), (1501, 2000)])
tmult.add_exons([(201, 500), (801, 1200), (1501, 1700)], features="CDS")
tmult.finalize()
tmult_a = tmult.copy()
tmult_a.id = "foo.1a"
tmono = Transcript()
tmono.chrom, tmono.start, tmono.end, tmono.strand, tmono.id = "Chr1", 601, 1800, "+", "bar.1"
tmono.add_exons([(601, 1800)])
# Note that the CDS *must be in-frame* with tmult
tmono.add_exons([(711, 1580)], features="CDS")
tmono.finalize()
tmono_a = tmono.copy()
tmono_a.id = "bar.1a"
conf = MikadoConfiguration()
comp = Assigner.compare(tmono, tmult)
rev_comp = Assigner.compare(tmult, tmono)
# Make sure that the transcripts can coexist
conf.pick.alternative_splicing.valid_ccodes.extend([comp[0].ccode[0], rev_comp[0].ccode[0]])
conf.pick.alternative_splicing.min_cds_overlap = 0.1
conf.pick.alternative_splicing.min_cdna_overlap = 0.1
conf.pick.alternative_splicing.min_score_perc = 0.01
conf.seed = 10
# Now for the real tests
logger = create_default_logger("test_remove_redundant", level="DEBUG")
for primary, is_reference, score in itertools.product([tmult.id, tmono.id], [False, True], [5, 10, 1]):
with self.subTest():
if primary == tmult.id:
locus = Locus(tmult, configuration=conf)
secondary = tmono.id
alt_secondary = tmono_a.id
locus.add_transcript_to_locus(tmono, check_in_locus=False)
else:
locus = Locus(tmono, configuration=conf)
secondary = tmult.id
alt_secondary = tmult_a.id
locus.add_transcript_to_locus(tmult, check_in_locus=False)
locus.primary_transcript_id = primary
random.seed(conf.seed)
locus.add_transcript_to_locus(tmono_a, check_in_locus=False)
locus.add_transcript_to_locus(tmult_a, check_in_locus=False)
self.assertIn(tmult.id, locus, (primary, is_reference, score))
self.assertIn(tmono.id, locus, (primary, is_reference, score))
self.assertIn(tmult_a.id, locus, (primary, is_reference, score))
self.assertIn(tmono_a.id, locus, (primary, is_reference, score))
locus[tmono.id].score = score
locus[tmono_a.id].score = 5
locus[tmult.id].score = score
locus[tmult_a.id].score = 5
locus[tmult.id].is_reference = locus[tmono.id].is_reference = is_reference
locus.logger = logger
locus._remove_redundant_after_padding()
self.assertTrue(len(locus.transcripts), 2)
if is_reference is False:
if score == 5:
self.assertEqual(locus.primary_transcript_id, primary)
self.assertTrue(secondary in locus or alt_secondary in locus)
elif score == 10:
self.assertEqual(sorted(locus.transcripts.keys()), sorted([tmult.id, tmono.id]),
(primary, is_reference, score))
elif score == 1:
self.assertEqual(sorted(locus.transcripts.keys()), sorted([alt_secondary, primary]),
(primary, is_reference, score))
else:
self.assertEqual(sorted(locus.transcripts.keys()), sorted([primary, secondary]),
(primary, is_reference, score))
# Check that calling again does nothing
current = list(sorted(locus.transcripts.keys()))[:]
locus._remove_redundant_after_padding()
self.assertTrue(current == list(sorted(locus.transcripts.keys())))
# Now check that a single transcript in a locus causes the function to return immediately
locus = Locus(tmult, configuration=conf, logger=logger)
with self.assertLogs(logger.name, level="DEBUG") as cmo:
locus._remove_redundant_after_padding()
self.assertTrue(any([re.search(r"only has one transcript, no redundancy removal needed.", output) is not None
for output in cmo.output]))
    def test_pad_three_prime(self):
logger = create_default_logger(inspect.getframeinfo(inspect.currentframe())[2], level="WARNING")
transcripts = self.load_from_bed("Mikado.tests", "pad_three_neg.bed12")
locus = Locus(transcripts["mikado.Chr5G486.1"], logger=logger)
[locus._add_to_alternative_splicing_codes(code) for code in ("J", "j", "G", "h")]
self.assertIn("J", locus.configuration.pick.alternative_splicing.valid_ccodes)
locus.configuration.reference.genome = self.fai.filename.decode()
locus.add_transcript_to_locus(transcripts["mikado.Chr5G486.2"])
# We need to pad
locus.configuration.pick.alternative_splicing.pad = True
        locus.configuration.pick.alternative_splicing.ts_distance = 1000
        locus.configuration.pick.alternative_splicing.ts_max_splices = 10
locus.configuration.pick.alternative_splicing.only_confirmed_introns = False
locus.configuration.pick.alternative_splicing.min_cdna_overlap = 0.1
self.assertIn("mikado.Chr5G486.1", locus.transcripts.keys())
self.assertIn("mikado.Chr5G486.2", locus.transcripts.keys())
locus.logger.setLevel("DEBUG")
locus.pad_transcripts()
for tid in locus:
self.assertEqual(locus[tid].start, locus.start, tid)
def test_one_off(self):
logger = create_default_logger(inspect.getframeinfo(inspect.currentframe())[2], level="WARNING")
for strand in ("+", "-"):
with self.subTest(strand=strand):
logger.setLevel("WARNING")
t1 = Transcript()
t1.chrom, t1.strand, t1.start, t1.end, t1.id, t1.parent = ["Chr5", strand, 100, 1000, "t1", "loc"]
t1.add_exons([(100, 200), (300, 500), (700, 800), (900, 1000)])
t1.finalize()
loc = Locus(t1, logger=logger)
loc.configuration.reference.genome = self.fai.filename.decode()
# We need these to be padded
loc.configuration.pick.alternative_splicing.ts_distance = 1000
loc.configuration.pick.alternative_splicing.ts_max_splices = 10
loc.configuration.pick.alternative_splicing.only_confirmed_introns = False
loc.configuration.pick.alternative_splicing.min_cdna_overlap = 0.1
t2 = Transcript()
t2.chrom, t2.strand, t2.start, t2.end, t2.id, t2.parent = ["Chr5", strand, 299, 1000, "t2", "loc"]
t2.add_exons([(299, 400), (700, 800), (900, 1000)])
t2.finalize()
loc.add_transcript_to_locus(t2)
self.assertIn(t2.id, loc)
t3 = Transcript()
t3.chrom, t3.strand, t3.start, t3.end, t3.id, t3.parent = ["Chr5", strand, 100, 801, "t3", "loc"]
t3.add_exons([(100, 150), (350, 500), (700, 801)])
t3.finalize()
loc.add_transcript_to_locus(t3)
self.assertIn(t3.id, loc)
t4 = Transcript()
t4.chrom, t4.strand, t4.start, t4.end, t4.id, t4.parent = ["Chr5", strand, 300, 1000, "t4", "loc"]
t4.add_exons([(300, 320), (600, 800), (900, 1000)])
t4.finalize()
self.assertGreaterEqual(t4.cdna_length, 300)
loc.add_transcript_to_locus(t4)
self.assertIn(t4.id, loc)
t5 = Transcript()
t5.chrom, t5.strand, t5.start, t5.end, t5.id, t5.parent = ["Chr5", strand, 100, 800, "t5", "loc"]
t5.add_exons([(100, 140), (360, 650), (700, 800)])
t5.finalize()
loc.add_transcript_to_locus(t5)
loc._load_scores({"t1": 20, "t2": 10, "t3": 10, "t4": 15, "t5": 15})
self.assertIn(t5.id, loc)
self.assertIn(t1.id, loc)
self.assertEqual(loc["t1"].score, 20)
loc.pad_transcripts()
self.assertEqual(loc["t4"].start, 100)
self.assertEqual(loc["t5"].end, 1000)
self.assertEqual(loc["t2"].start, 299)
self.assertEqual(loc["t3"].end, 801)
@mark.slow
def test_complete_padding(self):
transcripts = self.load_from_bed("Mikado.tests", "complete_padding.bed12")
logger = create_default_logger(inspect.getframeinfo(inspect.currentframe())[2], level="WARNING")
for idx in range(1, 5):
self.assertIn('AT5G01030.{}'.format(idx), transcripts.keys(), transcripts.keys())
cds_coordinates = dict()
genome = pkg_resources.resource_filename("Mikado.tests", "chr5.fas.gz")
# Distance of .4 to .1 or .2: 600
# Distance of .3 to .1: 270
# Distance of .3 to .2: 384
for pad_distance, max_splice, coding, best in itertools.product(
(300, 400, 601),
(0, 1, 2, 3),
(False, True,),
tuple(["AT5G01030.1", "AT5G01030.2"])):
with self.subTest(pad_distance=pad_distance, max_splice=max_splice, coding=coding, best=best):
primary = transcripts[best].copy()
if coding is False:
primary.strip_cds()
locus = loci.Locus(primary)
locus.configuration.reference.genome = genome
for t in transcripts:
if t == locus.primary_transcript_id:
continue
trans = transcripts[t].copy()
if coding is False:
trans.strip_cds()
locus.add_transcript_to_locus(trans)
# Now add the scores
scores = {best: 15}
for tid in transcripts.keys():
if tid not in ("AT5G01030.1", "AT5G01030.2"):
scores[tid] = 9
else:
scores[tid] = 10
                locus._load_scores(scores=scores)
if coding:
cds_coordinates = dict()
for transcript in locus:
cds_coordinates[transcript] = (
locus[transcript].combined_cds_start, locus[transcript].combined_cds_end)
locus.logger = logger
locus.configuration.pick.alternative_splicing.ts_distance = pad_distance
locus.configuration.pick.alternative_splicing.ts_max_splices = max_splice
locus.logger.setLevel("DEBUG")
locus.pad_transcripts()
locus.logger.setLevel("WARNING")
self.assertEqual(transcripts["AT5G01030.2"].start, 9869)
self.assertEqual(locus[best].start, 9869)
self.assertIn(best, locus)
if max_splice < 2 or pad_distance <= 250:
with self.assertLogs(logger, "DEBUG") as cm:
locus.logger.setLevel("DEBUG")
share = locus._share_five_prime(transcripts["AT5G01030.3"],
transcripts["AT5G01030.1"])
self.assertEqual(False, share, cm.output)
self.assertEqual(locus["AT5G01030.3"].start, transcripts["AT5G01030.3"].start,
(pad_distance, max_splice, coding, best))
self.assertNotIn("padded", locus["AT5G01030.3"].attributes,
(pad_distance, max_splice, coding, best))
else:
self.assertEqual(locus["AT5G01030.3"].start, transcripts["AT5G01030.2"].start,
(locus["AT5G01030.3"].start, pad_distance, max_splice, coding, best))
self.assertTrue(locus["AT5G01030.3"].attributes.get("padded", False),
(pad_distance, max_splice, coding, best))
self.assertEqual(locus["AT5G01030.3"].exons,
[(9869, 10172), (10574, 12665), (12803, 13235)])
if coding:
self.assertEqual(locus["AT5G01030.3"].combined_cds_start,
transcripts["AT5G01030.2"].combined_cds_start)
# self.assertFalse(locus[best].attributes.get("padded", False))
if max_splice < 2 or pad_distance < 600:
self.assertEqual(locus["AT5G01030.4"].end, transcripts["AT5G01030.4"].end)
self.assertNotIn("padded", locus["AT5G01030.4"].attributes,
(pad_distance, max_splice, coding, best))
else:
self.assertEqual(locus["AT5G01030.4"].end, transcripts["AT5G01030.2"].end,
(pad_distance, max_splice, coding, best))
self.assertTrue(locus["AT5G01030.4"].attributes.get("padded", False))
if coding:
self.assertTrue(locus["AT5G01030.4"].is_coding,
locus["AT5G01030.4"])
self.assertTrue(transcripts["AT5G01030.2"].is_coding)
self.assertTrue(locus["AT5G01030.2"].is_coding)
self.assertEqual(locus["AT5G01030.4"].combined_cds_end,
# transcripts["AT5G01030.2"].combined_cds_end,
locus["AT5G01030.2"].combined_cds_end,
"\n".join([locus["AT5G01030.2"].format("bed12"),
locus["AT5G01030.4"].format("bed12")]))
@mark.slow
def test_negative_padding(self):
genome = pkg_resources.resource_filename("Mikado.tests", "neg_pad.fa")
transcripts = self.load_from_bed("Mikado.tests", "neg_pad.bed12")
logger = create_default_logger(inspect.getframeinfo(inspect.currentframe())[2], level="WARNING")
self.assertIn('Human_coding_ENSP00000371111.2.m1', transcripts.keys(), transcripts.keys())
locus = loci.Locus(transcripts['Human_coding_ENSP00000371111.2.m1'], logger=logger)
locus.configuration.reference.genome = genome
for t in transcripts:
if t == locus.primary_transcript_id:
continue
locus.add_transcript_to_locus(transcripts[t])
self.assertEqual(transcripts['Human_coding_ENSP00000371111.2.m1'].combined_cds_end, 1646)
self.assertEqual(transcripts['Human_coding_ENSP00000371111.2.m1'].combined_cds_start, 33976)
self.assertEqual(transcripts['Human_coding_ENSP00000371111.2.m1'].combined_cds_end,
transcripts['Human_coding_ENSP00000371111.2.m1'].start)
self.assertEqual(transcripts['Human_coding_ENSP00000371111.2.m1'].combined_cds_start,
transcripts['Human_coding_ENSP00000371111.2.m1'].end)
cds_coordinates = dict()
for transcript in locus:
cds_coordinates[transcript] = (locus[transcript].combined_cds_start, locus[transcript].combined_cds_end)
corr = {1: "Human_coding_ENSP00000371111.2.m1", # 1645 33976
2: "Mikado_gold_mikado.0G230.1", # 1 34063
3: "ACOCA10068_run2_woRNA_ACOCA10068_r3_0032600.1" # 1032 34095
}
for pad_distance, max_splice in zip((130, 700, 1500, 2000), (1, )):
with self.subTest(pad_distance=pad_distance, max_splice=max_splice):
logger = create_default_logger("logger", level="WARNING")
locus.logger = logger
locus.configuration.pick.alternative_splicing.ts_distance = pad_distance
locus.configuration.pick.alternative_splicing.ts_max_splices = max_splice
locus.logger.setLevel("DEBUG")
with self.assertLogs(logger, level="DEBUG") as pado:
locus.pad_transcripts()
for tid in corr:
self.assertIn(corr[tid], locus.transcripts, corr[tid])
for transcript in locus:
self.assertGreater(locus[transcript].combined_cds_length, 0, transcript)
self.assertEqual(locus[transcript].combined_cds_start, cds_coordinates[transcript][0])
self.assertEqual(locus[transcript].combined_cds_end, cds_coordinates[transcript][1])
if pad_distance > 720: # Ends must be uniform
self.assertEqual(locus[corr[1]].end, locus[corr[3]].end,
([locus[corr[_]].end for _ in range(1, 4)],
locus._share_extreme(transcripts[corr[1]],
transcripts[corr[2]],
three_prime=False))
)
self.assertEqual(locus[corr[1]].end, locus[corr[2]].end,
([locus[corr[_]].end for _ in range(1, 4)],
locus._share_extreme(transcripts[corr[1]],
transcripts[corr[2]],
three_prime=False))
)
elif pad_distance < 20:
self.assertNotEqual(locus[corr[1]].end, locus[corr[3]].end)
self.assertNotEqual(locus[corr[1]].end, locus[corr[2]].end)
self.assertNotEqual(locus[corr[2]].end, locus[corr[3]].end)
if pad_distance >= (abs(transcripts[corr[1]].start - transcripts[corr[2]].start)):
self.assertEqual(locus[corr[1]].start,
locus[corr[2]].start)
self.assertEqual(locus[corr[1]].start,
locus[corr[3]].start)
else:
self.assertNotEqual(locus[corr[1]].start, locus[corr[2]].start,
(abs(transcripts[corr[1]].start - transcripts[corr[2]].start),
pad_distance,
locus._share_extreme(transcripts[corr[1]], transcripts[corr[2]],
three_prime=True)
))
if pad_distance >= (abs(transcripts[corr[1]].start - transcripts[corr[3]].start)):
self.assertEqual(locus[corr[3]].start,
locus[corr[1]].start)
elif pad_distance <= 130:
with self.assertLogs(logger, "DEBUG") as cm:
five_graph = locus.define_graph(objects=transcripts,
inters=locus._share_extreme, three_prime=False)
three_graph = locus.define_graph(objects=transcripts,
inters=locus._share_extreme, three_prime=True)
print(five_graph.edges)
print(three_graph.edges)
boundaries = locus._find_communities_boundaries(five_graph, three_graph)
print(boundaries)
# locus.logger.setLevel("DEBUG")
shared = locus._share_five_prime(transcripts[corr[2]], transcripts[corr[3]])
self.assertTrue(shared is False, cm.output)
shared = locus._share_five_prime(transcripts[corr[1]], transcripts[corr[3]])
self.assertTrue(shared is False, cm.output)
# self.assertEqual()
self.assertNotEqual(locus[corr[3]].start, locus[corr[1]].start,
pado.output)
@mark.slow
def test_padding(self):
genome = pkg_resources.resource_filename("Mikado.tests", "padding_test.fa")
transcripts = self.load_from_bed("Mikado.tests", "padding_test.bed12")
ids = ["mikado.44G2.{}".format(_) for _ in range(1, 6)]
params = {
"mikado.44G2.1": (sum([exon[1] + 1 - max(exon[0], transcripts["mikado.44G2.2"].end)
for exon in transcripts["mikado.44G2.1"].exons
if exon[1] > transcripts["mikado.44G2.2"].end]), 1),
"mikado.44G2.5": (sum([exon[1] + 1 - max(exon[0], transcripts["mikado.44G2.2"].end)
for exon in transcripts["mikado.44G2.5"].exons
if exon[1] > transcripts["mikado.44G2.2"].end]), 4)
}
print(params)
logger = create_default_logger(inspect.getframeinfo(inspect.currentframe())[2], level="INFO")
for pad_distance, max_splice, coding, best in itertools.product((200, 1000, 1200, 5000), (1, 1, 5),
(True, False),
("mikado.44G2.1", "mikado.44G2.5")):
with self.subTest(pad_distance=pad_distance, max_splice=max_splice, coding=coding, best=best):
primary = transcripts[best].copy()
if coding is False:
primary.strip_cds()
locus = loci.Locus(primary)
locus.configuration.reference.genome = genome
for t in transcripts:
if t == locus.primary_transcript_id:
continue
trans = transcripts[t].copy()
if coding is False:
trans.strip_cds()
locus.add_transcript_to_locus(trans)
# Now add the scores
scores = {best: 15}
for tid in ids:
if tid != best:
scores[tid] = 10
locus._load_scores(scores=scores)
if coding:
cds_coordinates = dict()
for transcript in locus:
cds_coordinates[transcript] = (
locus[transcript].combined_cds_start, locus[transcript].combined_cds_end)
locus.logger = logger
locus.configuration.pick.alternative_splicing.ts_distance = pad_distance
locus.configuration.pick.alternative_splicing.ts_max_splices = max_splice
locus.pad_transcripts()
# The .1 transcript can NEVER be expanded, it ends within an intron.
self.assertFalse(locus[best].attributes.get("padded", False))
# self.assertFalse(locus["mikado.44G2.1"].attributes.get("padded", False))
if params[best][0] <= pad_distance and params[best][1] <= max_splice:
for trans in ids:
if trans in params.keys():
continue
self.assertTrue(locus[trans].attributes.get("padded", False),
(locus[trans].id, best, locus[trans].end, pad_distance, max_splice,
params[best],
{item for item in locus[trans].attributes.items() if "ts" in item[0]}))
self.assertEqual(locus[trans].end, locus[best].end,
(locus[trans].id, best, locus[trans].end, pad_distance, max_splice,
params[best],
{item for item in locus[trans].attributes.items() if "ts" in item[0]}
))
else:
for trans in ids:
if trans in params.keys():
continue
self.assertFalse(locus[trans].attributes.get("padded", False),
((locus[trans].id, locus[trans].start, locus[trans].end,
transcripts[trans].start, transcripts[trans].end),
best,
pad_distance, max_splice,
params[best],
{item for item in locus[trans].attributes.items() if "ts" in item[0]}))
self.assertEqual(locus["mikado.44G2.3"].end, locus["mikado.44G2.2"].end)
self.assertEqual(locus["mikado.44G2.4"].end, locus["mikado.44G2.2"].end)
if coding:
for transcript in locus:
self.assertGreater(locus[transcript].combined_cds_length, 0)
self.assertEqual(locus[transcript].combined_cds_start, cds_coordinates[transcript][0])
self.assertEqual(locus[transcript].combined_cds_end, cds_coordinates[transcript][1])
@mark.slow
def test_phasing(self):
transcripts = self.load_from_bed("Mikado.tests", "phasing_padding.bed12")
# We have to test that the CDS is reconstructed correctly even when considering the phasing
genome = self.fai
logger = create_null_logger("test_phasing", level="INFO")
for phase in (0, 1, 2):
with self.subTest(phase=phase):
locus = Locus(transcripts["AT5G01030.1"], logger=logger)
locus.configuration.reference.genome = genome
other = transcripts["AT5G01030.2"].deepcopy()
self.assertNotEqual(other.start, locus["AT5G01030.1"].start)
other.unfinalize()
other.start += (3 - phase) % 3
other.remove_exon((10644, 12665))
other.add_exon((other.start, 12665))
other.add_exon((other.start, 12665), feature="CDS", phase=phase)
with self.assertLogs(other.logger, level="DEBUG") as cmo:
other.finalize()
self.assertTrue(other.is_coding, cmo.output)
self.assertEqual(other.phases[(other.start, 12665)], phase)
self.assertEqual(other.combined_cds_start, other.start)
locus.add_transcript_to_locus(other)
self.assertIn(other.id, locus.transcripts)
locus.logger.setLevel("DEBUG")
locus.pad_transcripts()
# self.assertEqual("", locus[other.id].format("bed12"))
self.assertEqual(locus[other.id].start, transcripts["AT5G01030.1"].start, phase)
self.assertEqual(locus[other.id].combined_cds_start, transcripts["AT5G01030.1"].combined_cds_start)
def test_pad_monoexonic(self):
transcript = Transcript()
transcript.chrom, transcript.strand, transcript.id = "Chr5", "+", "mono.1"
transcript.add_exons([(2001, 3000)])
transcript.finalize()
backup = transcript.deepcopy()
template_one = Transcript()
template_one.chrom, template_one.strand, template_one.id = "Chr5", "+", "multi.1"
template_one.add_exons([(1931, 2500), (2701, 3500)])
template_one.finalize()
logger = create_null_logger("test_pad_monoexonic")
for case in range(3):
with self.subTest(case=case):
transcript = backup.deepcopy()
start = template_one if case % 2 == 0 else False
end = template_one if case > 0 else False
expanded_one = pad_transcript(transcript,
transcript.deepcopy(),
start, end, self.fai, logger=logger)
if start:
self.assertEqual(expanded_one.start, template_one.start)
else:
self.assertEqual(expanded_one.start, transcript.start)
if end:
self.assertEqual(expanded_one.end, template_one.end)
else:
self.assertEqual(expanded_one.end, transcript.end)
# Now monoexonic template
template_two = Transcript()
template_two.chrom, template_two.strand, template_two.id = "Chr5", "+", "multi.1"
template_two.add_exons([(1931, 3500)])
template_two.finalize()
for case in range(3):
with self.subTest(case=case):
transcript = backup.deepcopy()
start = template_two if case % 2 == 0 else False
end = template_two if case > 0 else False
expanded_one = pad_transcript(transcript,
transcript.deepcopy(),
start, end, self.fai, logger=logger)
if start:
self.assertEqual(expanded_one.start, template_two.start)
else:
self.assertEqual(expanded_one.start, transcript.start)
if end:
self.assertEqual(expanded_one.end, template_two.end)
else:
self.assertEqual(expanded_one.end, transcript.end)
# Now monoexonic template
template_three = Transcript()
template_three.chrom, template_three.strand, template_three.id = "Chr5", "+", "multi.1"
template_three.add_exons([(1501, 1700), (1931, 3500), (4001, 5000)])
template_three.finalize()
for case in range(3):
with self.subTest(case=case):
transcript = backup.deepcopy()
start = template_three if case % 2 == 0 else False
end = template_three if case > 0 else False
expanded_one = pad_transcript(transcript,
transcript.deepcopy(),
start, end, self.fai, logger=logger)
if start:
self.assertEqual(expanded_one.start, start.start)
self.assertIn((1501, 1700), expanded_one.exons)
else:
self.assertEqual(expanded_one.start, transcript.start)
self.assertNotIn((1501, 1700), expanded_one.exons)
if end:
self.assertEqual(expanded_one.end, end.end)
self.assertIn((4001, 5000), expanded_one.exons)
else:
self.assertEqual(expanded_one.end, transcript.end)
self.assertNotIn((4001, 5000), expanded_one.exons)
def test_pad_multiexonic(self):
transcript = Transcript()
transcript.chrom, transcript.strand, transcript.id = "Chr5", "+", "mono.1"
transcript.add_exons([(2001, 2400), (2800, 3000)])
transcript.finalize()
backup = transcript.deepcopy()
template_one = Transcript()
template_one.chrom, template_one.strand, template_one.id = "Chr5", "+", "multi.1"
template_one.add_exons([(1931, 2500), (2701, 3500)])
template_one.finalize()
logger = create_null_logger("test_pad_monoexonic")
for case in range(3):
with self.subTest(case=case):
transcript = backup.deepcopy()
start = template_one if case % 2 == 0 else False
end = template_one if case > 0 else False
expanded_one = pad_transcript(transcript,
transcript.deepcopy(),
start, end, self.fai, logger=logger)
if start:
self.assertEqual(expanded_one.start, template_one.start)
else:
self.assertEqual(expanded_one.start, backup.start)
if end:
self.assertEqual(expanded_one.end, template_one.end)
else:
self.assertEqual(expanded_one.end, backup.end)
# Now monoexonic template
template_two = Transcript()
template_two.chrom, template_two.strand, template_two.id = "Chr5", "+", "multi.1"
template_two.add_exons([(1931, 3500)])
template_two.finalize()
for case in range(3):
with self.subTest(case=case):
transcript = backup.deepcopy()
start = template_two if case % 2 == 0 else False
end = template_two if case > 0 else False
expanded_one = pad_transcript(transcript,
transcript.deepcopy(),
start, end, self.fai, logger=logger)
if start:
self.assertEqual(expanded_one.start, template_two.start)
else:
self.assertEqual(expanded_one.start, backup.start)
if end:
self.assertEqual(expanded_one.end, template_two.end)
else:
self.assertEqual(expanded_one.end, transcript.end)
# Now monoexonic template
template_three = Transcript()
template_three.chrom, template_three.strand, template_three.id = "Chr5", "+", "multi.1"
template_three.add_exons([(1501, 1700), (1931, 3500), (4001, 5000)])
template_three.finalize()
for case in range(3):
with self.subTest(case=case):
transcript = backup.deepcopy()
start = template_three if case % 2 == 0 else False
end = template_three if case > 0 else False
expanded_one = pad_transcript(transcript,
transcript.deepcopy(),
start, end, self.fai, logger=logger)
if start:
self.assertEqual(expanded_one.start, start.start)
self.assertIn((1501, 1700), expanded_one.exons)
else:
self.assertEqual(expanded_one.start, backup.start)
self.assertNotIn((1501, 1700), expanded_one.exons)
if end:
self.assertEqual(expanded_one.end, end.end)
self.assertIn((4001, 5000), expanded_one.exons)
else:
self.assertEqual(expanded_one.end, backup.end)
self.assertNotIn((4001, 5000), expanded_one.exons)
def test_expand_multi_end(self):
transcript = Transcript()
transcript.chrom, transcript.strand, transcript.id = "Chr5", "-", "multi.1"
transcript.add_exons([
(12751486, 12751579),
(12751669, 12751808),
(12751895, 12752032),
(12752078, 12752839)])
transcript.finalize()
template = Transcript()
template.chrom, template.strand, template.id = "Chr5", "-", "template"
template.add_exons([
(12751151, 12751579),
(12751669, 12751808),
(12751895, 12752839), # This exon terminates exactly as the last exon of the transcript ends
(12752974, 12753102)
])
template.finalize()
logger = create_null_logger("test_expand_multi_end")
# Now let us expand on both ends
with self.subTest():
expanded = pad_transcript(transcript, transcript.deepcopy(), template, template,
fai=self.fai, logger=logger)
self.assertEqual(expanded.exons,
[(12751151, 12751579),
(12751669, 12751808), (12751895, 12752032),
(12752078, 12752839), (12752974, 12753102)])
def test_expand_both_sides(self):
transcript = Transcript()
transcript.chrom, transcript.strand, transcript.id = "Chr5", "+", "test"
transcript.add_exons([(100053, 100220), (100640, 101832)])
transcript.finalize()
template = Transcript()
template.chrom, template.strand, template.id = "Chr5", "+", "template"
template.add_exons([(99726, 100031), (100657, 102000)])
template.finalize()
with self.subTest():
backup = transcript.deepcopy()
logger = create_null_logger()
pad_transcript(transcript, transcript.deepcopy(), template, template, self.fai, logger=logger)
self.assertEqual(
transcript.exons,
[(99726, 100220), (100640, 102000)]
)
def test_no_expansion(self):
transcript = Transcript()
transcript.chrom, transcript.strand, transcript.id = "Chr5", "+", "test"
transcript.add_exons([(100053, 100220), (100657, 101832)])
transcript.finalize()
backup = transcript.deepcopy()
logger = create_null_logger(level="DEBUG")
with self.assertLogs(logger=logger, level="DEBUG") as cm:
pad_transcript(transcript, backup, None, None, self.fai, logger)
self.assertEqual(transcript, backup)
self.assertIn("DEBUG:null:test does not need to be expanded, exiting", cm.output)
def test_edge_expansion(self):
transcript = Transcript()
transcript.id, transcript.chrom, transcript.strand = "test", "Chr5", "+"
transcript.add_exons([(194892, 195337), (195406, 195511),
(195609, 195694), (195788, 195841),
(195982, 196098), (196207, 196255),
(196356, 196505), (196664, 196725),
(197652, 197987)])
transcript.finalize()
backup = transcript.deepcopy()
start_transcript = Transcript()
start_transcript.id, start_transcript.chrom, start_transcript.strand = "template", "Chr5", "+"
start_transcript.add_exons([(194741, 194891), (195179, 195337), (195406, 195511),
(195609, 195694), (195788, 195841), (195982, 196098),
(196207, 196255), (196356, 196505), (196664, 196725),
(196848, 196943)])
logger = create_null_logger()
with self.assertLogs(logger=logger, level="DEBUG"):
pad_transcript(transcript, backup, start_transcript, False, self.fai, logger)
self.assertNotEqual(transcript, backup)
self.assertEqual(transcript.exons,
[(194741, 195337), (195406, 195511),
(195609, 195694), (195788, 195841),
(195982, 196098), (196207, 196255),
(196356, 196505), (196664, 196725),
(197652, 197987)]
)
def test_swap_single(self):
transcript = Transcript()
transcript.id, transcript.chrom, transcript.strand = "test", "Chr5", "+"
transcript.add_exons([(101, 1000)])
transcript.finalize()
new = transcript.deepcopy()
locus = Locus(transcript)
self.assertEqual(locus.primary_transcript, transcript)
self.assertEqual(len(locus.exons), 1)
# False swap
locus._swap_transcript(transcript, transcript)
new.unfinalize()
new.remove_exon((101, 1000))
new.start, new.end = 51, 1200
new.add_exons([(51, 200), (501, 1200)])
new.finalize()
self.assertEqual(transcript.id, new.id)
locus._swap_transcript(transcript, new)
self.assertEqual(len(locus.exons), 2)
self.assertEqual(locus.exons, set(new.exons))
self.assertEqual(locus.primary_transcript, new)
new2 = transcript.deepcopy()
new2.id = "test2"
with self.assertRaises(KeyError):
locus._swap_transcript(transcript, new2)
def test_swap_see_metrics(self):
transcript = Transcript()
transcript.id, transcript.chrom, transcript.strand = "test", "Chr5", "+"
transcript.add_exons([(101, 1000), (1201, 1500)])
transcript.finalize()
new = transcript.deepcopy()
locus = Locus(transcript)
locus.configuration.pick.alternative_splicing.only_confirmed_introns = False
second = Transcript()
second.id, second.chrom, second.strand = "test2", "Chr5", "+"
second.add_exons([(101, 1000), (1301, 1600)])
second.finalize()
locus.add_transcript_to_locus(second)
self.assertEqual(len(locus.transcripts), 2)
locus.filter_and_calculate_scores()
self.assertAlmostEqual(locus[second.id].exon_fraction, 2/3, places=3)
self.assertAlmostEqual(locus[transcript.id].exon_fraction, 2 / 3, places=3)
self.assertEqual(locus.primary_transcript, transcript)
new.unfinalize()
new.remove_exon((101, 1000))
new.start, new.end = 51, 1500
new.add_exons([(51, 200), (501, 1000)])
new.finalize()
self.assertEqual(transcript.id, new.id)
self.assertEqual(locus.primary_transcript_id, transcript.id)
locus._swap_transcript(transcript, new)
self.assertEqual(locus.primary_transcript, new)
self.assertEqual(locus.exons, {(51, 200), (501, 1000), (101, 1000), (1301, 1600), (1201, 1500)})
locus.filter_and_calculate_scores()
self.assertAlmostEqual(locus[second.id].exon_fraction, 2 / 5, places=3)
self.assertAlmostEqual(locus[transcript.id].exon_fraction, 3 / 5, places=3)
if __name__ == '__main__':
unittest.main(verbosity=2)
| lgpl-3.0 |
Fireblend/scikit-learn | sklearn/manifold/locally_linear.py | 206 | 25061 | """Locally Linear Embedding"""
# Author: Fabian Pedregosa -- <[email protected]>
# Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) INRIA 2011
import numpy as np
from scipy.linalg import eigh, svd, qr, solve
from scipy.sparse import eye, csr_matrix
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.arpack import eigsh
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..neighbors import NearestNeighbors
def barycenter_weights(X, Z, reg=1e-3):
"""Compute barycenter weights of X from Y along the first axis
We estimate the weights to assign to each point in Y[i] to recover
the point X[i]. The barycenter weights sum to 1.
Parameters
----------
X : array-like, shape (n_samples, n_dim)
Z : array-like, shape (n_samples, n_neighbors, n_dim)
reg: float, optional
amount of regularization to add for the problem to be
well-posed in the case of n_neighbors > n_dim
Returns
-------
B : array-like, shape (n_samples, n_neighbors)
Notes
-----
See developers note for more information.
"""
X = check_array(X, dtype=FLOAT_DTYPES)
Z = check_array(Z, dtype=FLOAT_DTYPES, allow_nd=True)
n_samples, n_neighbors = X.shape[0], Z.shape[1]
B = np.empty((n_samples, n_neighbors), dtype=X.dtype)
v = np.ones(n_neighbors, dtype=X.dtype)
# this might raise a LinalgError if G is singular and has trace
# zero
for i, A in enumerate(Z.transpose(0, 2, 1)):
C = A.T - X[i] # broadcasting
G = np.dot(C, C.T)
trace = np.trace(G)
if trace > 0:
R = reg * trace
else:
R = reg
G.flat[::Z.shape[1] + 1] += R
w = solve(G, v, sym_pos=True)
B[i, :] = w / np.sum(w)
return B
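# Illustrative sketch (not part of the public scikit-learn API): a minimal,
# hedged example of what `barycenter_weights` computes. Each row of the
# returned matrix reconstructs X[i] from its neighborhood Z[i] and sums to 1.
# The toy data and the choice of 2 neighbors per sample are arbitrary.
def _demo_barycenter_weights():
    rng = np.random.RandomState(0)
    X = rng.rand(5, 3)                      # 5 samples in 3 dimensions
    Z = X[rng.randint(0, 5, (5, 2))]        # 2 arbitrary "neighbors" per sample
    B = barycenter_weights(X, Z)
    assert np.allclose(B.sum(axis=1), 1.0)  # weights sum to 1 along each row
    return B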
def barycenter_kneighbors_graph(X, n_neighbors, reg=1e-3):
"""Computes the barycenter weighted graph of k-Neighbors for points in X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : int
Number of neighbors for each sample.
reg : float, optional
Amount of regularization when solving the least-squares
problem. Only relevant if mode='barycenter'. If None, use the
default.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
See also
--------
sklearn.neighbors.kneighbors_graph
sklearn.neighbors.radius_neighbors_graph
"""
knn = NearestNeighbors(n_neighbors + 1).fit(X)
X = knn._fit_X
n_samples = X.shape[0]
ind = knn.kneighbors(X, return_distance=False)[:, 1:]
data = barycenter_weights(X, X[ind], reg=reg)
indptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors)
return csr_matrix((data.ravel(), ind.ravel(), indptr),
shape=(n_samples, n_samples))
def null_space(M, k, k_skip=1, eigen_solver='arpack', tol=1E-6, max_iter=100,
random_state=None):
"""
Find the null space of a matrix M.
Parameters
----------
M : {array, matrix, sparse matrix, LinearOperator}
Input covariance matrix: should be symmetric positive semi-definite
k : integer
Number of eigenvalues/vectors to return
k_skip : integer, optional
Number of low eigenvalues to skip.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method.
Not used if eigen_solver=='dense'.
max_iter : maximum number of iterations for 'arpack' method
not used if eigen_solver=='dense'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
"""
if eigen_solver == 'auto':
if M.shape[0] > 200 and k + k_skip < 10:
eigen_solver = 'arpack'
else:
eigen_solver = 'dense'
if eigen_solver == 'arpack':
random_state = check_random_state(random_state)
v0 = random_state.rand(M.shape[0])
try:
eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0,
tol=tol, maxiter=max_iter,
v0=v0)
except RuntimeError as msg:
raise ValueError("Error in determining null-space with ARPACK. "
"Error message: '%s'. "
"Note that method='arpack' can fail when the "
"weight matrix is singular or otherwise "
"ill-behaved. method='dense' is recommended. "
"See online documentation for more information."
% msg)
return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
elif eigen_solver == 'dense':
if hasattr(M, 'toarray'):
M = M.toarray()
eigen_values, eigen_vectors = eigh(
M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True)
index = np.argsort(np.abs(eigen_values))
return eigen_vectors[:, index], np.sum(eigen_values)
else:
raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)
def locally_linear_embedding(
X, n_neighbors, n_components, reg=1e-3, eigen_solver='auto', tol=1e-6,
max_iter=100, method='standard', hessian_tol=1E-4, modified_tol=1E-12,
random_state=None):
"""Perform a Locally Linear Embedding analysis on the data.
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold.
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
method : {'standard', 'hessian', 'modified', 'ltsa'}
standard : use the standard locally linear embedding algorithm.
see reference [1]_
hessian : use the Hessian eigenmap method. This method requires
n_neighbors > n_components * (1 + (n_components + 1) / 2.
see reference [2]_
modified : use the modified locally linear embedding algorithm.
see reference [3]_
ltsa : use local tangent space alignment algorithm
see reference [4]_
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if method == 'hessian'
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if method == 'modified'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
Returns
-------
Y : array-like, shape [n_samples, n_components]
Embedding vectors.
squared_error : float
Reconstruction error for the embedding vectors. Equivalent to
``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
if eigen_solver not in ('auto', 'arpack', 'dense'):
raise ValueError("unrecognized eigen_solver '%s'" % eigen_solver)
if method not in ('standard', 'hessian', 'modified', 'ltsa'):
raise ValueError("unrecognized method '%s'" % method)
nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1)
nbrs.fit(X)
X = nbrs._fit_X
N, d_in = X.shape
if n_components > d_in:
raise ValueError("output dimension must be less than or equal "
"to input dimension")
if n_neighbors >= N:
raise ValueError("n_neighbors must be less than number of points")
if n_neighbors <= 0:
raise ValueError("n_neighbors must be positive")
M_sparse = (eigen_solver != 'dense')
if method == 'standard':
W = barycenter_kneighbors_graph(
nbrs, n_neighbors=n_neighbors, reg=reg)
# we'll compute M = (I-W)'(I-W)
# depending on the solver, we'll do this differently
if M_sparse:
M = eye(*W.shape, format=W.format) - W
M = (M.T * M).tocsr()
else:
M = (W.T * W - W.T - W).toarray()
            M.flat[::M.shape[0] + 1] += 1  # add the identity: M = (W - I)' (W - I)
elif method == 'hessian':
dp = n_components * (n_components + 1) // 2
if n_neighbors <= n_components + dp:
raise ValueError("for method='hessian', n_neighbors must be "
"greater than "
"[n_components * (n_components + 3) / 2]")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
Yi = np.empty((n_neighbors, 1 + n_components + dp), dtype=np.float)
Yi[:, 0] = 1
M = np.zeros((N, N), dtype=np.float)
use_svd = (n_neighbors > d_in)
for i in range(N):
Gi = X[neighbors[i]]
Gi -= Gi.mean(0)
#build Hessian estimator
if use_svd:
U = svd(Gi, full_matrices=0)[0]
else:
Ci = np.dot(Gi, Gi.T)
U = eigh(Ci)[1][:, ::-1]
Yi[:, 1:1 + n_components] = U[:, :n_components]
j = 1 + n_components
for k in range(n_components):
Yi[:, j:j + n_components - k] = (U[:, k:k + 1]
* U[:, k:n_components])
j += n_components - k
Q, R = qr(Yi)
w = Q[:, n_components + 1:]
S = w.sum(0)
S[np.where(abs(S) < hessian_tol)] = 1
w /= S
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(w, w.T)
if M_sparse:
M = csr_matrix(M)
elif method == 'modified':
if n_neighbors < n_components:
raise ValueError("modified LLE requires "
"n_neighbors >= n_components")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
#find the eigenvectors and eigenvalues of each local covariance
# matrix. We want V[i] to be a [n_neighbors x n_neighbors] matrix,
# where the columns are eigenvectors
V = np.zeros((N, n_neighbors, n_neighbors))
nev = min(d_in, n_neighbors)
evals = np.zeros([N, nev])
#choose the most efficient way to find the eigenvectors
use_svd = (n_neighbors > d_in)
if use_svd:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
V[i], evals[i], _ = svd(X_nbrs,
full_matrices=True)
evals **= 2
else:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
C_nbrs = np.dot(X_nbrs, X_nbrs.T)
evi, vi = eigh(C_nbrs)
evals[i] = evi[::-1]
V[i] = vi[:, ::-1]
#find regularized weights: this is like normal LLE.
# because we've already computed the SVD of each covariance matrix,
# it's faster to use this rather than np.linalg.solve
reg = 1E-3 * evals.sum(1)
tmp = np.dot(V.transpose(0, 2, 1), np.ones(n_neighbors))
tmp[:, :nev] /= evals + reg[:, None]
tmp[:, nev:] /= reg[:, None]
w_reg = np.zeros((N, n_neighbors))
for i in range(N):
w_reg[i] = np.dot(V[i], tmp[i])
w_reg /= w_reg.sum(1)[:, None]
#calculate eta: the median of the ratio of small to large eigenvalues
# across the points. This is used to determine s_i, below
rho = evals[:, n_components:].sum(1) / evals[:, :n_components].sum(1)
eta = np.median(rho)
#find s_i, the size of the "almost null space" for each point:
# this is the size of the largest set of eigenvalues
# such that Sum[v; v in set]/Sum[v; v not in set] < eta
s_range = np.zeros(N, dtype=int)
evals_cumsum = np.cumsum(evals, 1)
eta_range = evals_cumsum[:, -1:] / evals_cumsum[:, :-1] - 1
for i in range(N):
s_range[i] = np.searchsorted(eta_range[i, ::-1], eta)
s_range += n_neighbors - nev # number of zero eigenvalues
#Now calculate M.
# This is the [N x N] matrix whose null space is the desired embedding
M = np.zeros((N, N), dtype=np.float)
for i in range(N):
s_i = s_range[i]
#select bottom s_i eigenvectors and calculate alpha
Vi = V[i, :, n_neighbors - s_i:]
alpha_i = np.linalg.norm(Vi.sum(0)) / np.sqrt(s_i)
#compute Householder matrix which satisfies
# Hi*Vi.T*ones(n_neighbors) = alpha_i*ones(s)
# using prescription from paper
h = alpha_i * np.ones(s_i) - np.dot(Vi.T, np.ones(n_neighbors))
norm_h = np.linalg.norm(h)
if norm_h < modified_tol:
h *= 0
else:
h /= norm_h
#Householder matrix is
# >> Hi = np.identity(s_i) - 2*np.outer(h,h)
#Then the weight matrix is
# >> Wi = np.dot(Vi,Hi) + (1-alpha_i) * w_reg[i,:,None]
#We do this much more efficiently:
Wi = (Vi - 2 * np.outer(np.dot(Vi, h), h)
+ (1 - alpha_i) * w_reg[i, :, None])
#Update M as follows:
# >> W_hat = np.zeros( (N,s_i) )
# >> W_hat[neighbors[i],:] = Wi
# >> W_hat[i] -= 1
# >> M += np.dot(W_hat,W_hat.T)
#We can do this much more efficiently:
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(Wi, Wi.T)
Wi_sum1 = Wi.sum(1)
M[i, neighbors[i]] -= Wi_sum1
M[neighbors[i], i] -= Wi_sum1
M[i, i] += s_i
if M_sparse:
M = csr_matrix(M)
elif method == 'ltsa':
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
M = np.zeros((N, N))
use_svd = (n_neighbors > d_in)
for i in range(N):
Xi = X[neighbors[i]]
Xi -= Xi.mean(0)
# compute n_components largest eigenvalues of Xi * Xi^T
if use_svd:
v = svd(Xi, full_matrices=True)[0]
else:
Ci = np.dot(Xi, Xi.T)
v = eigh(Ci)[1][:, ::-1]
Gi = np.zeros((n_neighbors, n_components + 1))
Gi[:, 1:] = v[:, :n_components]
Gi[:, 0] = 1. / np.sqrt(n_neighbors)
GiGiT = np.dot(Gi, Gi.T)
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] -= GiGiT
M[neighbors[i], neighbors[i]] += 1
return null_space(M, n_components, k_skip=1, eigen_solver=eigen_solver,
tol=tol, max_iter=max_iter, random_state=random_state)
class LocallyLinearEmbedding(BaseEstimator, TransformerMixin):
"""Locally Linear Embedding
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
Not used if eigen_solver=='dense'.
method : string ('standard', 'hessian', 'modified' or 'ltsa')
standard : use the standard locally linear embedding algorithm. see
reference [1]
hessian : use the Hessian eigenmap method. This method requires
``n_neighbors > n_components * (1 + (n_components + 1) / 2``
see reference [2]
modified : use the modified locally linear embedding algorithm.
see reference [3]
ltsa : use local tangent space alignment algorithm
see reference [4]
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if ``method == 'hessian'``
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if ``method == 'modified'``
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
Attributes
----------
    embedding_ : array-like, shape [n_samples, n_components]
        Stores the embedding vectors
    reconstruction_error_ : float
        Reconstruction error associated with `embedding_`
nbrs_ : NearestNeighbors object
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
def __init__(self, n_neighbors=5, n_components=2, reg=1E-3,
eigen_solver='auto', tol=1E-6, max_iter=100,
method='standard', hessian_tol=1E-4, modified_tol=1E-12,
neighbors_algorithm='auto', random_state=None):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.reg = reg
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.method = method
self.hessian_tol = hessian_tol
self.modified_tol = modified_tol
self.random_state = random_state
self.neighbors_algorithm = neighbors_algorithm
def _fit_transform(self, X):
self.nbrs_ = NearestNeighbors(self.n_neighbors,
algorithm=self.neighbors_algorithm)
random_state = check_random_state(self.random_state)
X = check_array(X)
self.nbrs_.fit(X)
self.embedding_, self.reconstruction_error_ = \
locally_linear_embedding(
self.nbrs_, self.n_neighbors, self.n_components,
eigen_solver=self.eigen_solver, tol=self.tol,
max_iter=self.max_iter, method=self.method,
hessian_tol=self.hessian_tol, modified_tol=self.modified_tol,
random_state=random_state, reg=self.reg)
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Compute the embedding vectors for data X and transform X.
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""
Transform new points into embedding space.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
X_new : array, shape = [n_samples, n_components]
Notes
-----
Because of scaling performed by this method, it is discouraged to use
it together with methods that are not scale-invariant (like SVMs)
"""
check_is_fitted(self, "nbrs_")
X = check_array(X)
ind = self.nbrs_.kneighbors(X, n_neighbors=self.n_neighbors,
return_distance=False)
weights = barycenter_weights(X, self.nbrs_._fit_X[ind],
reg=self.reg)
X_new = np.empty((X.shape[0], self.n_components))
for i in range(X.shape[0]):
X_new[i] = np.dot(self.embedding_[ind[i]].T, weights[i])
return X_new
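# Hedged usage sketch: a minimal end-to-end run of the estimator above on
# random data, only to show the expected shapes. The sample sizes and
# hyper-parameters are arbitrary.
if __name__ == "__main__":
    rng = np.random.RandomState(42)
    X_demo = rng.rand(100, 5)                   # 100 samples, 5 features
    lle = LocallyLinearEmbedding(n_neighbors=10, n_components=2,
                                 random_state=42)
    X_embedded = lle.fit_transform(X_demo)
    print(X_embedded.shape)                     # expected: (100, 2)
    print(lle.reconstruction_error_)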
| bsd-3-clause |
samstern/MSc-Project | pybrain/rl/environments/timeseries/timeseries.py | 1 | 6887 | __author__ = 'Sam Stern, [email protected]'
from random import gauss
from pybrain.rl.environments.environment import Environment
from numpy import array, matrix, empty, append
from math import log, exp
import pandas as pd
import csv
import time
class TSEnvironment(Environment):
""" test time-series environment for artificial data-set"""
def __init__(self):
"""Initialize environment randomly"""
self.time=0
self.action=[]
self.actionHistory=array([-1.0])
self.ts=None
def getSensors(self):
""" the currently visible state of the world (the observation may be
stochastic - repeated calls returning different values)
:rtype: by default, this is assumed to be a numpy array of doubles
"""
t=self.time
currentState=self.worldState[t]
return currentState
def performAction(self, action):
""" perform an action on the world that changes it's internal state (maybe
stochastically).
:key action: an action that should be executed in the Environment.
:type action: by default, this is assumed to be a numpy array of doubles
"""
self.action=action
self.actionHistory=append(self.actionHistory,action)
def incrementTime(self):
self.time+=1
def reset(self):
"""set time back to the start"""
self.time=0
self.actionHistory=array([-1.0])
@property
def indim(self):
return len(self.action)
@property
def outdim(self):
return len(self.sensors)
# loads in data from csv file
class MarketEnvironment(TSEnvironment):
def __init__(self,*args):
super(MarketEnvironment,self).__init__()
if len(args)==0:
inData=self.loadData() #input data as pandas dataframe
print(type(inData))
elif len(args)==1:
inData=args[0]
print(type(inData))
else:
            print('something went wrong. The market environment expects at most one argument')
#self.ts=inData['RETURNS'].shift(-1).as_matrix()
self.ts=inData['RETURNS'].as_matrix()
self.worldState=self.createWorldState(inData)
pass
#self.ts=self.dataMat[:,0] #just the returns timeseries
def createWorldState(self,inData):
# when making a decision at time t, only use data as recent as t-1
state=inData.shift().ix[1:]
return state.as_matrix()
def loadData(self):
#read in csv file where the dates are the keys
data=pd.read_csv('data/data1.csv',parse_dates=['DATE'],index_col='DATE')
#insert a percenage returns column
data['RETURNS']=data['Price'].diff()#pct_change()
#make sure data is complete
data=data.dropna()
cols=data.columns.tolist()
cols=cols[-1:]+cols[:-1]
data=data[cols]
data=data.drop('Price',1) #don't want the price
return data
# Special case of random walk with autoregressive unit root
class RWEnvironment(TSEnvironment):
def __init__(self,tsLength):
super(RWEnvironment,self).__init__()
self.tsLength=tsLength
self.alpha=0.9
self.k=3.0
self.n=10
self.reset()
pass
def reset(self):
super(RWEnvironment,self).reset()
tsLength=self.tsLength
        self.betas=RWEnvironment._createBetaSeries(self.tsLength,self.alpha)
self.logPS=RWEnvironment._createLogPS(tsLength,self.betas,self.k)
self.ts=RWEnvironment._createTS(tsLength,self.logPS)
self.worldState=array(RWEnvironment._createWorldState(tsLength,self.ts,self.n))
self.ts=array(self.ts[self.n:])
@staticmethod
def _createTS(tsLength,ps):
R=max(ps)-min(ps)
z=[exp(ps[i]/R) for i in range(tsLength)]
ts=[100*((z[i]/z[i-1])-1) for i in range(1,tsLength)]
return ts
@staticmethod
def _createLogPS(tsLength,betas,k): #log price series
ts=[0 for i in range(tsLength)]
ts[0]=gauss(0.0,1.0)
for i in range(1,tsLength):
ts[i]=ts[i-1]+betas[i-1]+k*gauss(0,1)
return ts
@staticmethod
    def _createBetaSeries(tsLength,alpha):
ts=[0 for i in range(tsLength)]
for i in range(1,tsLength):
ts[i]=alpha*ts[i-1]+gauss(0,1)
return ts
@staticmethod
def _createWorldState(tsLength,ts,n):
state=[[0 for i in range(n)] for i in range(tsLength-n)]
for i in range(tsLength-n):
state[i]=ts[i:i+n]
return state
# Special case of AR(1) process
class AR1Environment(TSEnvironment):
def __init__(self,tsLength):
super(AR1Environment,self).__init__()
self.tsLength=tsLength
        self.rho=0.99 #AR(1) coefficient
self.ts=AR1Environment.__createTS(tsLength,self.rho)
self.worldState=[array(self.ts[i]) for i in range(len(self.ts))]
self.ts=self.ts[1:]
@staticmethod
def __createTS(tsLength,rho):
ts=[0 for i in range(tsLength)]
#ts = array([0.0 for x in range(tsLength)])
for i in range(1,tsLength):
ts[i]=rho*ts[i-1]+gauss(0.0,0.2)
return ts
# Special case of SnP returns Environment
class DailySnPEnvironment(TSEnvironment):
def __init__(self):
super(DailySnPEnvironment,self).__init__()
self.ts=DailySnPEnvironment.__importSnP()
@staticmethod
def __importSnP():
import csv
with open('pybrain/rl/environments/timeseries/SnP_data.csv','r') as f:
data = [row for row in csv.reader(f.read().splitlines())]
price=[]
[price.append(data[i][4]) for i in range(1,len(data))]
price.reverse()
price=map(float,price)
        rets=matrix([(price[i]-price[i-1])/price[i-1] for i in range(1,len(price))])
return rets
class MonthlySnPEnvironment(TSEnvironment):
def __init__(self):
super(MonthlySnPEnvironment,self).__init__()
ts, dates=MonthlySnPEnvironment.__importSnP()
self.ts=ts
self.dates=dates
@staticmethod
def __importSnP():
with open('SnP_data.csv','r') as f:
data = [row for row in csv.reader(f.read().splitlines())]
data.pop(0) #get rid of the labels
dates=[]
price=[]
dailyLogRets=[]
monthlyLogRets=[0.0]
j=0
for i in range(len(data)):
price.insert(0,float(data[i][4]))
dates.insert(0,time.strptime(data[i][0],"%d/%m/%y"))
if i>0:
dailyLogRets.insert(0,log(price[1])-log(price[0]))
if dates[0][1]==dates[1][1]: #if the months are the same
monthlyLogRets[0]+=dailyLogRets[0]
else:
monthlyLogRets.insert(0,0)
return matrix(monthlyLogRets), dates
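# Hedged usage sketch: a minimal interaction loop with the synthetic AR(1)
# environment defined above. The series length, the fixed action and the
# number of steps are arbitrary; running this requires pybrain to be installed.
if __name__ == '__main__':
    env = AR1Environment(tsLength=200)
    for _ in range(5):
        observation = env.getSensors()   # current (lagged) state
        env.performAction([1.0])         # e.g. hold a fixed long position
        env.incrementTime()
    print(len(env.actionHistory))        # 1 initial entry plus the 5 actions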
| bsd-3-clause |
jwass/folium | folium/utilities.py | 1 | 4372 | # -*- coding: utf-8 -*-
'''
Utilities
-------
Utility module for Folium helper functions.
'''
from __future__ import print_function
from __future__ import division
import math
import time  # used by transform_data for Timestamp/Period values
from jinja2 import Environment, PackageLoader, Template
try:
import pandas as pd
except ImportError:
pd = None
try:
import numpy as np
except ImportError:
np = None
def get_templates():
'''Get Jinja templates'''
return Environment(loader=PackageLoader('folium', 'templates'))
def color_brewer(color_code):
'''Generate a colorbrewer color scheme of length 'len', type 'scheme.
Live examples can be seen at http://colorbrewer2.org/'''
schemes = {'BuGn': ['#EDF8FB', '#CCECE6', '#CCECE6', '#66C2A4', '#41AE76',
'#238B45', '#005824'],
'BuPu': ['#EDF8FB', '#BFD3E6', '#9EBCDA', '#8C96C6', '#8C6BB1',
'#88419D', '#6E016B'],
'GnBu': ['#F0F9E8', '#CCEBC5', '#A8DDB5', '#7BCCC4', '#4EB3D3',
'#2B8CBE', '#08589E'],
'OrRd': ['#FEF0D9', '#FDD49E', '#FDBB84', '#FC8D59', '#EF6548',
'#D7301F', '#990000'],
'PuBu': ['#F1EEF6', '#D0D1E6', '#A6BDDB', '#74A9CF', '#3690C0',
'#0570B0', '#034E7B'],
'PuBuGn': ['#F6EFF7', '#D0D1E6', '#A6BDDB', '#67A9CF', '#3690C0',
'#02818A', '#016450'],
'PuRd': ['#F1EEF6', '#D4B9DA', '#C994C7', '#DF65B0', '#E7298A',
'#CE1256', '#91003F'],
'RdPu': ['#FEEBE2', '#FCC5C0', '#FA9FB5', '#F768A1', '#DD3497',
'#AE017E', '#7A0177'],
'YlGn': ['#FFFFCC', '#D9F0A3', '#ADDD8E', '#78C679', '#41AB5D',
'#238443', '#005A32'],
'YlGnBu': ['#FFFFCC', '#C7E9B4', '#7FCDBB', '#41B6C4', '#1D91C0',
'#225EA8', '#0C2C84'],
'YlOrBr': ['#FFFFD4', '#FEE391', '#FEC44F', '#FE9929', '#EC7014',
'#CC4C02', '#8C2D04'],
'YlOrRd': ['#FFFFB2', '#FED976', '#FEB24C', '#FD8D3C', '#FC4E2A',
'#E31A1C', '#B10026']}
return schemes.get(color_code, None)
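# Example (illustrative only): each scheme is a list of 7 hex color strings
# and unknown codes fall back to None, e.g.
#     >>> color_brewer('YlGn')[0]
#     '#FFFFCC'
#     >>> color_brewer('NoSuchScheme') is None
#     True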
def transform_data(data):
'''Transform Pandas DataFrame into JSON format
Parameters
----------
data: DataFrame or Series
Pandas DataFrame or Series
Returns
-------
JSON compatible dict
Example
-------
>>>transform_data(df)
'''
if pd is None:
raise ImportError("The Pandas package is required for this functionality")
if np is None:
raise ImportError("The NumPy package is required for this functionality")
def type_check(value):
'''Type check values for JSON serialization. Native Python JSON
serialization will not recognize some Numpy data types properly,
so they must be explictly converted.'''
if pd.isnull(value):
return None
elif (isinstance(value, pd.tslib.Timestamp) or
isinstance(value, pd.Period)):
return time.mktime(value.timetuple())
elif isinstance(value, (int, np.integer)):
return int(value)
elif isinstance(value, (float, np.float_)):
return float(value)
elif isinstance(value, str):
return str(value)
else:
return value
if isinstance(data, pd.Series):
json_data = [{type_check(x): type_check(y) for x, y in data.iteritems()}]
elif isinstance(data, pd.DataFrame):
json_data = [{type_check(y): type_check(z) for x, y, z in data.itertuples()}]
return json_data
def split_six(series=None):
'''Given a Pandas Series, get a domain of values from zero to the 90% quantile
rounded to the nearest order-of-magnitude integer. For example, 2100 is rounded
to 2000, 2790 to 3000.
Parameters
----------
series: Pandas series, default None
Returns
-------
list
'''
if pd is None:
raise ImportError("The Pandas package is required for this functionality")
def base(x):
if x > 0:
base = pow(10, math.floor(math.log10(x)))
return round(x/base)*base
else:
return 0
quants = [0, 0.5, 0.75, 0.85, 0.9]
return [base(series.quantile(x)) for x in quants]
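# Hedged usage sketch: a worked example of the helpers above on a toy series.
# The numbers are arbitrary; with values running up to 2790 the quantiles are
# rounded to the nearest order-of-magnitude integer, giving breaks like
# [0, 1000, 2000, 2000, 3000]. Requires pandas.
if __name__ == '__main__':
    if pd is not None:
        demo_series = pd.Series(range(0, 2791))
        print(split_six(demo_series))               # e.g. [0, 1000.0, 2000.0, 2000.0, 3000.0]
        print(transform_data(demo_series.head(3)))  # a single JSON-ready dict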
| mit |
Jimmy-Morzaria/scikit-learn | benchmarks/bench_plot_svd.py | 325 | 2899 | """Benchmarks of Singular Value Decomposition (Exact and Approximate)
The data is mostly low rank but is a fat infinite tail.
"""
import gc
from time import time
import numpy as np
from collections import defaultdict
from scipy.linalg import svd
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import make_low_rank_matrix
def compute_bench(samples_range, features_range, n_iter=3, rank=50):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
X = make_low_rank_matrix(n_samples, n_features,
effective_rank=rank,
tail_strength=0.2)
gc.collect()
print("benchmarking scipy svd: ")
tstart = time()
svd(X, full_matrices=False)
results['scipy svd'].append(time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=0")
tstart = time()
randomized_svd(X, rank, n_iter=0)
results['scikit-learn randomized_svd (n_iter=0)'].append(
time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=%d "
% n_iter)
tstart = time()
randomized_svd(X, rank, n_iter=n_iter)
results['scikit-learn randomized_svd (n_iter=%d)'
% n_iter].append(time() - tstart)
return results
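# Note: `compute_bench` returns a dict keyed by method label ('scipy svd',
# 'scikit-learn randomized_svd (n_iter=...)'), each value being the list of
# wall-clock timings in seconds gathered over the (n_samples, n_features) grid.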
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(2, 1000, 4).astype(np.int)
features_range = np.linspace(2, 1000, 4).astype(np.int)
results = compute_bench(samples_range, features_range)
label = 'scikit-learn singular value decomposition benchmark results'
fig = plt.figure(label)
ax = fig.gca(projection='3d')
for c, (label, timings) in zip('rbg', sorted(results.iteritems())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.legend()
plt.show()
| bsd-3-clause |
Ninad998/FinalYearProject | PythonScripts/CNNModelCreatorWordLSTM.py | 1 | 14897 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import numpy as np
np.random.seed(123)
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils.np_utils import to_categorical
from keras.models import Sequential, Model
from keras.layers import Embedding, Convolution1D, MaxPooling1D, LSTM
from keras.layers import Input, Merge, Dense
from keras.layers import Dropout
from keras.optimizers import SGD
from keras.callbacks import ModelCheckpoint
databaseConnectionServer = 'srn02.cs.cityu.edu.hk'
documentTable = 'document'
def readVectorData(fileName, GLOVE_DIR = 'glove/'):
print('Level = Word')
embeddings_index = {}
f = open(os.path.join(GLOVE_DIR, fileName))
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
print('File used: %s' % (fileName))
print('Found %s word vectors.' % (len(embeddings_index)))
return embeddings_index
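# Note: `readVectorData` assumes the standard GloVe text format: one token per
# line followed by its vector components, e.g.
#     the 0.418 0.24968 -0.41242 ...
# The 'glove/' directory is only the assumed default location for these files.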
def loadAuthData(authorList, doc_id, chunk_size = 1000, samples = 3200):
texts = [] # list of text samples
labels_index = {} # dictionary mapping label name to numeric id
labels = [] # list of label ids
import DatabaseQuery
from sshtunnel import SSHTunnelForwarder
PORT=5432
with SSHTunnelForwarder((databaseConnectionServer, 22),
ssh_username='stylometry',
ssh_password='stylometry',
remote_bind_address=('localhost', 5432),
local_bind_address=('localhost', 5400)):
textToUse = DatabaseQuery.getWordAuthData(5400, authorList, doc_id,
documentTable = documentTable, chunk_size = chunk_size)
labels = []
texts = []
size = []
authorList = textToUse.author_id.unique()
for auth in authorList:
current = textToUse.loc[textToUse['author_id'] == auth]
size.append(current.shape[0])
print("Author: %5s Size: %5s" % (auth, current.shape[0]))
print("Min: %s" % (min(size)))
print("Max: %s" % (max(size)))
authorList = authorList.tolist()
for auth in authorList:
current = textToUse.loc[textToUse['author_id'] == auth]
if (samples > min(size)):
samples = min(size)
current = current.sample(n = samples)
textlist = current.doc_content.tolist()
texts = texts + textlist
labels = labels + [authorList.index(author_id) for author_id in current.author_id.tolist()]
labels_index = {}
labels_index[0] = 0
for i, auth in enumerate(authorList):
labels_index[i] = auth
del textToUse
print('Authors %s.' % (str(authorList)))
print('Found %s texts.' % len(texts))
print('Found %s labels.' % len(labels))
return (texts, labels, labels_index, samples)
def loadDocData(authorList, doc_id, chunk_size = 1000):
texts = [] # list of text samples
labels = [] # list of label ids
import DatabaseQuery
from sshtunnel import SSHTunnelForwarder
PORT=5432
with SSHTunnelForwarder((databaseConnectionServer, 22),
ssh_username='stylometry',
ssh_password='stylometry',
remote_bind_address=('localhost', 5432),
local_bind_address=('localhost', 5400)):
textToUse = DatabaseQuery.getWordDocData(5400, doc_id, documentTable = documentTable,
chunk_size = chunk_size)
labels = []
texts = []
for index, row in textToUse.iterrows():
labels.append(authorList.index(row.author_id))
texts.append(row.doc_content)
del textToUse
print('Found %s texts.' % len(texts))
return (texts, labels)
def preProcessTrainVal(texts, labels, chunk_size = 1000, MAX_NB_WORDS = 40000, VALIDATION_SPLIT = 0.2):
global tokenizer, word_index
# finally, vectorize the text samples into a 2D integer tensor
tokenizer = Tokenizer(nb_words=MAX_NB_WORDS)
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))
data = pad_sequences(sequences, maxlen=chunk_size)
labels = to_categorical(np.asarray(labels))
print('Shape of data tensor:', data.shape)
print('Shape of label tensor:', labels.shape)
# split the data into a training set and a validation set
from sklearn.model_selection import train_test_split
trainX, valX, trainY, valY = train_test_split(data, labels, test_size=VALIDATION_SPLIT)
del data, labels
return (trainX, trainY, valX, valY)
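# Note: `preProcessTrainVal` returns integer matrices trainX/valX of shape
# (n_samples, chunk_size) and one-hot label matrices trainY/valY of shape
# (n_samples, n_classes). The fitted `tokenizer` and `word_index` are kept as
# module-level globals for reuse by `preProcessTest` and `prepareEmbeddingMatrix`.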
def makeTokenizer():
global tokenizer, word_index
import cPickle as pickle
with open('tokenizer.pickle', 'rb') as handle:
tokenizer = pickle.load(handle)
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))
def preProcessTest(texts, labels_index, labels = None, chunk_size = 1000, MAX_NB_WORDS = 40000):
# finally, vectorize the text samples into a 2D integer tensor
sequences = tokenizer.texts_to_sequences(texts)
print('Found %s unique tokens.' % len(word_index))
X = pad_sequences(sequences, maxlen = chunk_size)
print('Shape of data tensor:', X.shape)
testX = X[:]
if labels is not None:
testY = labels[:]
return (testX, testY)
return (testX)
def prepareEmbeddingMatrix(embeddings_index, MAX_NB_WORDS = 40000, EMBEDDING_DIM = 200):
global nb_words, embedding_matrix
nb_words = MAX_NB_WORDS
embedding_matrix = np.zeros((nb_words + 1, EMBEDDING_DIM))
for word, i in word_index.items():
if i > MAX_NB_WORDS:
continue
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
# words not found in embedding index will be all-zeros.
embedding_matrix[i] = embedding_vector
return embedding_matrix
def compileModel(classes, embedding_matrix, EMBEDDING_DIM = 200, chunk_size = 1000, CONVOLUTION_FEATURE = 256,
BORDER_MODE = 'valid', LSTM_FEATURE = 256, DENSE_FEATURE = 256,
DROP_OUT = 0.5, LEARNING_RATE = 0.01, MOMENTUM = 0.9):
global sgd
    ngram_filters = [3, 4] # Define ngrams list, 3-gram, 4-gram
convs = []
graph_in = Input(shape=(chunk_size, EMBEDDING_DIM))
for n_gram in ngram_filters:
conv = Convolution1D( # Layer X, Features: 256, Kernel Size: ngram
nb_filter=CONVOLUTION_FEATURE, # Number of kernels or number of filters to generate
filter_length=n_gram, # Size of kernels, ngram
activation='relu')(graph_in) # Activation function to use
pool = MaxPooling1D( # Layer X a, Max Pooling: 3
pool_length=3)(conv) # Size of kernels
lstm = LSTM( # Layer X b, Output Size: 256
output_dim=LSTM_FEATURE)(pool) # Features: 256
convs.append(lstm)
model = Sequential()
model.add(Embedding( # Layer 0, Start
input_dim=nb_words + 1, # Size to dictionary, has to be input + 1
output_dim=EMBEDDING_DIM, # Dimensions to generate
weights=[embedding_matrix], # Initialize word weights
input_length=chunk_size, # Define length to input sequences in the first layer
trainable=False)) # Disable weight changes during training
model.add(Dropout(0.25)) # Dropout 25%
out = Merge(mode='concat')(convs) # Layer 1, Output Size: Concatted ngrams feature maps
graph = Model(input=graph_in, output=out) # Concat the ngram convolutions
model.add(graph) # Concat the ngram convolutions
model.add(Dropout(DROP_OUT)) # Dropout 50%
model.add(Dense( # Layer 3, Output Size: 256
output_dim=DENSE_FEATURE, # Output dimension
activation='relu')) # Activation function to use
model.add(Dense( # Layer 4, Output Size: Size Unique Labels, Final
output_dim=classes, # Output dimension
activation='softmax')) # Activation function to use
sgd = SGD(lr=LEARNING_RATE, momentum=MOMENTUM, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd,
metrics=['accuracy'])
print("Done compiling.")
return model
def recompileModel(classes, embedding_matrix, EMBEDDING_DIM = 200, chunk_size = 1000, CONVOLUTION_FEATURE = 256,
BORDER_MODE = 'valid', LSTM_FEATURE = 256, DENSE_FEATURE = 256,
DROP_OUT = 0.5, LEARNING_RATE = 0.01, MOMENTUM = 0.9):
global sgd
    ngram_filters = [3, 4]                            # Define ngrams list: 3-gram and 4-gram
convs = []
graph_in = Input(shape=(chunk_size, EMBEDDING_DIM))
for n_gram in ngram_filters:
conv = Convolution1D( # Layer X, Features: 256, Kernel Size: ngram
nb_filter=CONVOLUTION_FEATURE, # Number of kernels or number of filters to generate
filter_length=n_gram, # Size of kernels, ngram
activation='relu')(graph_in) # Activation function to use
pool = MaxPooling1D( # Layer X a, Max Pooling: 3
pool_length=3)(conv) # Size of kernels
lstm = LSTM( # Layer X b, Output Size: 256
output_dim=LSTM_FEATURE)(pool) # Features: 256
convs.append(lstm)
model = Sequential()
model.add(Embedding( # Layer 0, Start
input_dim=nb_words + 1, # Size to dictionary, has to be input + 1
output_dim=EMBEDDING_DIM, # Dimensions to generate
weights=[embedding_matrix], # Initialize word weights
input_length=chunk_size, # Define length to input sequences in the first layer
trainable=False)) # Disable weight changes during training
model.add(Dropout(0.25)) # Dropout 25%
out = Merge(mode='concat')(convs) # Layer 1, Output Size: Concatted ngrams feature maps
graph = Model(input=graph_in, output=out) # Concat the ngram convolutions
model.add(graph) # Concat the ngram convolutions
model.add(Dropout(DROP_OUT)) # Dropout 50%
model.add(Dense( # Layer 3, Output Size: 256
output_dim=DENSE_FEATURE, # Output dimension
activation='relu')) # Activation function to use
model.add(Dense( # Layer 4, Output Size: Size Unique Labels, Final
output_dim=classes, # Output dimension
activation='softmax')) # Activation function to use
sgd = SGD(lr=LEARNING_RATE, momentum=MOMENTUM, nesterov=True)
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
filename = "author-cnn-ngrams-lstm-word.hdf5"
filepath = dir_path + "/" + filename
model.load_weights(filepath)
model.compile(loss='categorical_crossentropy', optimizer=sgd,
metrics=['accuracy'])
print("Done compiling.")
return model
def fitModel(model, trainX, trainY, valX, valY, nb_epoch=30, batch_size=10):
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
filename = "author-cnn-ngrams-lstm-word.hdf5"
filepath = dir_path + "/" + filename
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
callbacks_list = [checkpoint]
# Function to take input of data and return fitted model
history = model.fit(trainX, trainY, validation_data=(valX, valY),
nb_epoch=nb_epoch, batch_size=batch_size,
callbacks=callbacks_list)
# load weights from the best checkpoint
model.load_weights(filepath)
# Compile model again (required to make predictions)
model.compile(loss='categorical_crossentropy', optimizer=sgd,
metrics=['accuracy'])
train_acc = (model.evaluate(trainX, trainY))[1]
print("\n\nFinal Train Accuracy: %.2f" % (train_acc * 100))
val_acc = (model.evaluate(valX, valY))[1]
print("\nFinal Test Accuracy: %.2f" % (val_acc * 100))
import cPickle as pickle
with open('tokenizer.pickle', 'wb') as handle:
pickle.dump(tokenizer, handle, protocol=pickle.HIGHEST_PROTOCOL)
return (model, history, train_acc, val_acc)
def predictModel(model, testX, batch_size=10):
# Function to take input of data and return prediction model
predY = np.array(model.predict(testX, batch_size=batch_size))
predYList = predY[:]
entro = []
flag = False
import math
for row in predY:
entroval = 0
        for i in row:
            if i <= 0:
                flag = True
            else:
                entroval += i * math.log(i, 2)
        entroval = -entroval
        entro.append(entroval)
    if not flag:
yx = zip(entro, predY)
yx = sorted(yx, key = lambda t: t[0])
newPredY = [x for y, x in yx]
predYEntroList = newPredY[:int(len(newPredY)*0.5)]
predY = np.mean(predYEntroList, axis=0)
else:
predY = np.mean(predYList, axis=0)
return (predYList, predY)
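# End-to-end sketch (not part of the original pipeline) showing one possible way
# to chain the helper functions above. The arguments authorList, doc_id and
# embeddings_index are placeholders to be supplied by the caller.
def examplePipelineRun(authorList, doc_id, embeddings_index, chunk_size=1000):
    texts, labels = loadDocData(authorList, doc_id, chunk_size=chunk_size)
    trainX, trainY, valX, valY = preProcessTrainVal(texts, labels,
                                                    chunk_size=chunk_size)
    embedding_matrix = prepareEmbeddingMatrix(embeddings_index)
    model = compileModel(len(authorList), embedding_matrix,
                         chunk_size=chunk_size)
    return fitModel(model, trainX, trainY, valX, valY)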
| mit |
HeraclesHX/scikit-learn | examples/missing_values.py | 233 | 3056 | """
======================================================
Imputing missing values before building an estimator
======================================================
This example shows that imputing the missing values can give better results
than discarding the samples containing any missing value.
Imputing does not always improve the predictions, so please check via cross-validation.
Sometimes dropping rows or using marker values is more effective.
Missing values can be replaced by the mean, the median or the most frequent
value using the ``strategy`` hyper-parameter.
The median is a more robust estimator for data with high magnitude variables
which could dominate results (otherwise known as a 'long tail').
Script output::
Score with the entire dataset = 0.56
Score without the samples containing missing values = 0.48
Score after imputation of the missing values = 0.55
In this case, imputing helps the classifier get close to the original score.
"""
import numpy as np
from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.cross_validation import cross_val_score
rng = np.random.RandomState(0)
dataset = load_boston()
X_full, y_full = dataset.data, dataset.target
n_samples = X_full.shape[0]
n_features = X_full.shape[1]
# Estimate the score on the entire dataset, with no missing values
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_full, y_full).mean()
print("Score with the entire dataset = %.2f" % score)
# Add missing values in 75% of the lines
missing_rate = 0.75
n_missing_samples = int(np.floor(n_samples * missing_rate))
missing_samples = np.hstack((np.zeros(n_samples - n_missing_samples,
dtype=np.bool),
np.ones(n_missing_samples,
dtype=np.bool)))
rng.shuffle(missing_samples)
missing_features = rng.randint(0, n_features, n_missing_samples)
# Estimate the score without the lines containing missing values
X_filtered = X_full[~missing_samples, :]
y_filtered = y_full[~missing_samples]
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_filtered, y_filtered).mean()
print("Score without the samples containing missing values = %.2f" % score)
# Estimate the score after imputation of the missing values
X_missing = X_full.copy()
X_missing[np.where(missing_samples)[0], missing_features] = 0
y_missing = y_full.copy()
estimator = Pipeline([("imputer", Imputer(missing_values=0,
strategy="mean",
axis=0)),
("forest", RandomForestRegressor(random_state=0,
n_estimators=100))])
score = cross_val_score(estimator, X_missing, y_missing).mean()
print("Score after imputation of the missing values = %.2f" % score)
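# Variation (sketch, not part of the original example): the docstring above
# notes that the median is more robust when a few large-magnitude values
# dominate, so the same imputation step can be re-run with strategy="median"
# for comparison.
estimator_median = Pipeline([("imputer", Imputer(missing_values=0,
                                                 strategy="median",
                                                 axis=0)),
                             ("forest", RandomForestRegressor(random_state=0,
                                                              n_estimators=100))])
score_median = cross_val_score(estimator_median, X_missing, y_missing).mean()
print("Score after median imputation of the missing values = %.2f" % score_median)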
| bsd-3-clause |
lhoang29/vowpal_wabbit | python/tests/test_sklearn_vw.py | 1 | 5425 | from collections import namedtuple
import numpy as np
import pytest
from vowpalwabbit.sklearn_vw import VW, VWClassifier, VWRegressor, tovw
from sklearn import datasets
from sklearn.exceptions import NotFittedError
from scipy.sparse import csr_matrix
"""
Test utilities to support integration of Vowpal Wabbit and scikit-learn
"""
Dataset = namedtuple('Dataset', 'x, y')
@pytest.fixture(scope='module')
def data():
x, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
x = x.astype(np.float32)
return Dataset(x=x, y=y)
class TestVW:
def test_validate_vw_estimator(self):
"""
Run VW and VWClassifier through the sklearn estimator validation check
Note: the VW estimators fail sklearn's estimator validation check. The validator creates a new
instance of the estimator with the estimator's default args, '--quiet' in VW's case. At some point
in the validation sequence it calls fit() with some fake data. The data gets formatted via tovw() to:
2 1 | 0:0.5488135039273248 1:0.7151893663724195 2:0.6027633760716439 3:0.5448831829968969 4:0.4236547993389047 5:0.6458941130666561 6:0.4375872112626925 7:0.8917730007820798 8:0.9636627605010293 9:0.3834415188257777
This gets passed into vw.learn and the python process dies with the error, "Process finished with exit code 139"
At some point it would probably be worth while figuring out the problem this and getting the two estimators to
pass sklearn's validation check
"""
# check_estimator(VW)
# check_estimator(VWClassifier)
def test_init(self):
assert isinstance(VW(), VW)
def test_fit(self, data):
model = VW(loss_function='logistic')
assert not hasattr(model, 'fit_')
model.fit(data.x, data.y)
assert model.fit_
def test_passes(self, data):
n_passes = 2
model = VW(loss_function='logistic', passes=n_passes)
assert model.passes_ == n_passes
model.fit(data.x, data.y)
weights = model.get_coefs()
model = VW(loss_function='logistic')
# first pass weights should not be the same
model.fit(data.x, data.y)
assert not np.allclose(weights.data, model.get_coefs().data)
def test_predict_not_fit(self, data):
model = VW(loss_function='logistic')
with pytest.raises(NotFittedError):
model.predict(data.x[0])
def test_predict(self, data):
model = VW(loss_function='logistic')
model.fit(data.x, data.y)
assert np.isclose(model.predict(data.x[:1][:1])[0], 0.406929)
def test_predict_no_convert(self):
model = VW(loss_function='logistic', convert_to_vw=False)
model.fit(['-1 | bad', '1 | good'])
assert np.isclose(model.predict(['| good'])[0], 0.245515)
def test_set_params(self):
model = VW()
assert 'l' not in model.params
model.set_params(l=0.1)
assert model.params['l'] == 0.1
# confirm model params reset with new construction
model = VW()
assert 'l' not in model.params
def test_get_coefs(self, data):
model = VW()
model.fit(data.x, data.y)
weights = model.get_coefs()
assert np.allclose(weights.indices, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 116060])
def test_get_intercept(self, data):
model = VW()
model.fit(data.x, data.y)
intercept = model.get_intercept()
assert isinstance(intercept, float)
def test_oaa(self):
X = ['1 | feature1:2.5',
'2 | feature1:0.11 feature2:-0.0741',
'3 | feature3:2.33 feature4:0.8 feature5:-3.1',
'1 | feature2:-0.028 feature1:4.43',
'2 | feature5:1.532 feature6:-3.2']
model = VW(convert_to_vw=False, oaa=3)
model.fit(X)
assert np.allclose(model.predict(X), [ 1., 2., 3., 1., 2.])
class TestVWClassifier:
def test_init(self):
assert isinstance(VWClassifier(), VWClassifier)
def test_decision_function(self, data):
classes = np.array([-1., 1.])
raw_model = VW(loss_function='logistic')
raw_model.fit(data.x, data.y)
predictions = raw_model.predict(data.x)
class_indices = (predictions > 0).astype(np.int)
expected = classes[class_indices]
model = VWClassifier()
model.fit(data.x, data.y)
actual = model.predict(data.x)
assert np.allclose(expected, actual)
class TestVWRegressor:
def test_init(self):
assert isinstance(VWRegressor(), VWRegressor)
def test_predict(self, data):
raw_model = VW()
raw_model.fit(data.x, data.y)
model = VWRegressor()
model.fit(data.x, data.y)
assert np.allclose(raw_model.predict(data.x), model.predict(data.x))
# ensure model can make multiple calls to predict
assert np.allclose(raw_model.predict(data.x), model.predict(data.x))
def test_delete(self):
raw_model = VW()
del raw_model
def test_tovw():
x = np.array([[1.2, 3.4, 5.6, 1.0, 10], [7.8, 9.10, 11, 0, 20]])
y = np.array([1, -1])
w = [1, 2]
expected = ['1 1 | 0:1.2 1:3.4 2:5.6 3:1 4:10',
'-1 2 | 0:7.8 1:9.1 2:11 4:20']
assert tovw(x=x, y=y, sample_weight=w) == expected
assert tovw(x=csr_matrix(x), y=y, sample_weight=w) == expected
| bsd-3-clause |
asnorkin/sentiment_analysis | site/lib/python2.7/site-packages/sklearn/cluster/tests/test_dbscan.py | 176 | 12155 | """
Tests for DBSCAN clustering algorithm
"""
import pickle
import numpy as np
from scipy.spatial import distance
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.neighbors import NearestNeighbors
from sklearn.cluster.dbscan_ import DBSCAN
from sklearn.cluster.dbscan_ import dbscan
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.metrics.pairwise import pairwise_distances
n_clusters = 3
X = generate_clustered_data(n_clusters=n_clusters)
def test_dbscan_similarity():
# Tests the DBSCAN algorithm with a similarity array.
# Parameters chosen specifically for this task.
eps = 0.15
min_samples = 10
# Compute similarities
D = distance.squareform(distance.pdist(X))
D /= np.max(D)
# Compute DBSCAN
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples)
labels = db.fit(D).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_feature():
# Tests the DBSCAN algorithm with a feature vector array.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
metric = 'euclidean'
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_sparse():
core_sparse, labels_sparse = dbscan(sparse.lil_matrix(X), eps=.8,
min_samples=10)
core_dense, labels_dense = dbscan(X, eps=.8, min_samples=10)
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_sparse_precomputed():
D = pairwise_distances(X)
nn = NearestNeighbors(radius=.9).fit(X)
D_sparse = nn.radius_neighbors_graph(mode='distance')
# Ensure it is sparse not merely on diagonals:
assert D_sparse.nnz < D.shape[0] * (D.shape[0] - 1)
core_sparse, labels_sparse = dbscan(D_sparse,
eps=.8,
min_samples=10,
metric='precomputed')
core_dense, labels_dense = dbscan(D, eps=.8, min_samples=10,
metric='precomputed')
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_no_core_samples():
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
for X_ in [X, sparse.csr_matrix(X)]:
db = DBSCAN(min_samples=6).fit(X_)
assert_array_equal(db.components_, np.empty((0, X_.shape[1])))
assert_array_equal(db.labels_, -1)
assert_equal(db.core_sample_indices_.shape, (0,))
def test_dbscan_callable():
# Tests the DBSCAN algorithm with a callable metric.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
# metric is the function reference, not the string key.
metric = distance.euclidean
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples,
algorithm='ball_tree')
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_balltree():
# Tests the DBSCAN algorithm with balltree for neighbor calculation.
eps = 0.8
min_samples = 10
D = pairwise_distances(X)
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='kd_tree')
labels = db.fit(X).labels_
n_clusters_3 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_3, n_clusters)
db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_4 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_4, n_clusters)
db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_5 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_5, n_clusters)
def test_input_validation():
# DBSCAN.fit should accept a list of lists.
X = [[1., 2.], [3., 4.]]
DBSCAN().fit(X) # must not raise exception
def test_dbscan_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
dbscan,
X, eps=-1.0)
assert_raises(ValueError,
dbscan,
X, algorithm='blah')
assert_raises(ValueError,
dbscan,
X, metric='blah')
assert_raises(ValueError,
dbscan,
X, leaf_size=-1)
assert_raises(ValueError,
dbscan,
X, p=-1)
def test_pickle():
obj = DBSCAN()
s = pickle.dumps(obj)
assert_equal(type(pickle.loads(s)), obj.__class__)
def test_boundaries():
# ensure min_samples is inclusive of core point
core, _ = dbscan([[0], [1]], eps=2, min_samples=2)
assert_in(0, core)
# ensure eps is inclusive of circumference
core, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2)
assert_in(0, core)
core, _ = dbscan([[0], [1], [1]], eps=.99, min_samples=2)
assert_not_in(0, core)
def test_weighted_dbscan():
# ensure sample_weight is validated
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2])
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2, 3, 4])
# ensure sample_weight has an effect
assert_array_equal([], dbscan([[0], [1]], sample_weight=None,
min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5],
min_samples=6)[0])
assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5],
min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 6],
min_samples=6)[0])
# points within eps of each other:
assert_array_equal([0, 1], dbscan([[0], [1]], eps=1.5,
sample_weight=[5, 1], min_samples=6)[0])
# and effect of non-positive and non-integer sample_weight:
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[5.9, 0.1],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[6, -1],
eps=1.5, min_samples=6)[0])
# for non-negative sample_weight, cores should be identical to repetition
rng = np.random.RandomState(42)
sample_weight = rng.randint(0, 5, X.shape[0])
core1, label1 = dbscan(X, sample_weight=sample_weight)
assert_equal(len(label1), len(X))
X_repeated = np.repeat(X, sample_weight, axis=0)
core_repeated, label_repeated = dbscan(X_repeated)
core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool)
core_repeated_mask[core_repeated] = True
core_mask = np.zeros(X.shape[0], dtype=bool)
core_mask[core1] = True
assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask)
# sample_weight should work with precomputed distance matrix
D = pairwise_distances(X)
core3, label3 = dbscan(D, sample_weight=sample_weight,
metric='precomputed')
assert_array_equal(core1, core3)
assert_array_equal(label1, label3)
# sample_weight should work with estimator
est = DBSCAN().fit(X, sample_weight=sample_weight)
core4 = est.core_sample_indices_
label4 = est.labels_
assert_array_equal(core1, core4)
assert_array_equal(label1, label4)
est = DBSCAN()
label5 = est.fit_predict(X, sample_weight=sample_weight)
core5 = est.core_sample_indices_
assert_array_equal(core1, core5)
assert_array_equal(label1, label5)
assert_array_equal(label1, est.labels_)
def test_dbscan_core_samples_toy():
X = [[0], [2], [3], [4], [6], [8], [10]]
n_samples = len(X)
for algorithm in ['brute', 'kd_tree', 'ball_tree']:
# Degenerate case: every sample is a core sample, either with its own
# cluster or including other close core samples.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=1)
assert_array_equal(core_samples, np.arange(n_samples))
assert_array_equal(labels, [0, 1, 1, 1, 2, 3, 4])
# With eps=1 and min_samples=2 only the 3 samples from the denser area
# are core samples. All other points are isolated and considered noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=2)
assert_array_equal(core_samples, [1, 2, 3])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# Only the sample in the middle of the dense area is core. Its two
# neighbors are edge samples. Remaining samples are noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=3)
assert_array_equal(core_samples, [2])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# It's no longer possible to extract core samples with eps=1:
# everything is noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=4)
assert_array_equal(core_samples, [])
assert_array_equal(labels, -np.ones(n_samples))
def test_dbscan_precomputed_metric_with_degenerate_input_arrays():
# see https://github.com/scikit-learn/scikit-learn/issues/4641 for
# more details
X = np.eye(10)
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
X = np.zeros((10, 10))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
| mit |
larsmans/scikit-learn | sklearn/tests/test_dummy.py | 1 | 16685 | from __future__ import division
import warnings
import numpy as np
import scipy.sparse as sp
from sklearn.base import clone
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns_message
from sklearn.dummy import DummyClassifier, DummyRegressor
def _check_predict_proba(clf, X, y):
proba = clf.predict_proba(X)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# We know that we can have division by zero
log_proba = clf.predict_log_proba(X)
y = np.atleast_1d(y)
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
n_outputs = y.shape[1]
n_samples = len(X)
if n_outputs == 1:
proba = [proba]
log_proba = [log_proba]
for k in range(n_outputs):
assert_equal(proba[k].shape[0], n_samples)
assert_equal(proba[k].shape[1], len(np.unique(y[:, k])))
assert_array_equal(proba[k].sum(axis=1), np.ones(len(X)))
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# We know that we can have division by zero
assert_array_equal(np.log(proba[k]), log_proba[k])
def _check_behavior_2d(clf):
# 1d case
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([1, 2, 1, 1])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
# 2d case
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_behavior_2d_for_constant(clf):
# 2d case only
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([[1, 0, 5, 4, 3],
[2, 0, 1, 2, 5],
[1, 0, 4, 5, 2],
[1, 3, 3, 2, 0]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_equality_regressor(statistic, y_learn, y_pred_learn,
y_test, y_pred_test):
assert_array_equal(np.tile(statistic, (y_learn.shape[0], 1)),
y_pred_learn)
assert_array_equal(np.tile(statistic, (y_test.shape[0], 1)),
y_pred_test)
def test_most_frequent_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [1, 2, 1, 1]
clf = DummyClassifier(strategy="most_frequent", random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
def test_most_frequent_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
n_samples = len(X)
clf = DummyClassifier(strategy="most_frequent", random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_stratified_strategy():
X = [[0]] * 5 # ignored
y = [1, 2, 1, 1, 2]
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
def test_stratified_strategy_multioutput():
X = [[0]] * 5 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 1],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_uniform_strategy():
X = [[0]] * 4 # ignored
y = [1, 2, 1, 1]
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
def test_uniform_strategy_multioutput():
X = [[0]] * 4 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_string_labels():
X = [[0]] * 5
y = ["paris", "paris", "tokyo", "amsterdam", "berlin"]
clf = DummyClassifier(strategy="most_frequent")
clf.fit(X, y)
assert_array_equal(clf.predict(X), ["paris"] * 5)
def test_classifier_exceptions():
clf = DummyClassifier(strategy="unknown")
assert_raises(ValueError, clf.fit, [], [])
assert_raises(ValueError, clf.predict, [])
assert_raises(ValueError, clf.predict_proba, [])
def test_mean_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 4 # ignored
y = random_state.randn(4)
reg = DummyRegressor()
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.mean(y)] * len(X))
def test_mean_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
mean = np.mean(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor()
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(mean, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_regressor_exceptions():
reg = DummyRegressor()
assert_raises(ValueError, reg.predict, [])
def test_median_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="median")
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
def test_median_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="median")
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="quantile", quantile=0.5)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.min(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=1)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.max(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0.3)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.percentile(y, q=30)] * len(X))
def test_quantile_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
quantile_values = np.percentile(y_learn, axis=0, q=80).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.5)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.8)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
quantile_values, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_invalid():
X = [[0]] * 5 # ignored
y = [0] * 5 # ignored
est = DummyRegressor(strategy="quantile")
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=None)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=[0])
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=-0.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=1.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile='abc')
assert_raises(TypeError, est.fit, X, y)
def test_quantile_strategy_empty_train():
est = DummyRegressor(strategy="quantile", quantile=0.4)
assert_raises(ValueError, est.fit, [], [])
def test_constant_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="constant", constant=[43])
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
reg = DummyRegressor(strategy="constant", constant=43)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
def test_constant_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
# test with 2d array
constants = random_state.randn(5)
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="constant", constant=constants)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
constants, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d_for_constant(est)
def test_y_mean_attribute_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
# when strategy = 'mean'
est = DummyRegressor(strategy='mean')
est.fit(X, y)
assert_equal(est.constant_, np.mean(y))
def test_unknown_strategey_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='gona')
assert_raises(ValueError, est.fit, X, y)
def test_constants_not_specified_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='constant')
assert_raises(TypeError, est.fit, X, y)
def test_constant_size_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X = random_state.randn(10, 10)
y = random_state.randn(10, 5)
est = DummyRegressor(strategy='constant', constant=[1, 2, 3, 4])
assert_raises(ValueError, est.fit, X, y)
def test_constant_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0, constant=1)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
X = [[0], [0], [0], [0]] # ignored
y = ['two', 'one', 'two', 'two']
clf = DummyClassifier(strategy="constant", random_state=0, constant='one')
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.array(['one'] * 4))
_check_predict_proba(clf, X, y)
def test_constant_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[2, 3],
[1, 3],
[2, 3],
[2, 0]])
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[1, 0])
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
def test_constant_strategy_exceptions():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0)
assert_raises(ValueError, clf.fit, X, y)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[2, 0])
assert_raises(ValueError, clf.fit, X, y)
def test_classification_sample_weight():
X = [[0], [0], [1]]
y = [0, 1, 0]
sample_weight = [0.1, 1., 0.1]
clf = DummyClassifier().fit(X, y, sample_weight)
assert_array_almost_equal(clf.class_prior_, [0.2 / 1.2, 1. / 1.2])
def test_constant_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[0, 1],
[4, 0],
[1, 1],
[1, 4],
[1, 1]]))
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0, constant=[1, 0])
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
def test_uniform_strategy_sparse_target_warning():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[2, 1],
[2, 2],
[1, 4],
[4, 2],
[1, 1]]))
clf = DummyClassifier(strategy="uniform", random_state=0)
assert_warns_message(UserWarning,
"the uniform strategy would not save memory",
clf.fit, X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 1/3, decimal=1)
assert_almost_equal(p[2], 1/3, decimal=1)
assert_almost_equal(p[4], 1/3, decimal=1)
def test_stratified_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[4, 1],
[0, 0],
[1, 1],
[1, 4],
[1, 1]]))
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
y_pred = y_pred.toarray()
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[0], 1. / 5, decimal=1)
assert_almost_equal(p[4], 1. / 5, decimal=1)
def test_most_frequent_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[1, 0],
[1, 3],
[4, 0],
[0, 1],
[1, 0]]))
n_samples = len(X)
clf = DummyClassifier(strategy="most_frequent", random_state=0)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |
glouppe/scikit-learn | examples/cluster/plot_adjusted_for_chance_measures.py | 286 | 4353 | """
==========================================================
Adjustment for chance in clustering performance evaluation
==========================================================
The following plots demonstrate the impact of the number of clusters and
number of samples on various clustering performance evaluation metrics.
Non-adjusted measures such as the V-Measure show a dependency between
the number of clusters and the number of samples: the mean V-Measure
of random labeling increases significantly as the number of clusters is
closer to the total number of samples used to compute the measure.
Adjusted for chance measure such as ARI display some random variations
centered around a mean score of 0.0 for any number of samples and
clusters.
Only adjusted measures can hence safely be used as a consensus index
to evaluate the average stability of clustering algorithms for a given
value of k on various overlapping sub-samples of the dataset.
"""
print(__doc__)
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from time import time
from sklearn import metrics
def uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=None, n_runs=5, seed=42):
"""Compute score for 2 random uniform cluster labelings.
    Both random labelings have the same number of clusters for each possible
    value in ``n_clusters_range``.
When fixed_n_classes is not None the first labeling is considered a ground
truth class assignment with fixed number of classes.
"""
random_labels = np.random.RandomState(seed).random_integers
scores = np.zeros((len(n_clusters_range), n_runs))
if fixed_n_classes is not None:
labels_a = random_labels(low=0, high=fixed_n_classes - 1,
size=n_samples)
for i, k in enumerate(n_clusters_range):
for j in range(n_runs):
if fixed_n_classes is None:
labels_a = random_labels(low=0, high=k - 1, size=n_samples)
labels_b = random_labels(low=0, high=k - 1, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
score_funcs = [
metrics.adjusted_rand_score,
metrics.v_measure_score,
metrics.adjusted_mutual_info_score,
metrics.mutual_info_score,
]
# 2 independent random clusterings with equal cluster number
n_samples = 100
n_clusters_range = np.linspace(2, n_samples, 10).astype(np.int)
plt.figure(1)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, np.median(scores, axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for 2 random uniform labelings\n"
"with equal number of clusters")
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.legend(plots, names)
plt.ylim(ymin=-0.05, ymax=1.05)
# Random labeling with varying n_clusters against ground class labels
# with fixed number of clusters
n_samples = 1000
n_clusters_range = np.linspace(2, 100, 10).astype(np.int)
n_classes = 10
plt.figure(2)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=n_classes)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, scores.mean(axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for random uniform labeling\n"
"against reference assignment with %d classes" % n_classes)
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.ylim(ymin=-0.05, ymax=1.05)
plt.legend(plots, names)
plt.show()
| bsd-3-clause |
alfkjartan/nvgimu | nvg/ximu/kinematics.py | 1 | 28618 | """ General functions for calculating kinematics """
__version__ = '0.1'
__author__ = 'Kjartan Halvorsen'
import numpy as np
import math
import itertools
import unittest
import warnings
import matplotlib.pyplot as pyplot
import matplotlib.dates as mdates
from scipy.interpolate import interp1d
import scipy.optimize as optimize
from scipy.integrate import cumtrapz
from scipy.signal import detrend, bessel, filtfilt
import scipy.io as sio
from datetime import datetime, timedelta, date
from nvg.maths import quaternions as quat
from nvg.algorithms import orientation
#from nvg.utilities import time_series
from nvg.ximu import pointfinder
from cyclicpython import cyclic_path
from cyclicpython.algorithms import kinematics as cpkinematics
#from cyclicpython.algorithms import fomatlab as fomatlab
from cyclicpython.algorithms import ekf as cpekf
from cyclicpython.algorithms import detect_peaks
from cyclicpython import cyclic_planar as cppl
#-------------------------------------------------------------------------------
# Callable classes that estimate orientation. It is assumed that the data
# provided is for one single gait cycle. Each callable takes
# the following arguments:
# tvec -> A numpy (N,) array of time stamps for each data point
# gyro -> A numpy (N,3) array of gyro data in [rad/s]
# acc -> A numpy (N,3) array of accelerations [m/s^2]
# mag -> A numpy (N,3) array of magnetometer data
#
# Returns
# qEst <- List of QuaternionArrays. One item per cycle
#
# Any other parameters the algorithm depends upon is set during instantiation
#-------------------------------------------------------------------------------
class CyclicEstimator:
def __init__(self, nHarmonics, detrendData=True, doPlots = False):
self.nHarmonics = nHarmonics
self.detrendData = detrendData
self.doPlots = doPlots
def estimate(self, imudta, doPlots):
"""
DEPRECATED. Use callable __call__ instead
        Runs the cyclic orientation method assuming that imudta contains a single gait cycle
"""
dt = 1.0/256.0
tvec = imudta[:,0]*dt
#accdta = imudta[:,4:7]*9.82
gyrodta = imudta[:,1:4]*np.pi/180.0
magdta = imudta[:,7:10]
omega = 2*np.pi/ (tvec[-1] - tvec[0])
(qEst, bEst) = cyclic_path.estimate_cyclic_orientation(tvec, gyrodta,
magdta, omega, self.nHarmonics)
tvec.shape = (len(tvec), 1)
return np.hstack((tvec, qEst))
def __call__(self, tvec, gyro, acc, mag,
gyroref=None, accref=None, magref=None):
omega = 2*np.pi/ (tvec[-1] - tvec[0])
if self.detrendData:
w = detrend(gyro, type='constant', axis=0)
else:
w = gyro
(qE, bE) = cyclic_path.estimate_cyclic_orientation(
tvec, w, mag, omega, self.nHarmonics)
q = quat.QuaternionArray(qE)
if (accref is not None) and (magref is not None):
phi = angle_to_accref(q, acc, accref, gyro, magref)
else:
phi = None
return (q, phi)
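# Minimal usage sketch of the estimator interface documented above. The arrays
# below are synthetic placeholders for one gait cycle and only illustrate the
# expected shapes and units; real data comes from the x-IMU recordings.
def _example_cyclic_estimator_usage():
    N = 256
    tvec = np.linspace(0.0, 1.0, N)
    gyro = 0.5 * np.sin(2 * np.pi * tvec)[:, None] * np.array([0.0, 0.0, 1.0])  # [rad/s]
    acc = np.tile(np.array([0.0, 0.0, 9.82]), (N, 1))                           # [m/s^2]
    mag = np.tile(np.array([0.4, 0.0, 0.2]), (N, 1))
    estimator = CyclicEstimator(nHarmonics=8)
    q, phi = estimator(tvec, gyro, acc, mag)   # phi is None without accref/magref
    return q, phi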
class GyroIntegratorOrientation:
def __init__(self, detrendData=True, doPlots = False):
        self.doPlots = doPlots
        self.detrendData = detrendData
        self.dt = 1.0/256.0  # sample period implied by the packet counter
def estimate(self, imudta, doPlots):
"""
DEPRECATED. Use callable __call__ instead
        Integrates the gyro data assuming that imudta contains a single gait cycle
"""
imuq = np.zeros((imudta.shape[0], 5))
initRotation = quat.Quaternion(1,0,0,0)
gMagn = 9.82
deg2rad = np.pi/180.0
imudtaT = imudta.T
t = imudta[0,0]*self.dt
orientFilter = None
for i in range(0,imuq.shape[0]):
if i == 0:
orientFilter = orientation.GyroIntegrator(t, initRotation)
else:
t = imudta[i,0]*self.dt
orientFilter(imudtaT[4:7,i]*gMagn, imudtaT[7:10,i],
imudtaT[1:4,i]*deg2rad, t)
imuq[i,0] = t
imuq[i,1] = orientFilter.rotation.latestValue.w
imuq[i,2] = orientFilter.rotation.latestValue.x
imuq[i,3] = orientFilter.rotation.latestValue.y
imuq[i,4] = orientFilter.rotation.latestValue.z
if doPlots: # Check results
pyplot.figure()
pyplot.plot(imuq[:,0], imuq[:,1:5])
#return [imuq, cycledtainds]
return imuq
def __call__(self, tvec, gyro, acc, mag,
gyroref=None, accref=None, magref=None):
if self.detrendData:
w = detrend(gyro, type='constant', axis=0)
else:
w = gyro
initRotation = quat.Quaternion(1,0,0,0)
orientFilter = orientation.GyroIntegrator(tvec[0]-0.001, initRotation)
for (t_, a_, m_, g_) in itertools.izip(tvec, acc, mag, w):
orientFilter(a_, m_, g_, t_)
q = orientFilter.rotation.values
if (accref is not None) and (magref is not None):
phi = angle_to_accref(q, acc, accref, gyro, magref)
else:
phi = None
return (q, phi)
class EKFOrientation:
    def estimate(self, imudta, cycledta, doPlots):
        # cycledta: mutable list of packet numbers marking the start of each cycle
imuq = np.zeros((imudta.shape[0], 5))
initRotation = quat.Quaternion(1,0,0,0)
initRotVelocity = np.zeros((3,1))
initCov = np.diag(np.array([1, 1, 1, 1e-1, 1e-1, 1e-1, 1e-1]))
measuremCov = np.diag(np.array([1e-1, 1e-1, 1e-1, 1e-1, 1e-1, 1e-1, 1e-1]))
procNoiseVar = 0.5 # rad^2/s^2
tau = 0.5 # s, time constant of cont time model of movement
orientFilter = None
dt = 1/256.0 # s per packet number
gMagn = 9.82
deg2rad = np.pi/180.0
imudtaT = imudta.T
cycleind = 0
cycledta.append(1e+12)
cycledtainds = []
for i in range(imuq.shape[0]):
t = imudta[i,0]*dt
if (int(imudta[i,0]) >= cycledta[cycleind]) or i == 0:
# Start of new cycle
#if orientFilter != None:
#initialRotVelocity = orientFilter.rotationalVelocity.latestValue
#initCov = np.diag(np.array([1e-1, 1e-1, 1e-1, 1e-1, 1e-1, 1e-1, 1e-1]))
orientFilter = orientation.GyroIntegrator(t, initRotation)
#initRotVelocity,
# initCov, measuremCov, procNoiseVar, tau)
if i != 0:
cycleind += 1
cycledtainds.append(i)
else:
orientFilter(imudtaT[4:7,i]*gMagn, imudtaT[7:10,i], imudtaT[1:4,i]*deg2rad, t)
imuq[i,0] = t
imuq[i,1] = orientFilter.rotation.latestValue.w
imuq[i,2] = orientFilter.rotation.latestValue.x
imuq[i,3] = orientFilter.rotation.latestValue.y
imuq[i,4] = orientFilter.rotation.latestValue.z
cycledta.pop()
if doPlots: # Check results
pyplot.figure()
pyplot.plot(imuq[:,0], imuq[:,1:5])
for ind in cycledta:
pyplot.plot([dt*ind, dt*ind], [-1, 1], 'm')
#return [imuq, cycledtainds]
return imuq
class angle_to_vertical_integrator_tracker(object):
"""
Callable that tracks the angle to vertical of a segment by integrating
the angular velocity projected onto the sagittal plane.
"""
def __init__(self, sagittalDir, vertRefDir=np.array([-1.0, 0, 0]),
g=9.82, gThreshold=1e-1, plotResults=False):
self.sagittalDir = sagittalDir
self.vertRefDir = vertRefDir
self.g = g
self.gThreshold = gThreshold
self.plotResults = plotResults
self.tvec = None
self.yinc = None
self.phi = None
self.gyrodta = None
self.accdta = None
def __call__(self, tvec, acc, gyro, mag):
# Get the inclination measurements
horRef = np.cross(self.sagittalDir, self.vertRefDir)
(tauK, yinc) = inclination(acc, self.vertRefDir, horRef,
self.g, self.gThreshold)
# Integrate the projected zero-mean gyro data
w = detrend(np.dot(gyro, self.sagittalDir), type='constant')
wint = cumtrapz(w, tvec)
wint = np.insert(wint, 0, wint[0])
# translate the angle (add constant offset) to best match inclination
# measurements
# phi[tauK] = wint[tauK] + offset = yinc
# 1*offset = yinc - wint[tauK]
# offset = 1' * (yinc - wint[tauK])/len(tauK)
phi = wint + np.mean(yinc - wint[tauK])
self.phi = phi
self.yinc = np.column_stack( (tvec[tauK], yinc))
self.tvec = tvec
self.gyrodta = gyro
self.accdta = acc
# Return a Quaternion array
return [quat.QuaternionFromAxis(self.sagittalDir, phi_) for phi_ in self.phi]
class angle_to_vertical_ekf_tracker(object):
"""
Callable that tracks the angle to vertical of a segment using a
fixed-lag EKF.
"""
def __init__(self, sagittalDir, vertRefDir=np.array([-1.0, 0, 0]),
var_angvel=1e-2, var_incl=1e-1, m=None,
g = 9.82, gThreshold=1e-1,
plotResults = False):
self.sagittalDir = sagittalDir
self.vertRefDir = vertRefDir
self.var_angvel = var_angvel
self.var_incl = var_incl
self.m = m
self.g = g
self.gThreshold = gThreshold
self.plotResults = plotResults
self.tvec = None
self.yinc = None
self.phi = None
self.gyrodta = None
self.accdta = None
def __call__(self, tvec, acc, gyro):
(phi, yincl) = cpekf.track_planar_vertical_orientation(tvec,
acc, gyro,
self.sagittalDir,
self.var_angvel,
self.var_incl,
self.m,
vertRefDir=self.vertRefDir,
g=self.g,
gThreshold=self.gThreshold,
plotResults=self.plotResults)
self.phi = phi
self.yinc = yincl
self.tvec = tvec
self.gyrodta = gyro
self.accdta = acc
return [quat.QuaternionFromAxis(self.sagittalDir, phi_) for phi_ in self.phi]
class angle_to_vertical_cyclic_tracker(object):
"""
Callable that tracks the angle to vertical of a segment using the planar
cyclic method.
The orienation is defined by the single angle phi, which is defined as
"""
def __init__(self, omega, nHarmonics,
sagittalDir, vertRefDir=np.array([-1.0, 0, 0]),
var_angvel=1, var_incl=1e-1,
lambda_gyro=1, lambda_incl=0.1,
solver=cppl.solve_QP, g=9.82, gThreshold=1e-1,
plotResults = False):
self.sagittalDir=sagittalDir
self.vertRefDir=vertRefDir
self.omega = omega
self.nHarmonics = nHarmonics
self.solver=solver
self.lambda_gyro = lambda_gyro
self.lambda_incl = lambda_incl
self.var_gyro = var_angvel
self.var_incl = var_incl
self.g = g
self.gThreshold = gThreshold
self.plotResults = plotResults
self.link = None
self.tvec = None
self.yinc = None
self.phi = None
self.gyrodta = None
self.accdta = None
def __call__(self, tvec, acc, gyro):
link = cppl.Link(tvec, acc, gyro, self.sagittalDir, self.vertRefDir)
if self.omega is None:
# One cycle in data
T = tvec[-1] - tvec[0]
omega = 2*np.pi/T
else:
omega = self.omega
link.estimate_planar_cyclic_orientation(omega, self.nHarmonics,
g=self.g,
gThreshold=self.gThreshold,
var_gyro=self.var_gyro,
var_incl=self.var_incl,
lambda_gyro=self.lambda_gyro,
lambda_incl=self.lambda_incl,
solver=self.solver)
self.link = link
self.phi = link.phi
self.yinc = link.yinc
self.tvec = link.tvec
self.gyrodta = link.gyrodta
self.accdta = link.accdta
return [quat.QuaternionFromAxis(self.sagittalDir, phi_) for phi_ in self.phi]
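# Usage sketch for the planar trackers above: track the sagittal-plane angle of
# a segment over one gait cycle. The choice of sagittalDir/vertRefDir below is
# an assumption for illustration; tvec, acc and gyro are one cycle of IMU data.
def _example_angle_tracker(tvec, acc, gyro):
    tracker = angle_to_vertical_cyclic_tracker(
        omega=None, nHarmonics=8,
        sagittalDir=np.array([0.0, 0.0, -1.0]),
        vertRefDir=np.array([-1.0, 0.0, 0.0]))
    qlist = tracker(tvec, acc, gyro)   # one Quaternion per sample
    return tracker.phi, qlist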
#-------------------------------------------------------------------------------
# Callable classes that estimate displacement. It is assumed that the data
# provided is for one single gait cycle. Each callable takes
# the following arguments:
# tvec -> A numpy (N,) array of time stamps for each data point
# acc -> A numpy (N,3) array of accelerations [m/s^2]
# gyro -> A numpy (N,3) array of angular velocities [rad/s]
# mag -> A numpy (N,3) array of magnetometer data
#
# qq   -> A QuaternionArray with estimated orientation of the IMU
#
# Returns
# d, v, g <- Numpy arrays (N,3) with displacement, velocity and g_vector
# The displacement is in the reference frame that is rotated by
# qq.
#
# Any other parameters the algorithm depends upon is set during instantiation
#-------------------------------------------------------------------------------
class IntegrateAccelerationDisplacementEstimator:
def __call__(self, tvec, acc, gyro, mag, qq,
accref=None, gyroref=None, magref=None):
# Rotate acceleration measurements, then remove linear trend.
# Rotate acceleration vectors
acc_S = qq.rotateVector(acc.T).T
# Since the IMU returns to the same position, the average acceleration
# must be one g pointing upwards.
g = np.mean(acc_S, axis=0)
gN = g / np.linalg.norm(g) # Normalize
acc_S_detrend = detrend(acc_S, type='constant', axis=0)
vel = cumtrapz(acc_S_detrend, tvec, axis=0)
vel = np.insert(vel, 0, vel[0], axis=0)
vel_detrend = detrend(vel, type='constant', axis=0)
disp = cumtrapz(vel_detrend, tvec, axis=0)
disp = np.insert(disp, 0, disp[0], axis=0)
# Determine the sagittal direction
gyro_S = qq.rotateVector(gyro.T).T
sagittalDir = sagittal_direction_from_gyro(gyro_S, magref)
return (disp, vel, gN, sagittalDir)
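# Sketch of chaining an orientation estimate into the displacement-estimator
# interface documented above; qq is the QuaternionArray produced by one of the
# orientation estimators for the same cycle of data.
def _example_displacement_estimation(tvec, acc, gyro, mag, qq):
    estimator = IntegrateAccelerationDisplacementEstimator()
    disp, vel, gN, sagittalDir = estimator(tvec, acc, gyro, mag, qq)
    return disp, vel, gN, sagittalDir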
class CyclicPlanarDisplacementEstimator:
def __init__(self, nHarmonics):
self.nHarmonics = nHarmonics
def estimate(self, imudta, sagittalDir, doPlots):
"""
        Runs the cyclic planar displacement method assuming that
        imudta contains a single gait cycle
"""
dt = 1.0/256.0
tvec = imudta[:,0]*dt
#accdta = imudta[:,4:7]*9.82
gyrodta = imudta[:,1:4]*np.pi/180.0
magdta = imudta[:,7:10]
omega = 2*np.pi/ (tvec[-1] - tvec[0])
(qEst, bEst) = cyclic_path.estimate_cyclic_orientation(tvec, gyrodta,
magdta, omega, self.nHarmonics)
tvec.shape = (len(tvec), 1)
return np.hstack((tvec, qEst))
class sagittal_plane_displacement_integrator_tracker(object):
"""
Callable that tracks the displacement in the sagittal plane by
integrating the acceleration twice using the cumtrapz function
"""
def __init__(self, sagittalDir, vertRefDir=np.array([-1.0, 0, 0]),
g=9.82, gThreshold=1e-1, plotResults=False):
self.sagittalDir=sagittalDir
self.vertRefDir=vertRefDir
self.g = g
self.gThreshold = gThreshold
self.plotResults = plotResults
self.tvec = None
self.yinc = None
self.phi = None
self.gyrodta = None
self.accdta = None
self.angleTracker = angle_to_vertical_integrator_tracker(sagittalDir,
vertRefDir,
g, gThreshold,
plotResults)
def __call__(self, tvec, acc, qE):
#qE = self.angleTracker(tvec, acc, gyro)
phi = self.angleTracker.phi
RLG = cppl.R_LG(phi, self.sagittalDir, self.vertRefDir)
accPlanar = cppl.rotate_vectors(RLG, acc, transpose=True)
accPlanar -= np.mean(accPlanar, axis=0)
velPlanar = cumtrapz(accPlanar, tvec, axis=0)
velPlanar = np.reshape(np.insert(velPlanar, 0, velPlanar[0]), (len(tvec), 3))
velPlanar -= np.mean(velPlanar, axis=0)
dispPlanar = cumtrapz(velPlanar, tvec, axis=0)
dispPlanar = np.reshape(np.insert(dispPlanar, 0, dispPlanar[0]), (len(tvec), 3))
self.phi = phi
self.tvec = tvec
        self.gyrodta = self.angleTracker.gyrodta  # no gyro argument here; reuse the angle tracker's copy
self.accdta = acc
# Calculate displacement in 3D using the direction of vertical and forward
return (dispPlanar, acc, accPlanar)
class sagittal_plane_displacement_cyclic_tracker(object):
"""
Callable that tracks the displacement in the sagittal plane using the planar
cyclic method.
"""
def __init__(self, omega, nHarmonics,
sagittalDir, vertRefDir=np.array([-1.0, 0, 0]),
var_angvel=1, var_incl=1e-1, var_acc=1,
lambda_gyro=1, lambda_incl=0.1, lambda_acc=1,
solver=cppl.solve_QP):
self.sagittalDir=sagittalDir
self.vertRefDir=vertRefDir
self.omega = omega
self.nHarmonics = nHarmonics
self.solver=solver
self.lambda_gyro = lambda_gyro
self.lambda_incl = lambda_incl
self.lambda_acc = lambda_acc
self.var_gyro = var_angvel
self.var_incl = var_incl
self.var_acc = var_acc
self.link = None
self.tvec = None
self.yinc = None
self.phi = None
self.gyrodta = None
self.accdta = None
def __call__(self, tvec, acc, gyro, g=9.82, gThreshold=1e-1,
plotResults=False):
link = cppl.Link(tvec, acc, gyro, self.sagittalDir, self.vertRefDir)
if self.omega is None:
# One cycle in data
T = tvec[-1] - tvec[0]
omega = 2*np.pi/T
else:
omega = self.omega
link.estimate_planar_cyclic_orientation(omega, self.nHarmonics,
g=g, gThreshold=gThreshold,
var_gyro=self.var_gyro,
var_incl=self.var_incl,
lambda_gyro=self.lambda_gyro,
lambda_incl=self.lambda_incl,
solver=self.solver,
plotResults=plotResults)
link.estimate_planar_cyclic_displacement( self.nHarmonics, g=g,
lambd=self.lambda_acc,
solver=self.solver)
self.link = link
self.phi = link.phi
self.yinc = link.yinc
self.tvec = link.tvec
self.gyrodta = link.gyrodta
if plotResults:
acc_G = link.accdtaSagittal # Zero-mean acc data
pyplot.figure()
pyplot.subplot(211)
pyplot.plot(link.tvec, acc_G[:,0])
pyplot.plot(link.tvec[[0, -1]],
np.mean(acc_G[:,0])*np.array([1,1]))
pyplot.plot(link.tvec, link.acc[:,0], linewidth=2)
pyplot.plot(link.tvec[[0, -1]],
np.mean(link.acc[:,0])*np.array([1,1]),
linewidth=2)
pyplot.title("Acceleration in vertical direction")
pyplot.subplot(212)
pyplot.plot(link.tvec, acc_G[:,1])
pyplot.plot(link.tvec[[0, -1]],
np.mean(acc_G[:,1])*np.array([1,1]))
pyplot.plot(link.tvec, link.acc[:,1], linewidth=2)
pyplot.plot(link.tvec[[0, -1]],
np.mean(link.acc[:,1])*np.array([1,1]),
linewidth=2)
pyplot.title("Acceleration in horizontal direction")
#1/0
return (link.disp, link.acc, link.accdtaSagittal)
def fix_cycles(ics, k=2, plotResults=False):
""" Checks the PNAtICLA attribute, computes the
0.25, 0.5 and 0.75 quantiles. Determines then the start and end of each cycle
so that only cycles with length that is within median +/- k*interquartiledistance
are kept.
The start and end events are returned as a list of two-tuples.
"""
if len(ics) == 0:
warnings.warn("Unexpected empty set of events!")
return []
steplengths = np.array([ics[i]-ics[i-1] for i in range(1,len(ics))])
medq = np.median(steplengths)
q1 = np.percentile(steplengths, 25)
q3 = np.percentile(steplengths, 75)
interq = q3-q1
lowthr = medq - k*interq
#lowthr = 0.0
highthr = medq + k*interq
cycles = [(start_, stop_) for (stepl_, start_, stop_) \
in itertools.izip(steplengths, ics[:-2], ics[1:]) \
if (stepl_ > lowthr and stepl_ < highthr)]
if plotResults:
pyplot.figure()
pyplot.hist(steplengths, 60)
pyplot.plot([lowthr, lowthr], [0, 10], 'r')
pyplot.plot([highthr, highthr], [0, 10], 'r')
pyplot.hist([(stop_-start_) for (start_, stop_) in cycles], 60, color='g')
return cycles
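# A hedged usage sketch (hypothetical event indices, not from the original code):
# given initial-contact sample indices, keep only cycles of typical length.
#   ics = [100, 205, 318, 420, 900, 1005]      # the 420 -> 900 step is an outlier
#   cycles = fix_cycles(ics, k=2)              # list of (start, stop) tuples
#   for (start_, stop_) in cycles:
#       segment = accdata[start_:stop_]        # accdata is an assumed (N,3) array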
def angle_to_vertical(upper, lower, vertDir=[0., 0., 1], sagittalDir=None):
"""
Calculates the angle to the vertical based on two markers upper and lower.
If sagittalDir is provided it must be a unit vector in the direction normal
to the sagittal plane, and the angle is the signed angle in the plane
and taken to be positive for a positive rotation about sagittalDir to get from
the vertical to the direction from lower to upper.
If sagittalDir is None, it is estimated from the marker trajectories (the
principal plane of movement) and the signed angle in that plane is returned.
Arguments:
upper, lower -> markers (N, 3) numpy arrays
"""
vert = np.asarray(vertDir)
vec = upper - lower
# Normalize the vectors
norms = np.apply_along_axis(np.linalg.norm, 1, vec )
normVecT = vec.T/norms # Will be (3,N)
vec = normVecT.T
if sagittalDir is None:
#costheta = np.dot(vert, normVecT)
#return np.arccos(costheta)
# Find the sagittal plane.
vecVel = np.diff(vec, axis=0)
# These velocities lies in the sagittal plane
vecVel = vecVel[~np.isnan(np.sum(vecVel, axis=1))]
(U,S,V) = np.linalg.svd(np.dot(vecVel.T, vecVel))
sDir = V[-1]
if sDir[0] > 0: # Should be approximately negative x
# Flip around sDir
sDir = -sDir
else:
sDir = np.asarray(sagittalDir)
# Calculate the angle to the vertical. A positive angle means the vertical
# vector is rotated to the direction of the segment vec by a positive
# rotation about sDir. This means that vertDir, vec, sDir forms a
# right-handed triple.
return np.arcsin( np.dot(np.cross(vert, vec), sDir) )
# Old stuff
# Make sagittal plane horizontal
sDir -= np.dot(vert, sDir)*vert
sDir = sDir / np.linalg.norm(sDir)
# The forward unit vector in the left-side sagittal plane
fwd = np.cross(sDir, vert)
#1/0
# Calculating the angle
return np.arctan2( np.dot(fwd, vec.T), np.dot(vert, vec.T) )
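# A hedged usage sketch (synthetic marker data; the names are assumptions):
#   upper = np.random.randn(1000, 3) * 0.01 + np.array([0, 0, 1.0])   # e.g. knee marker
#   lower = np.random.randn(1000, 3) * 0.01                           # e.g. ankle marker
#   theta = angle_to_vertical(upper, lower, sagittalDir=[0, 1.0, 0])  # signed angle, radians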
def inclination(acc, vertRef, horRef, g, gThreshold):
"""
Calculates the inclination. The inclination is positive if the IMU is
rotated in the positive direction about the axis normal to the
(vertRef, horRef) plane. For the lower limb this means that z points to
the left, x points upwards, and y points forward. The returned angle is
measured with respect to the vertRef direction and is positive when the
g-vector lies in the fourth (vertRef, -horRef) quadrant.
"""
N = len(acc)
tauK = [ k for k in range(N)
if np.isclose(np.linalg.norm(acc[k]), g, atol=gThreshold) ]
yinc = np.array( [ np.arctan2(np.dot(acc[k], -horRef),
np.dot(acc[k], vertRef))
for k in tauK ] )
return (tauK, yinc)
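# A hedged usage sketch (synthetic accelerometer data): samples whose norm is
# close to g are treated as quasi-static and contribute an inclination sample.
#   acc = np.random.randn(500, 3) * 0.05 + np.array([9.82, 0, 0])
#   tauK, yinc = inclination(acc, vertRef=np.array([1.0, 0, 0]),
#                            horRef=np.array([0, 1.0, 0]), g=9.82, gThreshold=0.1)
#   # tauK holds the indices of the quasi-static samples, yinc the angles (rad)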
def angle_to_accref(q, acc, accref, gyro=None, magref=None):
"""
Calculates the 3D angle between the g vector (taken as the average of the
acceleration measurements rotated into a static frame) and the g vector
taken as the acceleration at the reference measurement (standing still)
Arguments
q -> QuaternionArray with rotations of the IMU wrt the first data
sample
acc -> (N,3) numpy array, acceleration measurements
accref -> (3,) numpy array, acceleration at the reference measurement
gyro -> (N,3) gyro data. Used to determine a unit vector normal to the
main plane of movement (the sagittal plane). Default is
None, in which case an unsigned space angle is returned.
magref -> (3,) numpy array, magnetometer data at the reference
measurement. Used to determine the sign of the unit vector
defining the plane of movement.
Returns
phi <- (N,) numpy array, angle
"""
# Rotate acceleration vectors
acc_S = q.rotateVector(acc.T).T
# Since the IMU returns to the same position, the average acceleration
# must be one g pointing upwards.
g = np.mean(acc_S, axis=0)
gN = g / np.linalg.norm(g) # Normalize
# Now rotate the g-vector according to the orientation q. This gives
# the vertical direction (upwards) in the frame of the IMU.
gb = q.rotateFrame(gN).T
# accref gives the reference acceleration during standing still.
# This is the reference vertical direction.
gref = accref / np.linalg.norm(accref)
if (gyro is None) or (magref is None):
# Don't know how to determine the main plane of rotation/movement, and
# so return unsigned angle
return np.arccos(np.dot(gb, gref))
else:
# Determine the main plane of movement as the principle axis of the gyro
# samples. Assume to be sagittal direction
sagittalDir = sagittal_direction_from_gyro(gyro, magref)
# DEBUG
#pyplot.figure()
#pyplot.plot(acc_S)
#pyplot.show()
qarr = q
#1/0
# The z-component of the reference magnetometer is positive for
# left-side IMUs and negative for right-side IMUs, so the
# multiplication with the sign function ensures that the sagittal
# direction is positive local z for the left side of the body and
# negative local z for the right side.
# Return a signed angle. Assume that a right-handed rotation about the
# sagittal direction takes the current vertical to the reference
# vertical; this corresponds to a positive rotation from the reference
# to the current orientation of the segment.
return np.arcsin( np.dot(np.cross(gb, gref), sagittalDir) )
def sagittal_direction_from_gyro(gyro, magref=None):
"""
Calculates the direction normal to the sagittal plane by finding the main
axis of the covariance matrix of the gyro data.
Arguments
gyro -> (N,3) numpy array with gyro data
magref -> (3,) numpy array with magnetic field at reference measurement
Optional. If not given, then the vector returned could point
either to the left or to the right
"""
gcov = np.cov(gyro.T)
eigenValues, eigenVectors = np.linalg.eig(gcov)
idx = eigenValues.argsort()[::-1]
eigenValues = eigenValues[idx]
eigenVectors = eigenVectors[:,idx]
sagittalDir = eigenVectors[:,0]
if magref is not None:
sagittalDir *= np.sign(magref[2]*sagittalDir[2])
return sagittalDir
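# A hedged usage sketch (simulated gyro data; magref is a made-up reference):
#   gyro = np.outer(np.sin(np.linspace(0, 20, 2000)), [0, 0, 3.0])   # rotation mostly about z
#   gyro += np.random.randn(2000, 3) * 0.05
#   sdir = sagittal_direction_from_gyro(gyro, magref=np.array([20., 5., 40.]))
#   # sdir is a unit vector approximately along z, its sign resolved by magref[2]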
| gpl-3.0 |
sauloal/cnidaria | scripts/venv/lib/python2.7/site-packages/pandas/core/config_init.py | 5 | 12812 | """
This module is imported from the pandas package __init__.py file
in order to ensure that the core.config options registered here will
be available as soon as the user loads the package. If register_option
is invoked inside specific modules, they will not be registered until that
module is imported, which may or may not be a problem.
If you need to make sure options are available even before a certain
module is imported, register them here rather than in the module.
"""
import pandas.core.config as cf
from pandas.core.config import (is_int, is_bool, is_text, is_float,
is_instance_factory, is_one_of_factory,
get_default_val)
from pandas.core.format import detect_console_encoding
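# A hedged usage sketch (not part of this module): the options registered below
# are reached through the public pandas API once the package is imported, e.g.
#   import pandas as pd
#   pd.set_option("display.max_rows", 120)
#   pd.get_option("display.precision")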
#
# options from the "display" namespace
pc_precision_doc = """
: int
Floating point output precision (number of significant digits). This is
only a suggestion
"""
pc_colspace_doc = """
: int
Default space for DataFrame columns.
"""
pc_max_rows_doc = """
: int
If max_rows is exceeded, switch to truncate view. Depending on
`large_repr`, objects are either centrally truncated or printed as
a summary view. 'None' value means unlimited.
In case python/IPython is running in a terminal and `large_repr`
equals 'truncate' this can be set to 0 and pandas will auto-detect
the height of the terminal and print a truncated object which fits
the screen height. The IPython notebook, IPython qtconsole, or
IDLE do not run in a terminal and hence it is not possible to do
correct auto-detection.
"""
pc_max_cols_doc = """
: int
If max_cols is exceeded, switch to truncate view. Depending on
`large_repr`, objects are either centrally truncated or printed as
a summary view. 'None' value means unlimited.
In case python/IPython is running in a terminal and `large_repr`
equals 'truncate' this can be set to 0 and pandas will auto-detect
the width of the terminal and print a truncated object which fits
the screen width. The IPython notebook, IPython qtconsole, or IDLE
do not run in a terminal and hence it is not possible to do
correct auto-detection.
"""
pc_max_categories_doc = """
: int
This sets the maximum number of categories pandas should output when printing
out a `Categorical` or a Series of dtype "category".
"""
pc_max_info_cols_doc = """
: int
max_info_columns is used in DataFrame.info method to decide if
per column information will be printed.
"""
pc_nb_repr_h_doc = """
: boolean
When True, IPython notebook will use html representation for
pandas objects (if it is available).
"""
pc_date_dayfirst_doc = """
: boolean
When True, prints and parses dates with the day first, eg 20/01/2005
"""
pc_date_yearfirst_doc = """
: boolean
When True, prints and parses dates with the year first, eg 2005/01/20
"""
pc_pprint_nest_depth = """
: int
Controls the number of nested levels to process when pretty-printing
"""
pc_multi_sparse_doc = """
: boolean
"sparsify" MultiIndex display (don't display repeated
elements in outer levels within groups)
"""
pc_encoding_doc = """
: str/unicode
Defaults to the detected encoding of the console.
Specifies the encoding to be used for strings returned by to_string,
these are generally strings meant to be displayed on the console.
"""
float_format_doc = """
: callable
The callable should accept a floating point number and return
a string with the desired format of the number. This is used
in some places like SeriesFormatter.
See core.format.EngFormatter for an example.
"""
max_colwidth_doc = """
: int
The maximum width in characters of a column in the repr of
a pandas data structure. When the column overflows, a "..."
placeholder is embedded in the output.
"""
colheader_justify_doc = """
: 'left'/'right'
Controls the justification of column headers. used by DataFrameFormatter.
"""
pc_expand_repr_doc = """
: boolean
Whether to print out the full DataFrame repr for wide DataFrames across
multiple lines, `max_columns` is still respected, but the output will
wrap-around across multiple "pages" if its width exceeds `display.width`.
"""
pc_show_dimensions_doc = """
: boolean or 'truncate'
Whether to print out dimensions at the end of DataFrame repr.
If 'truncate' is specified, only print out the dimensions if the
frame is truncated (e.g. not display all rows and/or columns)
"""
pc_line_width_doc = """
: int
Deprecated.
"""
pc_line_width_deprecation_warning = """\
line_width has been deprecated, use display.width instead (currently both are
identical)
"""
pc_height_deprecation_warning = """\
height has been deprecated.
"""
pc_width_doc = """
: int
Width of the display in characters. In case python/IPython is running in
a terminal this can be set to None and pandas will correctly auto-detect
the width.
Note that the IPython notebook, IPython qtconsole, or IDLE do not run in a
terminal and hence it is not possible to correctly detect the width.
"""
pc_height_doc = """
: int
Deprecated.
"""
pc_chop_threshold_doc = """
: float or None
if set to a float value, all float values smaller than the given threshold
will be displayed as exactly 0 by repr and friends.
"""
pc_max_seq_items = """
: int or None
when pretty-printing a long sequence, no more than `max_seq_items`
will be printed. If items are omitted, they will be denoted by the
addition of "..." to the resulting string.
If set to None, the number of items to be printed is unlimited.
"""
pc_max_info_rows_doc = """
: int or None
df.info() will usually show null-counts for each column.
For large frames this can be quite slow. max_info_rows and max_info_cols
limit this null check only to frames with smaller dimensions than specified.
"""
pc_large_repr_doc = """
: 'truncate'/'info'
For DataFrames exceeding max_rows/max_cols, the repr (and HTML repr) can
show a truncated table (the default from 0.13), or switch to the view from
df.info() (the behaviour in earlier versions of pandas).
"""
pc_mpl_style_doc = """
: bool
Setting this to 'default' will modify the rcParams used by matplotlib
to give plots a more pleasing visual style by default.
Setting this to None/False restores the values to their initial value.
"""
pc_memory_usage_doc = """
: bool or None
This specifies if the memory usage of a DataFrame should be displayed when
df.info() is called.
"""
style_backup = dict()
def mpl_style_cb(key):
import sys
from pandas.tools.plotting import mpl_stylesheet
global style_backup
val = cf.get_option(key)
if 'matplotlib' not in sys.modules.keys():
if not(val): # starting up, we get reset to None
return val
raise Exception("matplotlib has not been imported. aborting")
import matplotlib.pyplot as plt
if val == 'default':
style_backup = dict([(k, plt.rcParams[k]) for k in mpl_stylesheet])
plt.rcParams.update(mpl_stylesheet)
elif not val:
if style_backup:
plt.rcParams.update(style_backup)
return val
with cf.config_prefix('display'):
cf.register_option('precision', 7, pc_precision_doc, validator=is_int)
cf.register_option('float_format', None, float_format_doc)
cf.register_option('column_space', 12, validator=is_int)
cf.register_option('max_info_rows', 1690785, pc_max_info_rows_doc,
validator=is_instance_factory((int, type(None))))
cf.register_option('max_rows', 60, pc_max_rows_doc,
validator=is_instance_factory([type(None), int]))
cf.register_option('max_categories', 8, pc_max_categories_doc, validator=is_int)
cf.register_option('max_colwidth', 50, max_colwidth_doc, validator=is_int)
cf.register_option('max_columns', 20, pc_max_cols_doc,
validator=is_instance_factory([type(None), int]))
cf.register_option('large_repr', 'truncate', pc_large_repr_doc,
validator=is_one_of_factory(['truncate', 'info']))
cf.register_option('max_info_columns', 100, pc_max_info_cols_doc,
validator=is_int)
cf.register_option('colheader_justify', 'right', colheader_justify_doc,
validator=is_text)
cf.register_option('notebook_repr_html', True, pc_nb_repr_h_doc,
validator=is_bool)
cf.register_option('date_dayfirst', False, pc_date_dayfirst_doc,
validator=is_bool)
cf.register_option('date_yearfirst', False, pc_date_yearfirst_doc,
validator=is_bool)
cf.register_option('pprint_nest_depth', 3, pc_pprint_nest_depth,
validator=is_int)
cf.register_option('multi_sparse', True, pc_multi_sparse_doc,
validator=is_bool)
cf.register_option('encoding', detect_console_encoding(), pc_encoding_doc,
validator=is_text)
cf.register_option('expand_frame_repr', True, pc_expand_repr_doc)
cf.register_option('show_dimensions', 'truncate', pc_show_dimensions_doc,
validator=is_one_of_factory([True, False, 'truncate']))
cf.register_option('chop_threshold', None, pc_chop_threshold_doc)
cf.register_option('max_seq_items', 100, pc_max_seq_items)
cf.register_option('mpl_style', None, pc_mpl_style_doc,
validator=is_one_of_factory([None, False, 'default']),
cb=mpl_style_cb)
cf.register_option('height', 60, pc_height_doc,
validator=is_instance_factory([type(None), int]))
cf.register_option('width', 80, pc_width_doc,
validator=is_instance_factory([type(None), int]))
# redirected to width, make defval identical
cf.register_option('line_width', get_default_val('display.width'),
pc_line_width_doc)
cf.register_option('memory_usage', True, pc_memory_usage_doc,
validator=is_instance_factory([type(None), bool]))
cf.deprecate_option('display.line_width',
msg=pc_line_width_deprecation_warning,
rkey='display.width')
cf.deprecate_option('display.height',
msg=pc_height_deprecation_warning,
rkey='display.max_rows')
tc_sim_interactive_doc = """
: boolean
Whether to simulate interactive mode for purposes of testing
"""
with cf.config_prefix('mode'):
cf.register_option('sim_interactive', False, tc_sim_interactive_doc)
use_inf_as_null_doc = """
: boolean
True means treat None, NaN, INF, -INF as null (old way),
False means None and NaN are null, but INF, -INF are not null
(new way).
"""
# We don't want to start importing everything at the global context level
# or we'll hit circular deps.
def use_inf_as_null_cb(key):
from pandas.core.common import _use_inf_as_null
_use_inf_as_null(key)
with cf.config_prefix('mode'):
cf.register_option('use_inf_as_null', False, use_inf_as_null_doc,
cb=use_inf_as_null_cb)
# user warnings
chained_assignment = """
: string
Raise an exception, warn, or no action if trying to use chained assignment,
The default is warn
"""
with cf.config_prefix('mode'):
cf.register_option('chained_assignment', 'warn', chained_assignment,
validator=is_one_of_factory([None, 'warn', 'raise']))
# Set up the io.excel specific configuration.
writer_engine_doc = """
: string
The default Excel writer engine for '{ext}' files. Available options:
'{default}' (the default){others}.
"""
with cf.config_prefix('io.excel'):
# going forward, will be additional writers
for ext, options in [('xls', ['xlwt']),
('xlsm', ['openpyxl'])]:
default = options.pop(0)
if options:
options = " " + ", ".join(options)
else:
options = ""
doc = writer_engine_doc.format(ext=ext, default=default,
others=options)
cf.register_option(ext + '.writer', default, doc, validator=str)
def _register_xlsx(engine, other):
cf.register_option('xlsx.writer', engine,
writer_engine_doc.format(ext='xlsx',
default=engine,
others=", '%s'" % other),
validator=str)
try:
# better memory footprint
import xlsxwriter
_register_xlsx('xlsxwriter', 'openpyxl')
except ImportError:
# fallback
_register_xlsx('openpyxl', 'xlsxwriter')
| mit |
drabastomek/practicalDataAnalysisCookbook | Codes/Chapter04/clustering_kmeans_search.py | 1 | 3295 | # this is needed to load helper from the parent folder
import sys
sys.path.append('..')
# the rest of the imports
import helper as hlp
import pandas as pd
import sklearn.cluster as cl
import numpy as np
@hlp.timeit
def findClusters_kmeans(data, no_of_clusters):
'''
Cluster data using k-means
'''
# create the classifier object
kmeans = cl.KMeans(
n_clusters=no_of_clusters,
n_jobs=-1,
verbose=0,
n_init=30
)
# fit the data
return kmeans.fit(data)
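# A hedged usage sketch (random data, made-up shapes): the function returns a
# fitted sklearn KMeans estimator, so its standard attributes are available.
#   model = findClusters_kmeans(np.random.rand(200, 5), 3)
#   print(model.inertia_, model.cluster_centers_.shape)   # -> (3, 5)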
def findOptimalClusterNumber(
data,
keep_going = 1,
max_iter = 30
):
'''
A method that iteratively searches for the
number of clusters that minimizes the Davies-Bouldin
criterion
'''
# the object to hold measures
measures = [666]
# starting point
n_clusters = 2
# counter for the number of iterations past the local
# minimum
keep_going_cnt = 0
stop = False # flag for the algorithm stop
def checkMinimum(keep_going):
'''
A method to check if minimum found
'''
global keep_going_cnt # access global counter
# if the new measure is greater than for one of the
# previous runs
if measures[-1] > np.min(measures[:-1]):
# increase the counter
keep_going_cnt += 1
# if the counter is bigger than allowed
if keep_going_cnt > keep_going:
# the minimum is found
return True
# else, reset the counter and return False
else:
keep_going_cnt = 0
return False
# main loop
# loop until minimum found or maximum iterations reached
while not stop and n_clusters < (max_iter + 2):
# cluster the data
cluster = findClusters_kmeans(data, n_clusters)
# assess the clusters effectiveness
labels = cluster.labels_
centroids = cluster.cluster_centers_
# store the measures
measures.append(
hlp.davis_bouldin(data,labels, centroids)
)
# check if minimum found
stop = checkMinimum(keep_going)
# increase the iteration
n_clusters += 1
# once found -- return the index of the minimum
return measures.index(np.min(measures)) + 1
# the file name of the dataset
r_filename = '../../Data/Chapter04/bank_contacts.csv'
# read the data
csv_read = pd.read_csv(r_filename)
# select variables
selected = csv_read[['n_duration','n_nr_employed',
'prev_ctc_outcome_success','n_euribor3m',
'n_cons_conf_idx','n_age','month_oct',
'n_cons_price_idx','edu_university_degree','n_pdays',
'dow_mon','job_student','job_technician',
'job_housemaid','edu_basic_6y']]
# find the optimal number of clusters; that is, the number of
# clusters that minimizes the Davies-Bouldin criterion
optimal_n_clusters = findOptimalClusterNumber(selected)
print('Optimal number of clusters: {0}' \
.format(optimal_n_clusters))
# cluster the data
cluster = findClusters_kmeans(selected, optimal_n_clusters)
# assess the clusters effectiveness
labels = cluster.labels_
centroids = cluster.cluster_centers_
hlp.printClustersSummary(selected, labels, centroids) | gpl-2.0 |
jonwright/ImageD11 | sandbox/diffracCl/diffracCl.py | 1 | 6696 | from __future__ import print_function, division
import pyopencl as cl
import numpy
import numpy as np
import time, sys, os
import ctypes
import timeit
timer = timeit.default_timer
if "win" in sys.platform:
dll = ctypes.CDLL("diffracCl.dll")
print("# time.clock", sys.platform, sys.version)
else:
dll = ctypes.CDLL("./diffracCl.so")
print("# time.time", sys.platform, sys.version)
import pyopencl.version
print("# pyopencl:",pyopencl.version.VERSION)
class myCL:
def __init__(self, npx):
self.ctx = cl.create_some_context()
for d in self.ctx.devices:
print("#",d.platform.name)
print("#",d.vendor)
print("#",d.name)
self.npx = npx
self.queue = cl.CommandQueue(self.ctx)
self.pars = np.zeros(14, dtype=np.float32)
def loadProgram(self, filename):
#read in the OpenCL source file as a string
f = open(filename, 'r')
fstr = "".join(f.readlines())
#create the program
self.program = cl.Program(self.ctx, fstr).build(
[ '-cl-fast-relaxed-math' ])
# '-cl-mad-enable',
# '-cl-no-signed-zeros',
# '-w'] )
def popCorn(self):
mf = cl.mem_flags
nb = self.npx[0]*self.npx[1]*4
#create OpenCL buffers
self.par_buf = cl.Buffer(self.ctx,
mf.READ_ONLY | mf.COPY_HOST_PTR,
hostbuf = self.pars )
self.tth_buf = cl.Buffer(self.ctx, mf.WRITE_ONLY, nb )
self.eta_buf = cl.Buffer(self.ctx, mf.WRITE_ONLY, nb )
self.tthl = np.empty( self.npx, np.float32 )
self.etal = np.empty( self.npx, np.float32 )
def execute(self):
# start = timer()
evtcompute = self.program.tthetaf(self.queue,
self.npx,
None,
#(32,32),
self.tth_buf,
self.eta_buf,
self.par_buf)
# is_blocking=False )
#evtcompute.wait()
#print timer()-start
evtt = cl.enqueue_copy( self.queue,
self.tthl,
self.tth_buf,
wait_for = [evtcompute],
is_blocking=False)
evte = cl.enqueue_copy( self.queue,
self.etal,
self.eta_buf,
wait_for = [evtcompute, evtt],
is_blocking=False)
evtcompute.wait()
evtt.wait()
evte.wait()
return self.tthl, self.etal
def setpars(self, pars ):
self.pars = pars
# print pars
evt = cl.enqueue_copy( self.queue,
self.par_buf,
self.pars, is_blocking=True)
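# A hedged usage sketch mirroring the __main__ block below (paths and parameter
# values are assumptions): compile the kernel, allocate buffers, push parameters
# and read back the two-theta / eta maps.
#   example = myCL((1024, 2048))
#   example.loadProgram("diffracCl.cl")
#   example.popCorn()
#   example.setpars(pars)            # pars: (14,) float32 array, e.g. make_pars("pars.par")
#   tth, eta = example.execute()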
class ctp:
def __init__(self, npx):
self.npx = npx
if "win" in sys.platform:
fname = "diffracCl.dll"
else:
fname = "./diffracCl.so"
self.dll = ctypes.CDLL( fname )
self.dll.ttheta.argtypes=[ctypes.POINTER(ctypes.c_float),
ctypes.POINTER(ctypes.c_float),
ctypes.POINTER(ctypes.c_float),
ctypes.c_int,
ctypes.c_int]
def compute( self, tth, eta, p):
t = tth.ctypes.data_as(ctypes.POINTER(ctypes.c_float))
e = eta.ctypes.data_as(ctypes.POINTER(ctypes.c_float))
p = p.ctypes.data_as(ctypes.POINTER(ctypes.c_float))
times = []
# warmup
self.dll.ttheta(t , e , p, self.npx[0], self.npx[1] )
for i in range(10):
start = timer()
self.dll.ttheta(t , e , p, self.npx[0], self.npx[1] )
times.append( timer() - start )
return times
def make_pars( parfile ):
from ImageD11.parameters import parameters
from ImageD11 import transform
p = parameters( )
p.loadparameters( parfile )
rmat = transform.detector_rotation_matrix(
float(p.parameters["tilt_x"]),
float(p.parameters["tilt_y"]),
float(p.parameters["tilt_z"]) )
fmat = np.array( [[ 1 , 0 , 0],
[ 0 , float(p.parameters['o11']), float(p.parameters['o12']) ],
[ 0 , float(p.parameters['o21']), float(p.parameters['o22']) ]],
np.float32)
pars = np.array( [ float(p.parameters["y_center"]) ,
float(p.parameters["y_size"]) ,
float(p.parameters["z_center"]) ,
float(p.parameters["z_size"]) ,
float(p.parameters["distance"]) ] +
list( np.dot( rmat, fmat).ravel() ) ,
np.float32)
return pars
if __name__ == "__main__":
start = timer()
npx = 1024,2048
example = myCL(npx)
example.loadProgram("diffracCl.cl")
example.popCorn()
pars = make_pars(sys.argv[1])
example.setpars( pars )
print("# Init", timer()-start)
times = []
# Warmup
tth, eta = example.execute()
for i in range(10):
start = timer()
tth_cl, eta_cl = example.execute()
times.append(timer()-start)
times = np.array(times)
print("# mean min max std")
print("%.4f %.4f %.4f %.4f"%( times.mean(), times.min(),
times.max(), times.std()))
t = np.median(times)
print("%.1f ms, %.1f fps,"%(1e3*t,1.0/t), end=' ')
print(tth.max(),tth.min())
eta_ct = np.empty( npx, np.float32)
tth_ct = np.empty( npx, np.float32)
o = ctp( npx )
times = np.array( o.compute( tth_ct, eta_ct, pars ) )
print("# ctypes module, hopefully with openmp")
print("# mean min max std")
print("%.4f %.4f %.4f %.4f"%( times.mean(), times.min(),
times.max(), times.std()))
t = np.median(times)
print("%.1f ms, %.1f fps,"%(1e3*t,1.0/t), end=' ')
print(tth.max(),tth.min())
# Check same ness
eta_err = (abs(eta_cl - eta_ct)).mean()
tth_err = (abs(tth_cl - tth_ct)).mean()
print("Mean diff tth,eta",tth_err,eta_err)
if len(sys.argv)>2:
from matplotlib.pylab import imshow, figure, show, colorbar, title
figure(1)
title("OpenCL")
imshow(eta_cl)
colorbar()
figure(2)
title("Ctypes")
imshow(eta_ct)
colorbar()
show()
| gpl-2.0 |
MillerCMBLabUSC/lab_analysis | apps/simulation/new_pointing.py | 1 | 6415 | import ephem
import datetime
import numpy as np
import matplotlib.pyplot as plt
from lab_analysis.apps.simulation import default_settings
from lab_analysis.apps.simulation import datetimes
from lab_analysis.libs.geometry import coordinates
np.set_printoptions(edgeitems = 50)
class CreatePointing(default_settings.SimulatorSettings):
'''
Class: CreatePointing
Purpose: set of functions to generate a set up a scan strategy and generate a list of coordinates
Attributes: inheriting from SimulatorSettings class from default_settings file
'''
def make_boresight_pointing(self):
'''
Function: make_boresight_pointing
Purpose: creates a pointing scheme and generates an array of coordinates. The simulation code
calls this function to generate pointing.
Inputs: none
Outputs:
-> ra_data (float): array of right ascension (ra) coordinates
-> dec_data (float): array of declination (dec) coordinates
-> horiz_roll (float): array of horizontal roll values
'''
self.scan_setup()
self.scan_sequence()
return self.ra_data, self.dec_data, self.horiz_roll
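# A hedged usage sketch mirroring the __main__ block at the bottom of this file:
#   pointing = CreatePointing()
#   ra, dec, roll = pointing.make_boresight_pointing()
#   # ra/dec are flat arrays of boresight coordinates; roll is all zeros here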
def scan_setup(self):
'''
Function: scan_setup
Purpose: defining some useful values based on the inputs from default_settings file.
eg: number of steps, number of data points, azimuth/elevation mins/maxs, etc.
Also generates an array of datetimes
Inputs: none
Outputs: none
'''
self.n_stp = int(self.el_rng/self.el_stp) + 1 #number of steps
l = self.az_rng*(self.n_stp) + self.el_rng*2 #total length of path of scan
self.num_data_points = float(l)/self.dt #defining number of data points
#print self.num_data_points
self.datetimes = datetimes.generate_datetimes() #generating array of datetimes to use for conversion
#establishing boundary of scan
self.az_min = self.az_0 - (self.az_rng/2) #min az
self.az_max = self.az_0 + (self.az_rng/2) #max az
self.el_min = self.el_0 - (self.el_rng/2) #min el
self.el_max = self.el_0 + (self.el_rng/2) #max el
def scan_sequence(self):
'''
Function: scan_sequence
Purpose: generates the array of coordinates according to the scan strategy. In this case, it is a
zig-zag pattern with incremental steps in elevation.
'''
self.horiz_roll = np.zeros(int(self.num_data_points)) #setting horizontal roll to zero
#first step of scan; initializing data arrays
self.ra_data = np.zeros(0)
self.dec_data = np.zeros(0)
self.az_data = np.zeros(0)
self.el_data = np.zeros(0)
#starting out at az_min, el_min
current_az = self.az_min
current_el = self.el_min
self.az_data = np.append(self.az_data, current_az)
self.el_data = np.append(self.el_data, current_el)
current_ra, current_dec = self.transform_coords(current_az, current_el)
self.ra_data = np.append(self.ra_data, current_ra)
self.dec_data = np.append(self.dec_data, current_dec)
j = 0 #parameter to keep track of elevation increments
flag = 0 #parameter to establish zigzag pattern
for i in range (1, int(self.num_data_points)): #starts from 1 because we already did 1st step of scan
if flag == 0:
if current_az < self.az_max:
#current_az moves from az_min to az_max while el stays the same
current_az += self.az_rng * self.dt
current_el = self.el_min + j * self.el_stp
self.az_data = np.append(self.az_data, current_az)
self.el_data = np.append(self.el_data, current_el)
current_ra, current_dec = self.transform_coords(current_az, current_el)
self.ra_data = np.append(self.ra_data, current_ra)
self.dec_data = np.append(self.dec_data, current_dec)
elif current_az >= self.az_max:
#once current_az reaches az_max, it stays there while el increases
current_el += self.el_stp * self.dt
self.az_data = np.append(self.az_data, current_az)
self.el_data = np.append(self.el_data, current_el)
current_ra, current_dec = self.transform_coords(current_az, current_el)
self.ra_data = np.append(self.ra_data, current_ra)
self.dec_data = np.append(self.dec_data, current_dec)
if current_el >= self.el_min + (j+1) * self.el_stp:
#if current_el is done incrementing, increase step# and switch flag
j += 1
flag = 1
elif flag == 1:
if current_az > self.az_min:
#current_az moves from az_max to az_min while el stays the same
current_az -= self.az_rng * self.dt
current_el = self.el_min + j * self.el_stp
self.az_data = np.append(self.az_data, current_az)
self.el_data = np.append(self.el_data, current_el)
current_ra, current_dec = self.transform_coords(current_az, current_el)
self.ra_data = np.append(self.ra_data, current_ra)
self.dec_data = np.append(self.dec_data, current_dec)
elif current_az <= self.az_min:
#once current_az reaches az_min, it stays there while el increases
current_el += self.el_stp * self.dt
self.az_data = np.append(self.az_data, current_az)
self.el_data = np.append(self.el_data, current_el)
current_ra, current_dec = self.transform_coords(current_az, current_el)
self.ra_data = np.append(self.ra_data, current_ra)
self.dec_data = np.append(self.dec_data, current_dec)
if current_el >= self.el_min + (j+1) * self.el_stp:
#if current_el is done incrementing, increase step# and switch flag
j += 1
flag = 0
def transform_coords(self, az, el):
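# Convert the current horizontal (az, el) pointing into equatorial (ra, dec)
# using the telescope location and the datetime matching the current sample.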
datetime_index = np.floor(self.ra_data.size * self.dt)
self.telescope.date = self.datetimes[int(datetime_index)]
ra, dec = coordinates.hor_to_eq(az, el, float(self.telescope.lat), self.telescope.sidereal_time())
return ra, dec
if __name__ == "__main__":
pointing = CreatePointing()
ra, dec, roll = pointing.make_boresight_pointing()
plt.plot(ra, dec, '.', markersize = 1.5)
plt.show()
| gpl-2.0 |
AaronWatters/inferelator_ng | inferelator_ng/tests/test_results_processor.py | 3 | 11794 | import unittest
from .. import results_processor
import pandas as pd
import numpy as np
class TestResultsProcessor(unittest.TestCase):
def test_combining_confidences_one_beta(self):
# rescaled betas are only in the
beta = pd.DataFrame(np.array([[0.5, 0], [0.5, 1]]), ['gene1', 'gene2'], ['tf1','tf2'])
rp = results_processor.ResultsProcessor([beta], [beta])
confidences = rp.compute_combined_confidences()
np.testing.assert_equal(confidences.values,
np.array([[0.5, 0.0], [0.5, 1.0]]))
def test_combining_confidences_one_beta_invariant_to_rescale_division(self):
# rescaled betas are only in the
beta = pd.DataFrame(np.array([[1, 0], [1, 2]]), ['gene1', 'gene2'], ['tf1','tf2'])
rescaled_beta = pd.DataFrame((beta / 3.0), ['gene1', 'gene2'], ['tf1','tf2'])
rp = results_processor.ResultsProcessor([beta], [rescaled_beta])
confidences = rp.compute_combined_confidences()
np.testing.assert_equal(confidences.values,
np.array([[0.5, 0.0], [0.5, 1.0]]))
def test_combining_confidences_one_beta_all_negative_values(self):
# rescaled betas are only in the
beta = pd.DataFrame(np.array([[-1, -.5, -3], [-1, -2, 0]]), ['gene1', 'gene2'], ['tf1','tf2', 'tf3'])
rescaled_beta = pd.DataFrame([[0.2, 0.1, 0.4], [0.3, 0.5, 0]], ['gene1', 'gene2'], ['tf1','tf2', 'tf3'])
rp = results_processor.ResultsProcessor([beta], [rescaled_beta])
confidences = rp.compute_combined_confidences()
np.testing.assert_equal(confidences.values,
np.array([[0.4, 0.2, 0.8], [0.6, 1.0, 0]]))
def test_combining_confidences_one_beta_with_negative_values(self):
# data was taken from a subset of row 42 of b subtilis run
beta = pd.DataFrame(np.array([[-0.2841755, 0, 0.2280624, -0.3852462, 0.2545609]]), ['gene1'], ['tf1','tf2','tf3', 'tf4', 'tf5'])
rescaled_beta = pd.DataFrame(np.array([[0.09488207, 0, 0.07380172, 0.15597205, 0.07595131]]), ['gene1'], ['tf1','tf2','tf3', 'tf4', 'tf5'])
rp = results_processor.ResultsProcessor([beta], [rescaled_beta])
confidences = rp.compute_combined_confidences()
np.testing.assert_equal(confidences.values,
np.array([[ 0.75, 0, 0.25, 1, 0.5 ]]))
def test_combining_confidences_two_betas_negative_values(self):
# data was taken from a subset of row 42 of b subtilis run
beta1 = pd.DataFrame(np.array([[-0.2841755, 0, 0.2280624, -0.3852462, 0.2545609]]), ['gene1'], ['tf1','tf2','tf3', 'tf4', 'tf5'])
rescaled_beta1 = pd.DataFrame(np.array([[0.09488207, 0, 0.07380172, 0.15597205, 0.07595131]]), ['gene1'], ['tf1','tf2','tf3', 'tf4', 'tf5'])
beta2 = pd.DataFrame(np.array([[0, 0.2612011, 0.1922999, 0.00000000, 0.19183277]]), ['gene1'], ['tf1','tf2','tf3', 'tf4', 'tf5'])
rescaled_beta2 = pd.DataFrame(np.array([[0, 0.09109101, 0.05830292, 0.00000000, 0.3675702]]), ['gene1'], ['tf1','tf2','tf3', 'tf4', 'tf5'])
rp = results_processor.ResultsProcessor([beta1, beta2], [rescaled_beta1, rescaled_beta2])
confidences = rp.compute_combined_confidences()
np.testing.assert_equal(confidences.values,
np.array([[ 0.1, 0. , 0. , 0.3, 0.6]]))
def test_combining_confidences_two_betas_negative_values_assert_nonzero_betas(self):
# data was taken from a subset of row 42 of b subtilis run
beta1 = pd.DataFrame(np.array([[-0.2841755, 0, 0.2280624, -0.3852462, 0.2545609]]), ['gene1'], ['tf1','tf2','tf3', 'tf4', 'tf5'])
rescaled_beta1 = pd.DataFrame(np.array([[0.09488207, 0, 0.07380172, 0.15597205, 0.07595131]]), ['gene1'], ['tf1','tf2','tf3', 'tf4', 'tf5'])
beta2 = pd.DataFrame(np.array([[0, 0.2612011, 0.1922999, 0.00000000, 0.19183277]]), ['gene1'], ['tf1','tf2','tf3', 'tf4', 'tf5'])
rescaled_beta2 = pd.DataFrame(np.array([[0, 0.09109101, 0.05830292, 0.00000000, 0.3675702]]), ['gene1'], ['tf1','tf2','tf3', 'tf4', 'tf5'])
rp = results_processor.ResultsProcessor([beta1, beta2], [rescaled_beta1, rescaled_beta2])
thresholded_mat = rp.threshold_and_summarize()
np.testing.assert_equal(rp.betas_non_zero, np.array([[1 ,1, 2, 1, 2]]))
def test_combining_confidences_two_betas_negative_values_assert_sign_betas(self):
# data was taken from a subset of row 42 of b subtilis run
beta1 = pd.DataFrame(np.array([[-0.2841755, 0, 0.2280624, -0.3852462, 0.2545609]]), ['gene1'], ['tf1','tf2','tf3', 'tf4', 'tf5'])
rescaled_beta1 = pd.DataFrame(np.array([[0.09488207, 0, 0.07380172, 0.15597205, 0.07595131]]), ['gene1'], ['tf1','tf2','tf3', 'tf4', 'tf5'])
beta2 = pd.DataFrame(np.array([[0, 0.2612011, 0.1922999, 0.00000000, 0.19183277]]), ['gene1'], ['tf1','tf2','tf3', 'tf4', 'tf5'])
rescaled_beta2 = pd.DataFrame(np.array([[0, 0.09109101, 0.05830292, 0.00000000, 0.3675702]]), ['gene1'], ['tf1','tf2','tf3', 'tf4', 'tf5'])
rp = results_processor.ResultsProcessor([beta1, beta2], [rescaled_beta1, rescaled_beta2])
thresholded_mat = rp.threshold_and_summarize()
np.testing.assert_equal(rp.betas_sign, np.array([[-1 ,1, 2, -1, 2]]))
def test_threshold_and_summarize_one_beta(self):
beta1 = pd.DataFrame(np.array([[1, 0], [0.5, 0]]), ['gene1', 'gene2'], ['tf1','tf2'])
rp = results_processor.ResultsProcessor([beta1], [beta1])
thresholded_mat = rp.threshold_and_summarize()
np.testing.assert_equal(thresholded_mat.values,
np.array([[1,0],[1,0]]))
def test_threshold_and_summarize_two_betas(self):
beta1 = pd.DataFrame(np.array([[1, 0], [0.5, 0]]), ['gene1', 'gene2'], ['tf1','tf2'])
beta2 = pd.DataFrame(np.array([[0, 0], [0.5, 1]]), ['gene1', 'gene2'], ['tf1','tf2'])
rp = results_processor.ResultsProcessor([beta1, beta2], [beta1, beta2])
thresholded_mat = rp.threshold_and_summarize()
np.testing.assert_equal(thresholded_mat.values,
np.array([[1,0],[1,1]]))
def test_threshold_and_summarize_three_betas(self):
beta1 = pd.DataFrame(np.array([[1, 0], [0.5, 0]]), ['gene1', 'gene2'], ['tf1','tf2'])
beta2 = pd.DataFrame(np.array([[0, 0], [0.5, 0]]), ['gene1', 'gene2'], ['tf1','tf2'])
beta3 = pd.DataFrame(np.array([[0.5, 0.2], [0.5, 0.1]]), ['gene1', 'gene2'], ['tf1','tf2'])
rp = results_processor.ResultsProcessor([beta1, beta2, beta3], [beta1, beta2, beta3])
thresholded_mat = rp.threshold_and_summarize()
np.testing.assert_equal(thresholded_mat.values,
np.array([[1,0],[1,0]]))
def test_threshold_and_summarize_three_betas_negative_values(self):
beta1 = pd.DataFrame(np.array([[1, 0], [-0.5, 0]]), ['gene1', 'gene2'], ['tf1','tf2'])
beta2 = pd.DataFrame(np.array([[0, 0], [-0.5, 1]]), ['gene1', 'gene2'], ['tf1','tf2'])
beta3 = pd.DataFrame(np.array([[-0.5, 0.2], [-0.5, 0.1]]), ['gene1', 'gene2'], ['tf1','tf2'])
rp = results_processor.ResultsProcessor([beta1, beta2, beta3], [beta1, beta2, beta3])
thresholded_mat = rp.threshold_and_summarize()
np.testing.assert_equal(thresholded_mat.values,
np.array([[1,0],[1,1]]))
####################
# TODO: Fix the following three tests so that they have unique and correct precision recall values
####################
def test_precision_recall_perfect_prediction(self):
gs = pd.DataFrame(np.array([[1, 0], [1, 0]]), ['gene1', 'gene2'], ['tf1','tf2'])
confidences = pd.DataFrame(np.array([[1, 0], [0.5, 0]]), ['gene1', 'gene2'], ['tf1','tf2'])
rp = results_processor.ResultsProcessor([], [])
recall, precision = rp.calculate_precision_recall(confidences, gs)
np.testing.assert_equal(recall, [ 0., 0.5, 1. ])
np.testing.assert_equal(precision, [ 1., 1., 1.])
def test_precision_recall_prediction_off(self):
gs = pd.DataFrame(np.array([[1, 0], [0, 1]]), ['gene1', 'gene2'], ['tf1','tf2'])
confidences = pd.DataFrame(np.array([[1, 0], [0.5, 0]]), ['gene1', 'gene2'], ['tf1','tf2'])
rp = results_processor.ResultsProcessor([], [])
recall, precision = rp.calculate_precision_recall(confidences, gs)
np.testing.assert_equal(recall, [ 0., 0.5, 0.5, 1., 1. ])
np.testing.assert_equal(precision, [ 1., 1., 0.5, 2./3, 0.5])
def test_precision_recall_bad_prediction(self):
gs = pd.DataFrame(np.array([[0, 1], [1, 0]]), ['gene1', 'gene2'], ['tf1','tf2'])
confidences = pd.DataFrame(np.array([[1, 0], [0, 0.5]]), ['gene1', 'gene2'], ['tf1','tf2'])
rp = results_processor.ResultsProcessor([], [])
recall, precision = rp.calculate_precision_recall(confidences, gs)
np.testing.assert_equal(recall, [ 0., 0., 0., 0.5, 1. ])
np.testing.assert_equal(precision, [ 0., 0., 0., 1./3, 0.5,])
def test_aupr_perfect_prediction(self):
gs = pd.DataFrame(np.array([[1, 0], [1, 0]]), ['gene1', 'gene2'], ['tf1','tf2'])
confidences = pd.DataFrame(np.array([[1, 0], [0.5, 0]]), ['gene1', 'gene2'], ['tf1','tf2'])
rp = results_processor.ResultsProcessor([], [])
recall, precision = rp.calculate_precision_recall(confidences, gs)
aupr = rp.calculate_aupr(recall, precision)
np.testing.assert_equal(aupr, 1.0)
def test_negative_gs_aupr_perfect_prediction(self):
gs = pd.DataFrame(np.array([[-1, 0], [-1, 0]]), ['gene1', 'gene2'], ['tf1','tf2'])
confidences = pd.DataFrame(np.array([[1, 0], [0.5, 0]]), ['gene1', 'gene2'], ['tf1','tf2'])
rp = results_processor.ResultsProcessor([], [])
recall, precision = rp.calculate_precision_recall(confidences, gs)
aupr = rp.calculate_aupr(recall, precision)
np.testing.assert_equal(aupr, 1.0)
def test_negative_gs_precision_recall_bad_prediction(self):
gs = pd.DataFrame(np.array([[0, -1], [-1, 0]]), ['gene1', 'gene2'], ['tf1','tf2'])
confidences = pd.DataFrame(np.array([[1, 0], [0, 0.5]]), ['gene1', 'gene2'], ['tf1','tf2'])
rp = results_processor.ResultsProcessor([], [])
recall, precision = rp.calculate_precision_recall(confidences, gs)
np.testing.assert_equal(recall, [ 0., 0., 0., 0.5, 1. ])
np.testing.assert_equal(precision, [ 0., 0., 0., 1./3, 0.5,])
def test_aupr_prediction_off(self):
gs = pd.DataFrame(np.array([[1, 0], [0, 1]]), ['gene1', 'gene2'], ['tf1','tf2'])
confidences = pd.DataFrame(np.array([[1, 0], [0.5, 0]]), ['gene1', 'gene2'], ['tf1','tf2'])
rp = results_processor.ResultsProcessor([], [])
recall, precision = rp.calculate_precision_recall(confidences, gs)
aupr = rp.calculate_aupr(recall, precision)
np.testing.assert_equal(aupr, 19./24)
def test_aupr_bad_prediction(self):
gs = pd.DataFrame(np.array([[0, 1], [1, 0]]), ['gene1', 'gene2'], ['tf1','tf2'])
confidences = pd.DataFrame(np.array([[1, 0], [0, 0.5]]), ['gene1', 'gene2'], ['tf1','tf2'])
rp = results_processor.ResultsProcessor([], [])
recall, precision = rp.calculate_precision_recall(confidences, gs)
aupr = rp.calculate_aupr(recall, precision)
np.testing.assert_approx_equal(aupr, 7./24)
def test_mean_and_median(self):
beta1 = pd.DataFrame(np.array([[1, 1], [1, 1]]), ['gene1', 'gene2'], ['tf1','tf2'])
beta2 = pd.DataFrame(np.array([[2, 2], [2, 2]]), ['gene1', 'gene2'], ['tf1','tf2'])
rp = results_processor.ResultsProcessor([beta1, beta2], [beta1, beta2])
mean, median = rp.mean_and_median(rp.betas)
np.testing.assert_equal(mean, np.array([[ 1.5, 1.5],[ 1.5, 1.5]]))
np.testing.assert_equal(median, np.array([[ 1.5, 1.5],[ 1.5, 1.5]])) | bsd-2-clause |
Trymzet/Upload_rates | Upload_Rates_v0.7.py | 1 | 13988 | # Author - Michal Zawadzki, [email protected]. Updates/modifications highly encouraged (infoanarchism!). :)
import openpyxl
import pandas as pd
import xml.etree.ElementTree as ElementTree
from zipfile import ZipFile
from urllib.request import urlopen, urlretrieve
from datetime import datetime
from os import remove
from numpy import array
pd.options.mode.chained_assignment = None
# download xml and parse to an ElementTree Element object
def xml_to_element_tree(rates_url, country_abbreviation):
try:
print("Downloading {} rates...\n".format(country_abbreviation))
rates_xml = urlopen(rates_url)
rates_string = rates_xml.read()
rates_element_tree = ElementTree.fromstring(rates_string)
return rates_element_tree
except:
print(r"Oops! Cannot retrieve {} rates from {}\n".format(country_abbreviation, rates_url))
return
# courtesy of Austin Taylor, http://www.austintaylor.io/ -- adapted for our use
def xml2df(root):
all_records = []
headers = []
for i, child in enumerate(root):
record = []
for subchild in child:
record.append(subchild.text)
if subchild.tag not in headers:
headers.append(subchild.tag)
all_records.append(record)
return pd.DataFrame(all_records, columns=headers)
# format the date as the bare int format is treated as General, and we need it to be an Excel Date type
# use openpyxl's builtin number formats for date_format
def format_date_to_excel(excel_file_location, date_format="mm-dd-yy"):
wb = openpyxl.load_workbook(excel_file_location)
ws = wb.active
for row in ws:
if "A" not in str((row[2]).value): # skip header rows, picked "A" because column C headers have it :)
row[2].number_format = date_format
wb.save(excel_file_location)
def prepare_morocco():
# clean old file, download the raw rates file
try:
remove("VATSPOTR.txt")
except FileNotFoundError:
pass
try:
print("Downloading MA rates...\n")
urlretrieve("http://polaris-pro-ent.houston.hpe.com:8080/VATSPOTR.zip", "VATSPOTR.zip")
except:
print(r"Oops! Cannot retrieve MA rates from http://polaris-pro-ent.houston.hpe.com:8080/VATSPOTR.zip\n")
myzip = ZipFile("VATSPOTR.zip", "r")
myzip.extractall()
myzip.close()
remove("VATSPOTR.zip")
def generate_header(country_abbreviation):
header = pd.DataFrame([["CURRENCY_RATES", "COMPANY_ID=HP", "", ""],
["BASE_CURRENCY", "FOREIGN_CURRENCY", "EFFECTIVE_DATE", "RATE"]])
sources = {"MA": "SOURCE=BOM-MAD", "TR": "SOURCE=TNB-TRY", "SK": "SOURCE=ECB-EUR",
"RU": "SOURCE=NBR-RUB", "PL": "SOURCE=PNB-PLN"}
header.iloc[0][2] = sources[country_abbreviation]
return header
def generate_excel_output(header, data, output_path, country_abbreviation, output_date_format="mm-dd-yy"):
try:
with pd.ExcelWriter(output_path, engine="openpyxl") as writer:
header.to_excel(writer, index=False, header=False)
data.to_excel(writer, index=False, header=False, startrow=2)
print("{} rates generated :)\n".format(country_abbreviation))
except:
print("Unable to generate {} rates. :(\n".format(country_abbreviation))
format_date_to_excel(output_path, date_format=output_date_format)
##########################################
################ MOROCCO #################
##########################################
prepare_morocco()
# read the txt to a DataFrame and leave only the currencies in scope
MA_csv = pd.read_csv("VATSPOTR.txt", sep="\t", header=1, index_col=False, parse_dates=[4])
MA_cur_in_scope = ["AED", "CAD", "CHF", "DZD", "EUR", "GBP", "LYD", "SAR", "SEK", "TND", "USD"]
MA_data = MA_csv[(MA_csv.iloc[:, 0] == "CBSEL") & (MA_csv.iloc[:, 2] == "MAD") & (MA_csv.iloc[:, 3].isin(MA_cur_in_scope))]
# note that rates in the raw file are normalized -- divide by the normalizer in order to get the actual rate
MA_normalizer = MA_data.iloc[:, 8]
MA_data.iloc[:, 7] = MA_data.iloc[:, 7].div(MA_normalizer)
# get rid of useless columns
output_columns = [2, 3, 4, 7]
useless_columns = MA_data[[x for x in range(MA_data.shape[1]) if x not in output_columns]]
MA_data.drop(useless_columns, axis=1, inplace=True)
# extract the rates' effective date for output file and the file's name -- must use Excel's number format
MA_effective_date = MA_data.iloc[0, 2]
MA_excel_date_format = (MA_effective_date - datetime(1899, 12, 31)).days + 1
#MA_data.iloc[:, 2] = array(MA_excel_date_format) unnecessary? -> delete MA_excel_date_format too
# create the final xlsx
MA_output_path = r"..\Upload_rates\Morocco Rates\MOROCCO_RATES\MOROCCO_RATES_" + str(MA_effective_date)[:-9] + ".xlsx"
MA_header = generate_header("MA")
generate_excel_output(MA_header, MA_data, MA_output_path, "MA")
# cleanup
remove("VATSPOTR.txt")
##########################################
################# TURKEY #################
##########################################
TR_etree = xml_to_element_tree("http://www.tcmb.gov.tr/kurlar/today.xml", "TR")
TR_number_of_rates = 12 # first twelve currencies
TR_cur_in_scope = pd.Series([child.attrib["CurrencyCode"] for child in TR_etree[:TR_number_of_rates]])
# convert the ElementTree to a DataFrame for easy manipulation and Excel conversion
TR_xml_df = xml2df(TR_etree) # TODO just use ET
# retrieve and format the dates
TR_effective_dates = pd.to_datetime(pd.Series([TR_etree.attrib["Date"] for _rate in range(TR_number_of_rates)]))
TR_effective_date = TR_effective_dates[0]
TR_excel_date_format = (TR_effective_date - datetime(1899, 12, 31)).days + 1
TR_base_cur = pd.Series(["TRY" for _rate in range(TR_number_of_rates)])
TR_rates = TR_xml_df.iloc[:TR_number_of_rates, 4].astype(float)
# use the real values of the rates
TR_normalizers = TR_xml_df.iloc[:TR_number_of_rates, 0].astype(int)
TR_rates_denormalized = TR_rates.div(TR_normalizers)
TR_data = pd.concat([TR_base_cur, TR_cur_in_scope, TR_effective_dates, TR_rates_denormalized], axis=1)
# convert dates to Excel's numeric date format
#TR_data.iloc[:, 2] = array(TR_excel_date_format) unnecessary? + delete excel_format
# create the final xlsx TODO: define a function for creating this path; use a settings file
TR_output_path = r"..\Upload_rates\Other Rates\TURKEY_RATES\TURKEY_RATES_" + str(TR_effective_date)[:-9] + ".xlsx"
TR_header = generate_header("TR")
generate_excel_output(TR_header, TR_data, TR_output_path, "TR")
##########################################
################ SLOVAKIA ################
##########################################
SK_etree = xml_to_element_tree("http://www.ecb.europa.eu/stats/eurofxref/eurofxref-daily.xml", "SK")
SK_number_of_rates = len(SK_etree[2][0].getchildren())
SK_base_cur = pd.Series(["EUR" for _rate in range(SK_number_of_rates)])
SK_cur_in_scope = pd.Series([SK_etree[2][0][i].attrib["currency"] for i in range(SK_number_of_rates)])
SK_rates = pd.Series([SK_etree[2][0][i].attrib["rate"] for i in range(SK_number_of_rates)]).astype(float)
# dates magic
SK_effective_dates = pd.to_datetime(pd.Series([SK_etree[2][0].attrib["time"] for _rate in range(SK_number_of_rates)]))
SK_effective_date = SK_effective_dates[0]
SK_excel_date_format = (SK_effective_date - datetime(1899, 12, 31)).days + 1
SK_data = pd.concat([SK_base_cur, SK_cur_in_scope, SK_effective_dates, SK_rates], axis=1)
# reverse rate values, so that it's e.g. USD/EUR and not EUR/USD
SK_data.iloc[:, -1] = 1 / SK_data.iloc[:, -1]
# paste excel date int format
#SK_data.iloc[:, 2] = array(SK_excel_date_format) unnecessary?
# generate the final xlsx
SK_output_path = r"..\Upload_rates\Other Rates\SLOVAKIA_RATES\SLOVAKIA_RATES_" + str(SK_effective_date)[:-9] + ".xlsx"
SK_header = generate_header("SK")
generate_excel_output(SK_header, SK_data, SK_output_path, "SK")
##########################################
################# RUSSIA #################
##########################################
RU_etree = xml_to_element_tree("http://www.cbr.ru/scripts/XML_daily_eng.asp?", "RU")
RU_number_of_rates = len(RU_etree.getchildren())
RU_base_cur = pd.Series(["RUB" for _rate in range(RU_number_of_rates)])
RU_cur_in_scope = pd.Series([RU_etree[i][1].text for i in range(RU_number_of_rates)])
# retrieve the rates in string format and convert to float
RU_rates_txt = pd.Series([RU_etree[i][-1].text for i in range(RU_number_of_rates)])
RU_rates = RU_rates_txt.str.replace(",", ".").apply(lambda x: float(x))
# Dates magic. Replace symbols for easy conversion. Pandas infers the date format incorrectly; adjust it manually.
RU_effective_dates_str = pd.Series([RU_etree.attrib["Date"].replace(".", "/") for _rate in range(RU_number_of_rates)])
RU_effective_dates = pd.to_datetime(RU_effective_dates_str)
RU_effective_dates = pd.to_datetime(RU_effective_dates.dt.strftime("%d/%m/%Y"))
RU_effective_date = RU_effective_dates[0]
RU_excel_date_format = (RU_effective_date - datetime(1899, 12, 31)).days + 1
# normalize the rates
RU_normalizers = [int(RU_etree[i][2].text) for i in range(RU_number_of_rates)]
RU_rates = RU_rates.div(RU_normalizers)
RU_data = pd.concat([RU_base_cur, RU_cur_in_scope, RU_effective_dates, RU_rates], axis=1)
#RU_data.iloc[:, 2] = array(RU_excel_date_format) unnecessary?
# delete out_of_scope rates, change TMT to TMM
RU_out_of_scope_rates = ["XDR", "XAU"]
RU_data = RU_data[~(RU_data.iloc[:, 1].isin(RU_out_of_scope_rates))]
RU_data.replace("TMT", "TMM", inplace=True)
# generate the final xlsx
RU_output_path = r"..\Upload_rates\Other Rates\RUSSIA_RATES\RUSSIA_RATES_" + str(RU_effective_date)[:-9] + ".xlsx"
RU_header = generate_header("RU")
generate_excel_output(RU_header, RU_data, RU_output_path, "RU")
##########################################
################ POLAND A ################
##########################################
PL_A_etree = xml_to_element_tree("http://www.nbp.pl/kursy/xml/LastA.xml", "PL A")
PL_A_number_of_rates = len(PL_A_etree.getchildren()) - 2
PL_A_base_cur = pd.Series(["PLN" for _rate in range(PL_A_number_of_rates)])
PL_A_cur_in_scope = pd.Series([PL_A_etree[i][2].text for i in range(2, PL_A_number_of_rates + 2)])
# retrieve the rates in string format and convert to float
PL_A_rates_txt = pd.Series([PL_A_etree[i][-1].text for i in range(2, PL_A_number_of_rates + 2)])
PL_A_rates = PL_A_rates_txt.str.replace(",", ".").apply(lambda x: float(x))
# denormalize
PL_A_normalizers = [int(PL_A_etree[i][1].text) for i in range(2, PL_A_number_of_rates + 2)]
PL_A_rates = PL_A_rates.div(PL_A_normalizers)
# dates magic
PL_A_effective_dates_str = pd.Series([PL_A_etree[1].text for _rate in range(PL_A_number_of_rates)])
PL_A_effective_dates = pd.to_datetime(PL_A_effective_dates_str)
PL_A_effective_date = PL_A_effective_dates[0]
PL_A_effective_dates = pd.to_datetime(PL_A_effective_dates.dt.strftime("%m/%d/%Y"))
PL_A_data = pd.concat([PL_A_base_cur, PL_A_cur_in_scope, PL_A_effective_dates, PL_A_rates], axis=1)
#PL_A_data.iloc[:, 2] = array(PL_A_excel_date_format) unnecessary?
PL_A_out_of_scope_rate = "XDR"
PL_A_replacement_rates = {"AFN": "AFA", "GHS": "GHC", "MGA": "MGF", "MZN": "MZM", "SDG": "SDD", "SRD": "SRG", "ZWL": "ZWD"}
PL_A_data = PL_A_data[PL_A_data.iloc[:, 1] != PL_A_out_of_scope_rate]
PL_A_data.replace(PL_A_replacement_rates, inplace=True)
# generate the final xlsx
PL_A_output_path = r"..\Upload_rates\Other Rates\POLAND_A_RATES\POLAND_A_RATES_" + str(PL_A_effective_date)[:-9] + ".xlsx"
PL_A_header = generate_header("PL")
generate_excel_output(PL_A_header, PL_A_data, PL_A_output_path, "PL A")
##########################################
################ POLAND B ################
##########################################
PL_B_etree = xml_to_element_tree("http://www.nbp.pl/kursy/xml/LastB.xml", "PL B")
PL_B_number_of_rates = len(PL_B_etree.getchildren()) - 2
PL_B_base_cur = pd.Series(["PLN" for _rate in range(PL_B_number_of_rates)])
PL_B_cur_in_scope = pd.Series([PL_B_etree[i][2].text for i in range(2, PL_B_number_of_rates + 2)])
# retrieve the rates in string format and convert to float
PL_B_rates_txt = pd.Series([PL_B_etree[i][-1].text for i in range(2, PL_B_number_of_rates + 2)])
PL_B_rates = PL_B_rates_txt.str.replace(",", ".").apply(lambda x: float(x))
# denormalize
PL_B_normalizers = [int(PL_B_etree[i][1].text) for i in range(2, PL_B_number_of_rates + 2)]
PL_B_rates = PL_B_rates.div(PL_B_normalizers)
# dates magic
PL_B_effective_dates_str = pd.Series([PL_B_etree[1].text for _rate in range(PL_B_number_of_rates)])
PL_B_effective_dates = pd.to_datetime(PL_B_effective_dates_str)
PL_B_effective_date = PL_B_effective_dates[0]
PL_B_effective_dates = pd.to_datetime(PL_B_effective_dates.dt.strftime("%m/%d/%Y"))
PL_B_data = pd.concat([PL_B_base_cur, PL_B_cur_in_scope, PL_B_effective_dates, PL_B_rates], axis=1)
#PL_B_data.iloc[:, 2] = array(PL_B_excel_date_format) unnecessary?
PL_B_replacement_rates = {"AFN": "AFA", "GHS": "GHC", "MGA": "MGF", "MZN": "MZM", "SDG": "SDD", "SRD": "SRG",
"ZWL": "ZWD", "ZMW": "ZMK"}
PL_B_data.replace(PL_B_replacement_rates, inplace=True)
# generate the final xlsx
PL_B_output_path = r"..\Upload_rates\Other Rates\POLAND_B_RATES\POLAND_B_RATES_" + str(PL_B_effective_date)[:-9] + ".xlsx"
PL_B_header = generate_header("PL")
generate_excel_output(PL_B_header, PL_B_data, PL_B_output_path, "PL B")
# TODO
# TODO create a settings file with the destination folder for the output file
# TODO
# if the directory does not exist - create it
# beautify the final date converting if statement - maybe isinstance(row[2].value, basestring)?
# refactor
# add a log file?
| unlicense |
djgagne/scikit-learn | sklearn/datasets/__init__.py | 176 | 3671 | """
The :mod:`sklearn.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
"""
from .base import load_diabetes
from .base import load_digits
from .base import load_files
from .base import load_iris
from .base import load_linnerud
from .base import load_boston
from .base import get_data_home
from .base import clear_data_home
from .base import load_sample_images
from .base import load_sample_image
from .covtype import fetch_covtype
from .mlcomp import load_mlcomp
from .lfw import load_lfw_pairs
from .lfw import load_lfw_people
from .lfw import fetch_lfw_pairs
from .lfw import fetch_lfw_people
from .twenty_newsgroups import fetch_20newsgroups
from .twenty_newsgroups import fetch_20newsgroups_vectorized
from .mldata import fetch_mldata, mldata_filename
from .samples_generator import make_classification
from .samples_generator import make_multilabel_classification
from .samples_generator import make_hastie_10_2
from .samples_generator import make_regression
from .samples_generator import make_blobs
from .samples_generator import make_moons
from .samples_generator import make_circles
from .samples_generator import make_friedman1
from .samples_generator import make_friedman2
from .samples_generator import make_friedman3
from .samples_generator import make_low_rank_matrix
from .samples_generator import make_sparse_coded_signal
from .samples_generator import make_sparse_uncorrelated
from .samples_generator import make_spd_matrix
from .samples_generator import make_swiss_roll
from .samples_generator import make_s_curve
from .samples_generator import make_sparse_spd_matrix
from .samples_generator import make_gaussian_quantiles
from .samples_generator import make_biclusters
from .samples_generator import make_checkerboard
from .svmlight_format import load_svmlight_file
from .svmlight_format import load_svmlight_files
from .svmlight_format import dump_svmlight_file
from .olivetti_faces import fetch_olivetti_faces
from .species_distributions import fetch_species_distributions
from .california_housing import fetch_california_housing
from .rcv1 import fetch_rcv1
__all__ = ['clear_data_home',
'dump_svmlight_file',
'fetch_20newsgroups',
'fetch_20newsgroups_vectorized',
'fetch_lfw_pairs',
'fetch_lfw_people',
'fetch_mldata',
'fetch_olivetti_faces',
'fetch_species_distributions',
'fetch_california_housing',
'fetch_covtype',
'fetch_rcv1',
'get_data_home',
'load_boston',
'load_diabetes',
'load_digits',
'load_files',
'load_iris',
'load_lfw_pairs',
'load_lfw_people',
'load_linnerud',
'load_mlcomp',
'load_sample_image',
'load_sample_images',
'load_svmlight_file',
'load_svmlight_files',
'make_biclusters',
'make_blobs',
'make_circles',
'make_classification',
'make_checkerboard',
'make_friedman1',
'make_friedman2',
'make_friedman3',
'make_gaussian_quantiles',
'make_hastie_10_2',
'make_low_rank_matrix',
'make_moons',
'make_multilabel_classification',
'make_regression',
'make_s_curve',
'make_sparse_coded_signal',
'make_sparse_spd_matrix',
'make_sparse_uncorrelated',
'make_spd_matrix',
'make_swiss_roll',
'mldata_filename']
| bsd-3-clause |
matthieudumont/dipy | doc/examples/reconst_mapmri.py | 5 | 4013 | """
================================================================
Continuous and analytical diffusion signal modelling with MAPMRI
================================================================
We show how to model the diffusion signal as a linear combination
of continuous functions from the MAPMRI basis [Ozarslan2013]_.
We also compute the analytical Orientation Distribution Function (ODF),
the Return To the Origin Probability (RTOP), the Return To the Axis
Probability (RTAP), and the Return To the Plane Probability (RTPP).
First import the necessary modules:
"""
from dipy.reconst.mapmri import MapmriModel
from dipy.viz import fvtk
from dipy.data import fetch_cenir_multib, read_cenir_multib, get_sphere
from dipy.core.gradients import gradient_table
import matplotlib.pyplot as plt
"""
Download and read the data for this tutorial.
MAPMRI requires multi-shell data, to properly fit the radial part of the basis.
The total size of the downloaded data is 1760 MBytes, however you only need to
fetch it once. Parameter ``with_raw`` of function ``fetch_cenir_multib`` is set
to ``False`` to only download the eddy-current/motion corrected data.
"""
fetch_cenir_multib(with_raw=False)
"""
For this example we select only the shells with b-values equal to those of the
Human Connectome Project (HCP).
"""
bvals = [1000, 2000, 3000]
img, gtab = read_cenir_multib(bvals)
data = img.get_data()
data_small = data[40:65, 50:51, 35:60]
print('data.shape (%d, %d, %d, %d)' % data.shape)
"""
data contains the voxel data and gtab contains a GradientTable
object (gradient information e.g. b-values). For example, to show the b-values
it is possible to write print(gtab.bvals).
Instantiate the MAPMRI Model.
radial_order is the radial order of the MAPMRI basis.
For details regarding the parameters see [Ozarslan2013]_.
"""
radial_order = 4
map_model = MapmriModel(gtab, radial_order=radial_order,
lambd=2e-1, eap_cons=False)
"""
Fit the MAPMRI model to the data
"""
mapfit = map_model.fit(data_small)
"""
Load an odf reconstruction sphere
"""
sphere = get_sphere('symmetric724')
"""
Compute the ODFs
"""
odf = mapfit.odf(sphere)
print('odf.shape (%d, %d, %d, %d)' % odf.shape)
"""
Display the ODFs
"""
r = fvtk.ren()
sfu = fvtk.sphere_funcs(odf, sphere, colormap='jet')
sfu.RotateX(-90)
fvtk.add(r, sfu)
fvtk.record(r, n_frames=1, out_path='odfs.png', size=(600, 600))
"""
.. figure:: odfs.png
:align: center
**Orientation distribution functions**.
With MAPMRI it is also possible to extract the Return To the Origin Probability
(RTOP), the Return To the Axis Probability (RTAP), and the Return To the Plane
Probability (RTPP). These ensemble average propagator (EAP) features directly
reflect microstructural properties of the underlying tissues [Ozarslan2013]_.
"""
rtop = mapfit.rtop()
rtap = mapfit.rtap()
rtpp = mapfit.rtpp()
"""
Show the maps and save them in MAPMRI_maps.png.
"""
fig = plt.figure(figsize=(6, 6))
ax1 = fig.add_subplot(2, 2, 1, title=r'$\sqrt[3]{RTOP}$')
ax1.set_axis_off()
ind = ax1.imshow((rtop[:, 0, :]**(1.0 / 3)).T,
interpolation='nearest', origin='lower', cmap=plt.cm.gray)
plt.colorbar(ind, shrink=0.8)
ax2 = fig.add_subplot(2, 2, 2, title=r'$\sqrt{RTAP}$')
ax2.set_axis_off()
ind = ax2.imshow((rtap[:, 0, :]**0.5).T,
interpolation='nearest', origin='lower', cmap=plt.cm.gray)
plt.colorbar(ind, shrink=0.8)
ax3 = fig.add_subplot(2, 2, 3, title=r'$RTPP$')
ax3.set_axis_off()
ind = ax3.imshow(rtpp[:, 0, :].T, interpolation='nearest',
origin='lower', cmap=plt.cm.gray)
plt.colorbar(ind, shrink=0.8)
plt.savefig('MAPMRI_maps.png')
"""
.. figure:: MAPMRI_maps.png
:align: center
**RTOP, RTAP, and RTPP calculated using MAPMRI**.
.. [Ozarslan2013] Ozarslan E. et. al, "Mean apparent propagator (MAP) MRI: A novel
diffusion imaging method for mapping tissue microstructure",
NeuroImage, 2013.
.. include:: ../links_names.inc
"""
| bsd-3-clause |
gsnyder206/synthetic-image-morph | morphology_analysis/illcan_quickplots.py | 1 | 9048 | import math
import string
import sys
import struct
import matplotlib
#matplotlib.use('PDF')
import matplotlib.pyplot as pyplot
import matplotlib.colors as pycolors
import matplotlib.cm as cm
from matplotlib.ticker import FormatStrFormatter
import numpy as np
import cPickle
import asciitable
import scipy.ndimage
import scipy.stats as ss
import scipy.signal
import scipy as sp
import scipy.odr as odr
from scipy.integrate import quad
import glob
import os
import gzip
import shutil
import congrid
import astropy.io.ascii as ascii
import warnings
import subprocess
import astropy
import astropy.io.fits as pyfits
import astropy.units as u
from astropy.cosmology import WMAP7,z_at_value
import copy
import datetime
import ezgal
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
from matplotlib.collections import PolyCollection
import parse_illustris_morphs as pim
import illustris_python as ilpy
from parse_illustris_morphs import *
from PyML import machinelearning as pyml
from PyML import convexhullclassifier as cvx
import statmorph
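# Illustris cosmology constants: ilh is the dimensionless Hubble parameter
# ("little h") used by the simulation, illcos the matching flat LCDM
# cosmology, and sq_arcsec_per_sr the number of square arcseconds per
# steradian.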
ilh = 0.704
illcos = astropy.cosmology.FlatLambdaCDM(H0=70.4,Om0=0.2726,Ob0=0.0456)
sq_arcsec_per_sr = 42545170296.0
def make_pc_dict(mo,fi):
parameters = ['C','M20','GINI','ASYM','MPRIME','I','D']
pcd = {}
pcd['C'] = mo.cc[:,:,fi,0].flatten()
pcd['M20'] = mo.m20[:,:,fi,0].flatten()
pcd['GINI'] = mo.gini[:,:,fi,0].flatten()
pcd['ASYM'] = mo.asym[:,:,fi,0].flatten()
pcd['MPRIME'] = mo.mid1_mstat[:,:,fi,0].flatten()
pcd['I'] = mo.mid1_istat[:,:,fi,0].flatten()
pcd['D'] = mo.mid1_dstat[:,:,fi,0].flatten()
npmorph = pyml.dataMatrix(pcd,parameters)
pc = pyml.pcV(npmorph)
return parameters, pcd, pc, pcd
def pc1_sizemass_panel(f1,nr,nc,nt,size,mass,pc1,xlim=[5.0e9,5.0e11],ylim={'sizemass':[0.7,20.0]},gridsize=12,vlim=[-2,2]):
rlim = np.log10(np.asarray(ylim['sizemass']))
mlim = np.log10(np.asarray(xlim))
extent=[mlim[0],mlim[1],rlim[0],rlim[1]]
print extent
s=11
nzi = np.where(size > 0.0)[0]
axi = f1.add_subplot(nr,nc,nt)
axi.locator_params(nbins=5,prune='both')
print mass.shape, size.shape, pc1.shape
axi.hexbin(mass[nzi],size[nzi],C=-1.0*pc1[nzi],gridsize=gridsize,xscale='log',yscale='log',reduce_C_function=np.median,mincnt=3,extent=extent,vmin=vlim[0],vmax=vlim[1])
axi.tick_params(axis='both',which='major',labelsize=s)
axi.yaxis.set_major_formatter(FormatStrFormatter("%.1f"))
if nt % nc == 1:
axi.set_ylabel(r'$r_{1/2}$ [kpc]',size=s,labelpad=1)
else:
axi.set_yticklabels([])
if nt > (nr-1)*nc:
axi.set_xlabel(r'$M_{*}$ [$M_{\odot}$]',size=s,labelpad=1)
else:
axi.set_xticklabels([])
return axi,s
def pc1_sfrmass_panel(f1,nr,nc,nt,sfr,mass,pc1,xlim=[5.0e9,5.0e11],ylim={'sfrmass':[5,500.0]},gridsize=12,vlim=[-2,2]):
rlim = np.log10(np.asarray(ylim['sfrmass']))
mlim = np.log10(np.asarray(xlim))
extent=[mlim[0],mlim[1],rlim[0],rlim[1]]
print extent
s=11
nzi = np.where(sfr > 0.0)[0]
axi = f1.add_subplot(nr,nc,nt)
axi.locator_params(nbins=5,prune='both')
print mass.shape, sfr.shape, pc1.shape
axi.hexbin(mass[nzi],sfr[nzi],C=-1.0*pc1[nzi],gridsize=gridsize,xscale='log',yscale='log',reduce_C_function=np.median,mincnt=3,extent=extent,vmin=vlim[0],vmax=vlim[1])
axi.tick_params(axis='both',which='major',labelsize=s)
if nt % nc == 1:
axi.set_ylabel(r'Star Formation Rate',size=s-1,labelpad=1)
else:
axi.set_yticklabels([])
if nt > (nr-1)*nc:
axi.set_xlabel(r'Stellar Mass [$M_{\odot}$]',size=s-1,labelpad=1)
else:
axi.set_xticklabels([])
return axi,s
def do_pc1_sizemass(figfile,data=None, snaps=None,filters=None,**kwargs):
assert data is not None
assert snaps is not None
f1 = pyplot.figure(figsize=(6.5,3.5), dpi=150)
pyplot.subplots_adjust(left=0.08, right=0.98, bottom=0.11, top=0.98,wspace=0.0,hspace=0.05)
for i,s,f in zip(range(len(snaps)),snaps,filters):
mo = data['morph'][s]
filternames = mo.filters
fi = np.where(filternames==f)[0]
assert fi.shape[0]==1
fi=fi[0]
print filternames[fi]
redshift = mo.redshift[0,0,fi,0]
print redshift
kpc_per_arcsec = illcos.kpc_proper_per_arcmin(redshift).value/60.0
size_pixels = data['morph'][s].rhalfc[:,:,fi,0].flatten() #note:rhalfe values incorrect in first parsing
pix_arcsec = data['morph'][s].pix_arcsec[0,0,fi,0].flatten()
print size_pixels.shape
print pix_arcsec.shape
print fi.shape
print kpc_per_arcsec
size_kpc = size_pixels*pix_arcsec*kpc_per_arcsec
print size_kpc.shape
print pix_arcsec
print np.max(size_kpc), np.max(size_pixels)
mstar_1 = data['subhalos'][s]['SubhaloMassInRadType'][:,4].flatten()*(1.0e10)/ilh
print mstar_1.shape
mass = np.swapaxes(np.tile(mstar_1,(4,1)),0,1)
print mass.shape
parameters,npdict,pc,pcd = make_pc_dict(mo,fi)
pc1 = pc.X[:,0].flatten()
print pc1.shape
axi,lsize = pc1_sizemass_panel(f1,2,3,i+1,size_kpc,mass.flatten(),pc1,**kwargs)
axi.annotate('z={:4.1f}'.format(redshift),(0.85,0.90),xycoords='axes fraction',size=lsize,color='black',ha='center',va='center')
axi.annotate(f,(0.75,0.10),xycoords='axes fraction',size=lsize,color='black',ha='center',va='center')
f1.savefig(figfile)
pyplot.close(f1)
return
def do_pc1_sfrmass(figfile,data=None, snaps=None,filters=None,**kwargs):
assert data is not None
assert snaps is not None
f1 = pyplot.figure(figsize=(7.0,3.5), dpi=150)
pyplot.subplots_adjust(left=0.08, right=0.98, bottom=0.11, top=0.98,wspace=0.0,hspace=0.05)
for i,s,f in zip(range(len(snaps)),snaps,filters):
mo = data['morph'][s]
filternames = mo.filters
fi = np.where(filternames==f)[0]
assert fi.shape[0]==1
fi=fi[0]
print filternames[fi]
redshift = mo.redshift[0,0,fi,0]
print redshift
kpc_per_arcsec = illcos.kpc_proper_per_arcmin(redshift).value/60.0
size_pixels = data['morph'][s].rhalfc[:,:,fi,0].flatten() #note:rhalfe values incorrect in first parsing
pix_arcsec = data['morph'][s].pix_arcsec[0,0,fi,0].flatten()
print size_pixels.shape
print pix_arcsec.shape
print fi.shape
print kpc_per_arcsec
size_kpc = size_pixels*pix_arcsec*kpc_per_arcsec
print size_kpc.shape
print pix_arcsec
print np.max(size_kpc), np.max(size_pixels)
mstar_1 = data['subhalos'][s]['SubhaloMassInRadType'][:,4].flatten()*(1.0e10)/ilh
print mstar_1.shape
mass = np.swapaxes(np.tile(mstar_1,(4,1)),0,1)
print mass.shape
sfr_1 = data['subhalos'][s]['SubhaloSFR'][:].flatten()
sfr = np.swapaxes(np.tile(sfr_1,(4,1)),0,1)
parameters,npdict,pc,pcd = make_pc_dict(mo,fi)
pc1 = pc.X[:,0].flatten()
print pc1.shape
axi,lsize = pc1_sfrmass_panel(f1,2,3,i+1,sfr.flatten(),mass.flatten(),pc1,**kwargs)
axi.annotate('z={:4.1f}'.format(redshift),(0.15,0.90),xycoords='axes fraction',size=lsize,color='black',ha='center',va='center')
axi.annotate(f,(0.75,0.10),xycoords='axes fraction',size=lsize,color='black',ha='center',va='center')
f1.savefig(figfile)
pyplot.close(f1)
return
def do_candels_loading(filename = 'MorphDataObjectLight_SB25.pickle',hstonly=False):
snaps = ['snapshot_060','snapshot_064','snapshot_068','snapshot_075','snapshot_085','snapshot_103']
if hstonly is True:
filters = ['WFC3-F160W','WFC3-F160W','WFC3-F160W','WFC3-F105W','WFC3-F105W','ACS-F814W']
else:
filters = ['NC-F200W','NC-F150W','NC-F150W','NC-F115W','ACS-F814W','ACS-F606W']
data_dict,snaps = pim.load_everything(snaps=snaps,filename=filename)
return snaps, filters, data_dict
def do_jandels_loading(filename = 'MorphDataObjectLight_SB27.pickle'):
snaps = ['snapshot_041','snapshot_045','snapshot_049','snapshot_054','snapshot_060','snapshot_064']
filters = ['NC-F356W','NC-F356W','NC-F277W','NC-F200W','NC-F200W','NC-F150W']
data_dict,snaps = pim.load_everything(snaps=snaps,filename=filename)
return snaps, filters, data_dict
def do_all_plots(snaps,fils,data,label='CANDELS',**kwargs):
do_pc1_sizemass('PC1_sizemass_'+label+'.pdf',data=data,snaps=snaps,filters=fils,**kwargs)
do_pc1_sfrmass('PC1_sfrmass_'+label+'.pdf',data=data,snaps=snaps,filters=fils,vlim=[-1,2],gridsize=10,**kwargs)
return
if __name__=="__main__":
#as of 4/1/2016:
#available_filters = ['WFC3-F336W','ACS-F435W','ACS-F606W','ACS-F814W','WFC3-F105W','WFC3-F160W','NC-F115W','NC-F150W','NC-F200W','NC-F277W','NC-F356W','NC-F444W']
snaps, filters, data_dict = do_candels_loading()
do_pc1_sizemass('PC1_sizemass.pdf', data=data_dict, snaps=snaps, filters=filters)
| gpl-2.0 |
aalmah/fuel | docs/conf.py | 10 | 9795 | # -*- coding: utf-8 -*-
#
# Fuel documentation build configuration file, created by
# sphinx-quickstart2 on Wed Oct 8 17:59:44 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
from mock import Mock as MagicMock
from sphinx.ext.autodoc import cut_lines
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx.ext.graphviz',
'sphinx.ext.intersphinx',
'matplotlib.sphinxext.plot_directive',
]
intersphinx_mapping = {
'theano': ('http://theano.readthedocs.org/en/latest/', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
'python': ('http://docs.python.org/3.4', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None)
}
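# Mock heavy optional dependencies so that autodoc can import fuel on
# Read the Docs, where these compiled packages are not installed.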
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return Mock()
MOCK_MODULES = ['h5py', 'zmq']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
graphviz_dot_args = ['-Gbgcolor=#fcfcfc']  # To match the RTD theme
# Render todo lists
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Fuel'
copyright = u'2014, Université de Montréal'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import fuel
version = '.'.join(fuel.__version__.split('.')[:2])
# The full version, including alpha/beta/rc tags.
release = fuel.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Fueldoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Fuel.tex', u'Fuel Documentation',
u'Université de Montréal', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'fuel', u'Fuel Documentation',
[u'Université de Montréal'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Fuel', u'Fuel Documentation',
u'Université de Montréal', 'Fuel', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
def skip_abc(app, what, name, obj, skip, options):
return skip or name.startswith('_abc')
def setup(app):
app.connect('autodoc-process-docstring', cut_lines(2, what=['module']))
app.connect('autodoc-skip-member', skip_abc)
| mit |
drublackberry/fantastic_demos | TextClassification/working_with_text.py | 1 | 3298 | # -*- coding: utf-8 -*-
"""
Created on Sun Jan 15 18:04:38 2017
@author: Andreu Mora
"""
import logging
logging.basicConfig()
categories = ['alt.atheism', 'soc.religion.christian', 'comp.graphics', 'sci.med']
# Load the data
from sklearn.datasets import fetch_20newsgroups
twenty_train = fetch_20newsgroups(subset='train', categories=categories, shuffle=True, random_state=42)
print twenty_train.target_names
print len(twenty_train.data)
print len(twenty_train.filenames)
print "\n".join(twenty_train.data[0].split("\n")[:3])
print twenty_train.target_names[twenty_train.target[0]]
# Extract features
from sklearn.feature_extraction.text import CountVectorizer
count_vec = CountVectorizer()
X_train_counts = count_vec.fit_transform(twenty_train.data)
print X_train_counts.shape
print count_vec.vocabulary_.get('algorithm')
# From occurrences to frequencies
from sklearn.feature_extraction.text import TfidfTransformer
tf_transformer = TfidfTransformer(use_idf=False).fit(X_train_counts)
X_train_tfidf = tf_transformer.transform(X_train_counts)
print X_train_tfidf.shape
# Classify using Naive Bayes
from sklearn.naive_bayes import MultinomialNB
clf = MultinomialNB().fit(X_train_tfidf, twenty_train.target)
# Predict new documents
docs_new = ['God is love', 'OpenGL on the GPU is fast']
X_new_counts = count_vec.transform(docs_new)
X_new_tfidf = tf_transformer.transform(X_new_counts)
predicted = clf.predict(X_new_tfidf)
for doc, category in zip(docs_new, predicted):
print '{:s} => {:s}'.format(doc, twenty_train.target_names[category])
# Building a pipeline
from sklearn.pipeline import Pipeline
text_clf = Pipeline([('vect', CountVectorizer()), ('tfidf', TfidfTransformer()), ('clf', MultinomialNB())])
text_clf = text_clf.fit(twenty_train.data, twenty_train.target)
# Evaluation of the performance
import numpy as np
twenty_test = fetch_20newsgroups(subset='test', categories=categories, shuffle=True, random_state=42)
docs_test = twenty_test.data
predicted = text_clf.predict(docs_test)
print np.mean(predicted==twenty_test.target)
# Using an SVM
from sklearn.linear_model import SGDClassifier
text_clf = Pipeline ([('vect', CountVectorizer()), \
('tfidf', TfidfTransformer()), \
('clf', SGDClassifier(loss='hinge', penalty='l2', alpha=1e-3, n_iter=5, random_state=42))])
text_clf.fit(twenty_train.data, twenty_train.target)
predicted = text_clf.predict(docs_test)
print np.mean(predicted==twenty_test.target)
# Better performance statistics
from sklearn import metrics
print metrics.classification_report(twenty_test.target, predicted, target_names=twenty_test.target_names)
# Parameter tuning using grid_search
from sklearn.model_selection import GridSearchCV
parameters = {'vect__ngram_range': [(1,1), (1,2)], \
'tfidf__use_idf': (True, False), \
'clf__alpha': (1e-2, 1e-3) }
gs_clf = GridSearchCV(text_clf, parameters, n_jobs=1)
gs_clf = gs_clf.fit(twenty_train.data[:400], twenty_train.target[:400])
print twenty_train.target_names[gs_clf.predict(['God is love'])[0]]
print gs_clf.best_score_
for param_name in sorted(parameters.keys()):
print '{:s} => {:s}'.format(param_name, str(gs_clf.best_params_[param_name]))
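# Optional follow-up (not in the original tutorial): summarize the whole grid,
# assuming scikit-learn >= 0.18, where GridSearchCV exposes cv_results_
# (the script already imports GridSearchCV from sklearn.model_selection).
for mean_score, params in zip(gs_clf.cv_results_['mean_test_score'],
                              gs_clf.cv_results_['params']):
    print '{:.3f} for {!r}'.format(mean_score, params)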
| mit |
rl-institut/reegis_hp | reegis_hp/de21/weather.py | 3 | 6187 | __copyright__ = "Uwe Krien"
__license__ = "GPLv3"
import pandas as pd
import os
import calendar
import logging
import shapely.wkt as wkt
import tools
try:
import oemof.db.coastdat as coastdat
import oemof.db as db
import sqlalchemy
except ImportError:
coastdat = None
db = None
sqlalchemy = None
def get_average_wind_speed(weather_path, grid_geometry_file, geometry_path,
in_file_pattern, out_file, overwrite=False):
"""
Get average wind speed over all years for each coastdat region. This can be
used to select the appropriate wind turbine for each region
(strong/low wind turbines).
Parameters
----------
overwrite : boolean
Will overwrite existing files if set to 'True'.
weather_path : str
Path to folder that contains all needed files.
geometry_path : str
Path to folder that contains geometry files.
grid_geometry_file : str
Name of the geometry file of the weather data grid.
in_file_pattern : str
Name of the hdf5 weather files with one wildcard for the year e.g.
weather_data_{0}.h5
out_file : str
Name of the results file (csv)
"""
if not os.path.isfile(os.path.join(weather_path, out_file)) or overwrite:
logging.info("Calculating the average wind speed...")
# Finding existing weather files.
filelist = (os.listdir(weather_path))
years = list()
for y in range(1970, 2020):
if in_file_pattern.format(year=y) in filelist:
years.append(y)
# Loading coastdat-grid as shapely geometries.
polygons_wkt = pd.read_csv(os.path.join(geometry_path,
grid_geometry_file))
polygons = pd.DataFrame(tools.postgis2shapely(polygons_wkt.geom),
index=polygons_wkt.gid, columns=['geom'])
# Opening all weather files
store = dict()
# open hdf files
for year in years:
store[year] = pd.HDFStore(os.path.join(
weather_path, in_file_pattern.format(year=year)), mode='r')
logging.info("Files loaded.")
keys = store[years[0]].keys()
logging.info("Keys loaded.")
n = len(list(keys))
logging.info("Remaining: {0}".format(n))
for key in keys:
wind_speed_avg = pd.Series()
n -= 1
if n % 100 == 0:
logging.info("Remaining: {0}".format(n))
weather_id = int(key[2:])
for year in years:
                # Remove entries if the year has too many entries.
if calendar.isleap(year):
h_max = 8784
else:
h_max = 8760
ws = store[year][key]['v_wind']
surplus = h_max - len(ws)
if surplus < 0:
ws = ws.ix[:surplus]
# add wind speed time series
wind_speed_avg = wind_speed_avg.append(
ws, verify_integrity=True)
# calculate the average wind speed for one grid item
polygons.loc[weather_id, 'v_wind_avg'] = wind_speed_avg.mean()
# Close hdf files
for year in years:
store[year].close()
# write results to csv file
polygons.to_csv(os.path.join(weather_path, out_file))
else:
logging.info("Skipped: Calculating the average wind speed.")
def fetch_coastdat2_year_from_db(weather_path, geometry_path, out_file_pattern,
geometry_file, years=range(1980, 2020),
overwrite=False):
"""Fetch coastDat2 weather data sets from db and store it to hdf5 files.
this files has to be adapted if the new weather data base is available.
Parameters
----------
overwrite : boolean
Skip existing files if set to False.
years : list of integer
Years to fetch.
weather_path : str
Path to folder that contains all needed files.
geometry_path : str
Path to folder that contains geometry files.
geometry_file : str
Name of the geometry file to clip the weather data set.
out_file_pattern : str
Name of the hdf5 weather files with one wildcard for the year e.g.
weather_data_{0}.h5
"""
weather = os.path.join(weather_path, out_file_pattern)
geometry = os.path.join(geometry_path, geometry_file)
polygon = wkt.loads(
pd.read_csv(geometry, index_col='gid', squeeze=True)[0])
# remove year 2000 due to an internal error
# years = list(years)
# years.remove(2000)
try:
conn = db.connection()
except sqlalchemy.exc.OperationalError:
conn = None
for year in years:
if not os.path.isfile(weather.format(year=str(year))) or overwrite:
try:
weather_sets = coastdat.get_weather(conn, polygon, year)
except AttributeError:
logging.warning("No database connection found.")
weather_sets = list()
if len(weather_sets) > 0:
logging.info("Fetching weather data for {0}.".format(year))
store = pd.HDFStore(weather.format(year=str(year)), mode='w')
for weather_set in weather_sets:
logging.debug(weather_set.name)
store['A' + str(weather_set.name)] = weather_set.data
store.close()
else:
logging.warning("No weather data found for {0}.".format(year))
else:
logging.info("Weather data for {0} exists. Skipping.".format(year))
def coastdat_id2coord():
"""
Creating a file with the latitude and longitude for all coastdat2 data sets.
"""
conn = db.connection()
sql = "select gid, st_x(geom), st_y(geom) from coastdat.spatial;"
results = (conn.execute(sql))
columns = results.keys()
data = pd.DataFrame(results.fetchall(), columns=columns)
data.set_index('gid', inplace=True)
data.to_csv(os.path.join('data', 'basic', 'id2latlon.csv'))
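if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module). The paths, file
    # pattern and file names below are placeholders and have to be adapted to
    # the local de21 data layout.
    logging.getLogger().setLevel(logging.INFO)
    get_average_wind_speed(
        weather_path='weather',
        grid_geometry_file='coastdat_grid.csv',
        geometry_path='geometries',
        in_file_pattern='coastDat2_de_{year}.h5',
        out_file='average_wind_speed_coastdat.csv')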
| gpl-3.0 |
madjelan/scikit-learn | sklearn/linear_model/least_angle.py | 57 | 49338 | """
Least Angle Regression algorithm. See the documentation on the
Generalized Linear Model for a complete discussion.
"""
from __future__ import print_function
# Author: Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Gael Varoquaux
#
# License: BSD 3 clause
from math import log
import sys
import warnings
from distutils.version import LooseVersion
import numpy as np
from scipy import linalg, interpolate
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel
from ..base import RegressorMixin
from ..utils import arrayfuncs, as_float_array, check_X_y
from ..cross_validation import check_cv
from ..utils import ConvergenceWarning
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange
import scipy
solve_triangular_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
solve_triangular_args = {'check_finite': False}
def lars_path(X, y, Xy=None, Gram=None, max_iter=500,
alpha_min=0, method='lar', copy_X=True,
eps=np.finfo(np.float).eps,
copy_Gram=True, verbose=0, return_path=True,
return_n_iter=False):
"""Compute Least Angle Regression or Lasso path using LARS algorithm [1]
The optimization objective for the case method='lasso' is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
in the case of method='lars', the objective function is only known in
the form of an implicit equation (see discussion in [1])
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
-----------
X : array, shape: (n_samples, n_features)
Input data.
y : array, shape: (n_samples)
Input targets.
max_iter : integer, optional (default=500)
Maximum number of iterations to perform, set to infinity for no limit.
Gram : None, 'auto', array, shape: (n_features, n_features), optional
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features.
alpha_min : float, optional (default=0)
Minimum correlation along the path. It corresponds to the
regularization parameter alpha parameter in the Lasso.
method : {'lar', 'lasso'}, optional (default='lar')
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
eps : float, optional (default=``np.finfo(np.float).eps``)
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
copy_X : bool, optional (default=True)
If ``False``, ``X`` is overwritten.
copy_Gram : bool, optional (default=True)
If ``False``, ``Gram`` is overwritten.
verbose : int (default=0)
Controls output verbosity.
return_path : bool, optional (default=True)
If ``return_path==True`` returns the entire path, else returns only the
last point of the path.
return_n_iter : bool, optional (default=False)
Whether to return the number of iterations.
Returns
--------
alphas : array, shape: [n_alphas + 1]
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter``, ``n_features`` or the
number of nodes in the path with ``alpha >= alpha_min``, whichever
is smaller.
active : array, shape [n_alphas]
Indices of active variables at the end of the path.
coefs : array, shape (n_features, n_alphas + 1)
Coefficients along the path
n_iter : int
Number of iterations run. Returned only if return_n_iter is set
to True.
See also
--------
lasso_path
LassoLars
Lars
LassoLarsCV
LarsCV
sklearn.decomposition.sparse_encode
References
----------
.. [1] "Least Angle Regression", Effron et al.
http://www-stat.stanford.edu/~tibs/ftp/lars.pdf
.. [2] `Wikipedia entry on the Least-angle regression
<http://en.wikipedia.org/wiki/Least-angle_regression>`_
.. [3] `Wikipedia entry on the Lasso
<http://en.wikipedia.org/wiki/Lasso_(statistics)#Lasso_method>`_
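    Examples
    --------
    A small illustrative sketch (the input is random, so no fixed output is
    shown)::
        import numpy as np
        from sklearn.linear_model import lars_path
        rng = np.random.RandomState(0)
        X = rng.randn(20, 5)
        y = rng.randn(20)
        # coefs[:, j] holds the coefficients at regularization alphas[j]
        alphas, active, coefs = lars_path(X, y, method='lasso')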
"""
n_features = X.shape[1]
n_samples = y.size
max_features = min(max_iter, n_features)
if return_path:
coefs = np.zeros((max_features + 1, n_features))
alphas = np.zeros(max_features + 1)
else:
coef, prev_coef = np.zeros(n_features), np.zeros(n_features)
alpha, prev_alpha = np.array([0.]), np.array([0.]) # better ideas?
n_iter, n_active = 0, 0
active, indices = list(), np.arange(n_features)
# holds the sign of covariance
sign_active = np.empty(max_features, dtype=np.int8)
drop = False
# will hold the cholesky factorization. Only lower part is
# referenced.
# We are initializing this to "zeros" and not empty, because
# it is passed to scipy linalg functions and thus if it has NaNs,
# even if they are in the upper part that it not used, we
# get errors raised.
# Once we support only scipy > 0.12 we can use check_finite=False and
# go back to "empty"
L = np.zeros((max_features, max_features), dtype=X.dtype)
swap, nrm2 = linalg.get_blas_funcs(('swap', 'nrm2'), (X,))
solve_cholesky, = get_lapack_funcs(('potrs',), (X,))
if Gram is None:
if copy_X:
# force copy. setting the array to be fortran-ordered
# speeds up the calculation of the (partial) Gram matrix
# and allows to easily swap columns
X = X.copy('F')
elif Gram == 'auto':
Gram = None
if X.shape[0] > X.shape[1]:
Gram = np.dot(X.T, X)
elif copy_Gram:
Gram = Gram.copy()
if Xy is None:
Cov = np.dot(X.T, y)
else:
Cov = Xy.copy()
if verbose:
if verbose > 1:
print("Step\t\tAdded\t\tDropped\t\tActive set size\t\tC")
else:
sys.stdout.write('.')
sys.stdout.flush()
tiny = np.finfo(np.float).tiny # to avoid division by 0 warning
tiny32 = np.finfo(np.float32).tiny # to avoid division by 0 warning
equality_tolerance = np.finfo(np.float32).eps
while True:
if Cov.size:
C_idx = np.argmax(np.abs(Cov))
C_ = Cov[C_idx]
C = np.fabs(C_)
else:
C = 0.
if return_path:
alpha = alphas[n_iter, np.newaxis]
coef = coefs[n_iter]
prev_alpha = alphas[n_iter - 1, np.newaxis]
prev_coef = coefs[n_iter - 1]
alpha[0] = C / n_samples
if alpha[0] <= alpha_min + equality_tolerance: # early stopping
if abs(alpha[0] - alpha_min) > equality_tolerance:
# interpolation factor 0 <= ss < 1
if n_iter > 0:
# In the first iteration, all alphas are zero, the formula
# below would make ss a NaN
ss = ((prev_alpha[0] - alpha_min) /
(prev_alpha[0] - alpha[0]))
coef[:] = prev_coef + ss * (coef - prev_coef)
alpha[0] = alpha_min
if return_path:
coefs[n_iter] = coef
break
if n_iter >= max_iter or n_active >= n_features:
break
if not drop:
##########################################################
# Append x_j to the Cholesky factorization of (Xa * Xa') #
# #
# ( L 0 ) #
# L -> ( ) , where L * w = Xa' x_j #
# ( w z ) and z = ||x_j|| #
# #
##########################################################
sign_active[n_active] = np.sign(C_)
m, n = n_active, C_idx + n_active
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
indices[n], indices[m] = indices[m], indices[n]
Cov_not_shortened = Cov
Cov = Cov[1:] # remove Cov[0]
if Gram is None:
X.T[n], X.T[m] = swap(X.T[n], X.T[m])
c = nrm2(X.T[n_active]) ** 2
L[n_active, :n_active] = \
np.dot(X.T[n_active], X.T[:n_active].T)
else:
# swap does only work inplace if matrix is fortran
# contiguous ...
Gram[m], Gram[n] = swap(Gram[m], Gram[n])
Gram[:, m], Gram[:, n] = swap(Gram[:, m], Gram[:, n])
c = Gram[n_active, n_active]
L[n_active, :n_active] = Gram[n_active, :n_active]
# Update the cholesky decomposition for the Gram matrix
if n_active:
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = np.dot(L[n_active, :n_active], L[n_active, :n_active])
diag = max(np.sqrt(np.abs(c - v)), eps)
L[n_active, n_active] = diag
if diag < 1e-7:
# The system is becoming too ill-conditioned.
# We have degenerate vectors in our active set.
# We'll 'drop for good' the last regressor added.
# Note: this case is very rare. It is no longer triggered by the
# test suite. The `equality_tolerance` margin added in 0.16.0 to
# get early stopping to work consistently on all versions of
# Python including 32 bit Python under Windows seems to make it
# very difficult to trigger the 'drop for good' strategy.
warnings.warn('Regressors in active set degenerate. '
'Dropping a regressor, after %i iterations, '
'i.e. alpha=%.3e, '
'with an active set of %i regressors, and '
'the smallest cholesky pivot element being %.3e'
% (n_iter, alpha, n_active, diag),
ConvergenceWarning)
# XXX: need to figure a 'drop for good' way
Cov = Cov_not_shortened
Cov[0] = 0
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
continue
active.append(indices[n_active])
n_active += 1
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, active[-1], '',
n_active, C))
if method == 'lasso' and n_iter > 0 and prev_alpha[0] < alpha[0]:
# alpha is increasing. This is because the updates of Cov are
# bringing in too much numerical error that is greater than
# than the remaining correlation with the
# regressors. Time to bail out
warnings.warn('Early stopping the lars path, as the residues '
'are small and the current value of alpha is no '
'longer well controlled. %i iterations, alpha=%.3e, '
'previous alpha=%.3e, with an active set of %i '
'regressors.'
% (n_iter, alpha, prev_alpha, n_active),
ConvergenceWarning)
break
# least squares solution
least_squares, info = solve_cholesky(L[:n_active, :n_active],
sign_active[:n_active],
lower=True)
if least_squares.size == 1 and least_squares == 0:
# This happens because sign_active[:n_active] = 0
least_squares[...] = 1
AA = 1.
else:
# is this really needed ?
AA = 1. / np.sqrt(np.sum(least_squares * sign_active[:n_active]))
if not np.isfinite(AA):
# L is too ill-conditioned
i = 0
L_ = L[:n_active, :n_active].copy()
while not np.isfinite(AA):
L_.flat[::n_active + 1] += (2 ** i) * eps
least_squares, info = solve_cholesky(
L_, sign_active[:n_active], lower=True)
tmp = max(np.sum(least_squares * sign_active[:n_active]),
eps)
AA = 1. / np.sqrt(tmp)
i += 1
least_squares *= AA
if Gram is None:
# equiangular direction of variables in the active set
eq_dir = np.dot(X.T[:n_active].T, least_squares)
# correlation between each unactive variables and
# eqiangular vector
corr_eq_dir = np.dot(X.T[n_active:], eq_dir)
else:
# if huge number of features, this takes 50% of time, I
# think could be avoided if we just update it using an
# orthogonal (QR) decomposition of X
corr_eq_dir = np.dot(Gram[:n_active, n_active:].T,
least_squares)
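        # Step length: the smallest positive gamma at which some inactive
        # regressor attains the same absolute correlation as the active set
        # (the gamma-hat step in Efron et al.'s LARS paper).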
g1 = arrayfuncs.min_pos((C - Cov) / (AA - corr_eq_dir + tiny))
g2 = arrayfuncs.min_pos((C + Cov) / (AA + corr_eq_dir + tiny))
gamma_ = min(g1, g2, C / AA)
# TODO: better names for these variables: z
drop = False
z = -coef[active] / (least_squares + tiny32)
z_pos = arrayfuncs.min_pos(z)
if z_pos < gamma_:
# some coefficients have changed sign
idx = np.where(z == z_pos)[0][::-1]
# update the sign, important for LAR
sign_active[idx] = -sign_active[idx]
if method == 'lasso':
gamma_ = z_pos
drop = True
n_iter += 1
if return_path:
if n_iter >= coefs.shape[0]:
del coef, alpha, prev_alpha, prev_coef
# resize the coefs and alphas array
add_features = 2 * max(1, (max_features - n_active))
coefs = np.resize(coefs, (n_iter + add_features, n_features))
alphas = np.resize(alphas, n_iter + add_features)
coef = coefs[n_iter]
prev_coef = coefs[n_iter - 1]
alpha = alphas[n_iter, np.newaxis]
prev_alpha = alphas[n_iter - 1, np.newaxis]
else:
# mimic the effect of incrementing n_iter on the array references
prev_coef = coef
prev_alpha[0] = alpha[0]
coef = np.zeros_like(coef)
coef[active] = prev_coef[active] + gamma_ * least_squares
# update correlations
Cov -= gamma_ * corr_eq_dir
# See if any coefficient has changed sign
if drop and method == 'lasso':
# handle the case when idx is not length of 1
[arrayfuncs.cholesky_delete(L[:n_active, :n_active], ii) for ii in
idx]
n_active -= 1
m, n = idx, n_active
# handle the case when idx is not length of 1
drop_idx = [active.pop(ii) for ii in idx]
if Gram is None:
# propagate dropped variable
for ii in idx:
for i in range(ii, n_active):
X.T[i], X.T[i + 1] = swap(X.T[i], X.T[i + 1])
# yeah this is stupid
indices[i], indices[i + 1] = indices[i + 1], indices[i]
# TODO: this could be updated
residual = y - np.dot(X[:, :n_active], coef[active])
temp = np.dot(X.T[n_active], residual)
Cov = np.r_[temp, Cov]
else:
for ii in idx:
for i in range(ii, n_active):
indices[i], indices[i + 1] = indices[i + 1], indices[i]
Gram[i], Gram[i + 1] = swap(Gram[i], Gram[i + 1])
Gram[:, i], Gram[:, i + 1] = swap(Gram[:, i],
Gram[:, i + 1])
# Cov_n = Cov_j + x_j * X + increment(betas) TODO:
# will this still work with multiple drops ?
# recompute covariance. Probably could be done better
# wrong as Xy is not swapped with the rest of variables
# TODO: this could be updated
residual = y - np.dot(X, coef)
temp = np.dot(X.T[drop_idx], residual)
Cov = np.r_[temp, Cov]
sign_active = np.delete(sign_active, idx)
sign_active = np.append(sign_active, 0.) # just to maintain size
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, '', drop_idx,
n_active, abs(temp)))
if return_path:
# resize coefs in case of early stop
alphas = alphas[:n_iter + 1]
coefs = coefs[:n_iter + 1]
if return_n_iter:
return alphas, active, coefs.T, n_iter
else:
return alphas, active, coefs.T
else:
if return_n_iter:
return alpha, active, coef, n_iter
else:
return alpha, active, coef
###############################################################################
# Estimator classes
class Lars(LinearModel, RegressorMixin):
"""Least Angle Regression model a.k.a. LAR
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
n_nonzero_coefs : int, optional
Target number of non-zero coefficients. Use ``np.inf`` for no limit.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If ``True``, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
fit_path : boolean
If True the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
Attributes
----------
alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
Maximum of covariances (in absolute value) at each iteration. \
``n_alphas`` is either ``n_nonzero_coefs`` or ``n_features``, \
whichever is smaller.
active_ : list, length = n_alphas | list of n_targets such lists
Indices of active variables at the end of the path.
coef_path_ : array, shape (n_features, n_alphas + 1) \
| list of n_targets such arrays
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``.
coef_ : array, shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float | array, shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lars(n_nonzero_coefs=1)
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Lars(copy_X=True, eps=..., fit_intercept=True, fit_path=True,
n_nonzero_coefs=1, normalize=True, precompute='auto', verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -1.11...]
See also
--------
lars_path, LarsCV
sklearn.decomposition.sparse_encode
"""
def __init__(self, fit_intercept=True, verbose=False, normalize=True,
precompute='auto', n_nonzero_coefs=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True):
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.method = 'lar'
self.precompute = precompute
self.n_nonzero_coefs = n_nonzero_coefs
self.eps = eps
self.copy_X = copy_X
self.fit_path = fit_path
def _get_gram(self):
# precompute if n_samples > n_features
precompute = self.precompute
if hasattr(precompute, '__array__'):
Gram = precompute
elif precompute == 'auto':
Gram = 'auto'
else:
Gram = None
return Gram
def fit(self, X, y, Xy=None):
"""Fit the model using X, y as training data.
        Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Xy : array-like, shape (n_samples,) or (n_samples, n_targets), \
optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
        Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, y_numeric=True, multi_output=True)
n_features = X.shape[1]
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize,
self.copy_X)
if y.ndim == 1:
y = y[:, np.newaxis]
n_targets = y.shape[1]
alpha = getattr(self, 'alpha', 0.)
if hasattr(self, 'n_nonzero_coefs'):
alpha = 0. # n_nonzero_coefs parametrization takes priority
max_iter = self.n_nonzero_coefs
else:
max_iter = self.max_iter
precompute = self.precompute
if not hasattr(precompute, '__array__') and (
precompute is True or
(precompute == 'auto' and X.shape[0] > X.shape[1]) or
(precompute == 'auto' and y.shape[1] > 1)):
Gram = np.dot(X.T, X)
else:
Gram = self._get_gram()
self.alphas_ = []
self.n_iter_ = []
if self.fit_path:
self.coef_ = []
self.active_ = []
self.coef_path_ = []
for k in xrange(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, active, coef_path, n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=True,
return_n_iter=True)
self.alphas_.append(alphas)
self.active_.append(active)
self.n_iter_.append(n_iter_)
self.coef_path_.append(coef_path)
self.coef_.append(coef_path[:, -1])
if n_targets == 1:
self.alphas_, self.active_, self.coef_path_, self.coef_ = [
a[0] for a in (self.alphas_, self.active_, self.coef_path_,
self.coef_)]
self.n_iter_ = self.n_iter_[0]
else:
self.coef_ = np.empty((n_targets, n_features))
for k in xrange(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, _, self.coef_[k], n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=False, return_n_iter=True)
self.alphas_.append(alphas)
self.n_iter_.append(n_iter_)
if n_targets == 1:
self.alphas_ = self.alphas_[0]
self.n_iter_ = self.n_iter_[0]
self._set_intercept(X_mean, y_mean, X_std)
return self
class LassoLars(Lars):
"""Lasso model fit with Least Angle Regression a.k.a. Lars
It is a Linear Model trained with an L1 prior as regularizer.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
alpha : float
Constant that multiplies the penalty term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by :class:`LinearRegression`. For numerical reasons, using
``alpha = 0`` with the LassoLars object is not advised and you
should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
fit_path : boolean
If ``True`` the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
Attributes
----------
alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
Maximum of covariances (in absolute value) at each iteration. \
``n_alphas`` is either ``max_iter``, ``n_features``, or the number of \
nodes in the path with correlation greater than ``alpha``, whichever \
is smaller.
active_ : list, length = n_alphas | list of n_targets such lists
Indices of active variables at the end of the path.
coef_path_ : array, shape (n_features, n_alphas + 1) or list
If a list is passed it's expected to be one of n_targets such arrays.
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``.
coef_ : array, shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float | array, shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int.
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.LassoLars(alpha=0.01)
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1, 0, -1])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
LassoLars(alpha=0.01, copy_X=True, eps=..., fit_intercept=True,
fit_path=True, max_iter=500, normalize=True, precompute='auto',
verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -0.963257...]
See also
--------
lars_path
lasso_path
Lasso
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
def __init__(self, alpha=1.0, fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.method = 'lasso'
self.precompute = precompute
self.copy_X = copy_X
self.eps = eps
self.fit_path = fit_path
###############################################################################
# Cross-validated estimator classes
def _check_copy_and_writeable(array, copy=False):
if copy or not array.flags.writeable:
return array.copy()
return array
def _lars_path_residues(X_train, y_train, X_test, y_test, Gram=None,
copy=True, method='lars', verbose=False,
fit_intercept=True, normalize=True, max_iter=500,
eps=np.finfo(np.float).eps):
"""Compute the residues on left-out data for a full LARS path
Parameters
-----------
X_train : array, shape (n_samples, n_features)
The data to fit the LARS on
y_train : array, shape (n_samples)
The target variable to fit LARS on
X_test : array, shape (n_samples, n_features)
The data to compute the residues on
y_test : array, shape (n_samples)
The target variable to compute the residues on
Gram : None, 'auto', array, shape: (n_features, n_features), optional
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features
copy : boolean, optional
Whether X_train, X_test, y_train and y_test should be copied;
if False, they may be overwritten.
method : 'lar' | 'lasso'
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
verbose : integer, optional
Sets the amount of verbosity
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
max_iter : integer, optional
Maximum number of iterations to perform.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Returns
--------
alphas : array, shape (n_alphas,)
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter`` or ``n_features``, whichever
is smaller.
active : list
Indices of active variables at the end of the path.
coefs : array, shape (n_features, n_alphas)
Coefficients along the path
residues : array, shape (n_alphas, n_samples)
Residues of the prediction on the test data
"""
X_train = _check_copy_and_writeable(X_train, copy)
y_train = _check_copy_and_writeable(y_train, copy)
X_test = _check_copy_and_writeable(X_test, copy)
y_test = _check_copy_and_writeable(y_test, copy)
if fit_intercept:
X_mean = X_train.mean(axis=0)
X_train -= X_mean
X_test -= X_mean
y_mean = y_train.mean(axis=0)
y_train = as_float_array(y_train, copy=False)
y_train -= y_mean
y_test = as_float_array(y_test, copy=False)
y_test -= y_mean
if normalize:
norms = np.sqrt(np.sum(X_train ** 2, axis=0))
nonzeros = np.flatnonzero(norms)
X_train[:, nonzeros] /= norms[nonzeros]
alphas, active, coefs = lars_path(
X_train, y_train, Gram=Gram, copy_X=False, copy_Gram=False,
method=method, verbose=max(0, verbose - 1), max_iter=max_iter, eps=eps)
if normalize:
coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
residues = np.dot(X_test, coefs) - y_test[:, np.newaxis]
return alphas, active, coefs, residues.T
class LarsCV(Lars):
"""Cross-validated Least Angle Regression model
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
        If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
    max_iter : integer, optional
Maximum number of iterations to perform.
cv : cross-validation generator, optional
see :mod:`sklearn.cross_validation`. If ``None`` is passed, default to
a 5-fold strategy
max_n_alphas : integer, optional
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function
coef_path_ : array, shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array, shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array, shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
See also
--------
lars_path, LassoLars, LassoLarsCV
"""
method = 'lar'
def __init__(self, fit_intercept=True, verbose=False, max_iter=500,
normalize=True, precompute='auto', cv=None,
max_n_alphas=1000, n_jobs=1, eps=np.finfo(np.float).eps,
copy_X=True):
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.copy_X = copy_X
self.cv = cv
self.max_n_alphas = max_n_alphas
self.n_jobs = n_jobs
self.eps = eps
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
Returns
-------
self : object
returns an instance of self.
"""
self.fit_path = True
X, y = check_X_y(X, y, y_numeric=True)
# init cross-validation generator
cv = check_cv(self.cv, X, y, classifier=False)
Gram = 'auto' if self.precompute else None
cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_lars_path_residues)(
X[train], y[train], X[test], y[test], Gram=Gram, copy=False,
method=self.method, verbose=max(0, self.verbose - 1),
normalize=self.normalize, fit_intercept=self.fit_intercept,
max_iter=self.max_iter, eps=self.eps)
for train, test in cv)
all_alphas = np.concatenate(list(zip(*cv_paths))[0])
# Unique also sorts
all_alphas = np.unique(all_alphas)
# Take at most max_n_alphas values
stride = int(max(1, int(len(all_alphas) / float(self.max_n_alphas))))
all_alphas = all_alphas[::stride]
mse_path = np.empty((len(all_alphas), len(cv_paths)))
for index, (alphas, active, coefs, residues) in enumerate(cv_paths):
alphas = alphas[::-1]
residues = residues[::-1]
if alphas[0] != 0:
alphas = np.r_[0, alphas]
residues = np.r_[residues[0, np.newaxis], residues]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
residues = np.r_[residues, residues[-1, np.newaxis]]
this_residues = interpolate.interp1d(alphas,
residues,
axis=0)(all_alphas)
this_residues **= 2
mse_path[:, index] = np.mean(this_residues, axis=-1)
mask = np.all(np.isfinite(mse_path), axis=-1)
all_alphas = all_alphas[mask]
mse_path = mse_path[mask]
# Select the alpha that minimizes left-out error
i_best_alpha = np.argmin(mse_path.mean(axis=-1))
best_alpha = all_alphas[i_best_alpha]
# Store our parameters
self.alpha_ = best_alpha
self.cv_alphas_ = all_alphas
self.cv_mse_path_ = mse_path
# Now compute the full model
# it will call a lasso internally when self if LassoLarsCV
# as self.method == 'lasso'
Lars.fit(self, X, y)
return self
@property
def alpha(self):
# impedance matching for the above Lars.fit (should not be documented)
return self.alpha_
class LassoLarsCV(LarsCV):
"""Cross-validated Lasso, using the LARS algorithm
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
        If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform.
cv : cross-validation generator, optional
see sklearn.cross_validation module. If None is passed, default to
a 5-fold strategy
max_n_alphas : integer, optional
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
coef_path_ : array, shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array, shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array, shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
Notes
-----
The object solves the same problem as the LassoCV object. However,
    unlike the LassoCV, it finds the relevant alpha values by itself.
In general, because of this property, it will be more stable.
However, it is more fragile to heavily multicollinear datasets.
It is more efficient than the LassoCV if only a small number of
features are selected compared to the total number, for instance if
there are very few samples compared to the number of features.
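    Examples
    --------
    A minimal usage sketch (small synthetic data; the fitted values depend on
    the random data, so they are not shown here)::
        import numpy as np
        from sklearn import linear_model
        rng = np.random.RandomState(0)
        X = rng.randn(20, 3)
        y = X[:, 0] - 2 * X[:, 1] + 0.1 * rng.randn(20)
        reg = linear_model.LassoLarsCV().fit(X, y)
        print(reg.alpha_)   # regularization strength selected by cross-validation
        print(reg.coef_)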
See also
--------
lars_path, LassoLars, LarsCV, LassoCV
"""
method = 'lasso'
class LassoLarsIC(LassoLars):
"""Lasso model fit with Lars using BIC or AIC for model selection
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
AIC is the Akaike information criterion and BIC is the Bayes
Information criterion. Such criteria are useful to select the value
of the regularization parameter by making a trade-off between the
goodness of fit and the complexity of the model. A good model should
explain well the data while being simple.
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
criterion : 'bic' | 'aic'
The type of criterion to use.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
        If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform. Can be used for
early stopping.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
alpha_ : float
the alpha parameter chosen by the information criterion
n_iter_ : int
number of iterations run by lars_path to find the grid of
alphas.
criterion_ : array, shape (n_alphas,)
The value of the information criteria ('aic', 'bic') across all
alphas. The alpha which has the smallest information criteria
is chosen.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.LassoLarsIC(criterion='bic')
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
LassoLarsIC(copy_X=True, criterion='bic', eps=..., fit_intercept=True,
max_iter=500, normalize=True, precompute='auto',
verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -1.11...]
Notes
-----
The estimation of the number of degrees of freedom is given by:
"On the degrees of freedom of the lasso"
Hui Zou, Trevor Hastie, and Robert Tibshirani
Ann. Statist. Volume 35, Number 5 (2007), 2173-2192.
http://en.wikipedia.org/wiki/Akaike_information_criterion
http://en.wikipedia.org/wiki/Bayesian_information_criterion
See also
--------
lars_path, LassoLars, LassoLarsCV
"""
def __init__(self, criterion='aic', fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(np.float).eps, copy_X=True):
self.criterion = criterion
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.copy_X = copy_X
self.precompute = precompute
self.eps = eps
def fit(self, X, y, copy_X=True):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
training data.
y : array-like, shape (n_samples,)
target values.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Returns
-------
self : object
returns an instance of self.
"""
self.fit_path = True
X, y = check_X_y(X, y, y_numeric=True)
X, y, Xmean, ymean, Xstd = LinearModel._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
max_iter = self.max_iter
Gram = self._get_gram()
alphas_, active_, coef_path_, self.n_iter_ = lars_path(
X, y, Gram=Gram, copy_X=copy_X, copy_Gram=True, alpha_min=0.0,
method='lasso', verbose=self.verbose, max_iter=max_iter,
eps=self.eps, return_n_iter=True)
n_samples = X.shape[0]
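        # The information criterion evaluated further below is
        #     criterion_ = n_samples * log(mean squared error) + K * df
        # where K is the per-degree-of-freedom penalty: 2 for AIC and
        # log(n_samples) for BIC.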
if self.criterion == 'aic':
K = 2 # AIC
elif self.criterion == 'bic':
K = log(n_samples) # BIC
else:
raise ValueError('criterion should be either bic or aic')
R = y[:, np.newaxis] - np.dot(X, coef_path_) # residuals
mean_squared_error = np.mean(R ** 2, axis=0)
df = np.zeros(coef_path_.shape[1], dtype=np.int) # Degrees of freedom
for k, coef in enumerate(coef_path_.T):
mask = np.abs(coef) > np.finfo(coef.dtype).eps
if not np.any(mask):
continue
# get the number of degrees of freedom equal to:
# Xc = X[:, mask]
            # Trace(Xc * inv(Xc.T * Xc) * Xc.T), i.e. the number of non-zero coefs
df[k] = np.sum(mask)
self.alphas_ = alphas_
with np.errstate(divide='ignore'):
self.criterion_ = n_samples * np.log(mean_squared_error) + K * df
n_best = np.argmin(self.criterion_)
self.alpha_ = alphas_[n_best]
self.coef_ = coef_path_[:, n_best]
self._set_intercept(Xmean, ymean, Xstd)
return self
| bsd-3-clause |
cmaclell/concept_formation | concept_formation/examples/trestle_cluster_split_search.py | 1 | 2105 | from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
from random import shuffle
from random import seed
from sklearn.metrics import adjusted_rand_score
import matplotlib.pyplot as plt
import numpy as np
from concept_formation.trestle import TrestleTree
from concept_formation.cluster import cluster_split_search
from concept_formation.cluster import AIC, BIC, AICc, CU
from concept_formation.datasets import load_rb_wb_03
from concept_formation.datasets import load_rb_com_11
from concept_formation.datasets import load_rb_s_13
from concept_formation.preprocessor import ObjectVariablizer
seed(5)
hueristics = [AIC, BIC, CU, AICc]
def calculate_aris(dataset):
shuffle(dataset)
dataset = dataset[:60]
variablizer = ObjectVariablizer()
dataset = [variablizer.transform(t) for t in dataset]
tree = TrestleTree()
tree.fit(dataset)
clusters = [cluster_split_search(tree, dataset, h, minsplit=1, maxsplit=40,
mod=False) for h in hueristics]
human_labels = [ds['_human_cluster_label'] for ds in dataset]
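    # floor the ARI at 0.01, presumably so that clusterings with essentially
    # no agreement still show up as a visible bar in the plots below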
return [max(adjusted_rand_score(human_labels, huer), 0.01) for huer in
clusters]
x = np.arange(len(hueristics))
width = 0.3
hueristic_names = ['AIC', 'BIC', 'CU', 'AICc']
# for i in range(len(clusters)):
# hueristic_names[i] += '\nClusters='+str(len(set(clusters[i])))
b1 = plt.bar(x-width, calculate_aris(load_rb_wb_03()),
width, color='r', alpha=.8, align='center')
b2 = plt.bar(x, calculate_aris(load_rb_com_11()),
width, color='b', alpha=.8, align='center')
b3 = plt.bar(x+width, calculate_aris(load_rb_s_13()),
width, color='g', alpha=.8, align='center')
plt.legend((b1[0], b2[0], b3[0]), ('wb_03', 'com_11', 's_13'))
plt.title("TRESTLE Clustering Accuracy of Best Clustering by Different"
" Hueristics")
plt.ylabel("Adjusted Rand Index (Agreement Correcting for Chance)")
plt.ylim(0, 1)
plt.xlabel("Heuristic")
plt.xticks(x, hueristic_names)
plt.show()
| mit |
davidaknowles/epidiff | great_query.py | 1 | 2092 | import urllib, urllib2
import sys
import os
import pandas
tempfn="~/www/temp.bed"
baseUrl="http://bejerano.stanford.edu/great/public/cgi-bin/greatStart.php"
def great_query(fn):
os.system("cp %s %s" % ( fn, tempfn ))
data={}
data['requestURL']="http://cs.stanford.edu/~davidknowles/temp.bed"
data['requestSpecies']="hg18"
data['outputType']="batch"
encoded=urllib.urlencode(data)
request=urllib2.urlopen(baseUrl + "?" + encoded)
print baseUrl + "?" + encoded
names=["Ontology", "ID", "Desc", "BinomRank", "BinomP", "BinomBonfP", "BinomFdrQ", "RegionFoldEnrich", "ExpRegions", "ObsRegions", "GenomeFrac", "SetCov", "HyperRank", "HyperP", "HyperBonfP", "HyperFdrQ", "GeneFoldEnrich", "ExpGenes", "ObsGenes", "TotalGenes", "GeneSetCov", "TermCov", "Regions", "Genes"]
df=pandas.io.parsers.read_table(request,comment="#",skiprows=5,header=None,names=names)
os.system("rm " + tempfn)
request.close()
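    # keep GO Biological Process terms that pass both the binomial and
    # hypergeometric FDR thresholds (q < 0.1) and are at least 2-fold enriched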
f=df[(df['HyperFdrQ']<0.1) & (df['BinomFdrQ'] < 0.1) & (df['RegionFoldEnrich'] > 2.0) & (df['Ontology']=="GO Biological Process")]
#f=df[(df['RegionFoldEnrich'] > 2.0) & (df['Ontology']=="GO Biological Process")]
f=f.sort('BinomP')
return f
if __name__=="__main__":
cellTypeListFile = open("/afs/cs.stanford.edu/u/imk1/P01Project/src/cellList.txt")
first_time=1
for cell in cellTypeListFile:
celln=cell.strip()
#fn="/afs/cs.stanford.edu/u/imk1/P01Project/MACSResults/PartialPeaks2/%s-GIIAX_EnhancerMerged" \% celln
fn="/afs/cs.stanford.edu/u/davidknowles/scratch/p01project/quest_enhancers/%s_enhancers_simp.bed" % celln
f=great_query(fn)
print celln, f.shape[0]
if f.shape[0] > 0:
f=f[['Ontology','ID','Desc','BinomFdrQ']].iloc[1:min(10,f.shape[0])]
f['Cell']=celln
if first_time:
res=f
else:
res=res.append(f)
first_time=0
res=res.ix[:,['Cell','Ontology','ID','Desc','BinomFdrQ']]
res.to_csv("GO_only.csv")
| gpl-2.0 |
Riptawr/deep-learning | weight-initialization/helper.py | 153 | 3649 | import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
def hist_dist(title, distribution_tensor, hist_range=(-4, 4)):
"""
Display histogram of a TF distribution
"""
with tf.Session() as sess:
values = sess.run(distribution_tensor)
plt.title(title)
    plt.hist(values, np.linspace(*hist_range, num=len(values) // 2))
plt.show()
def _get_loss_acc(dataset, weights):
"""
Get losses and validation accuracy of example neural network
"""
batch_size = 128
epochs = 2
learning_rate = 0.001
features = tf.placeholder(tf.float32)
labels = tf.placeholder(tf.float32)
learn_rate = tf.placeholder(tf.float32)
biases = [
tf.Variable(tf.zeros([256])),
tf.Variable(tf.zeros([128])),
tf.Variable(tf.zeros([dataset.train.labels.shape[1]]))
]
# Layers
layer_1 = tf.nn.relu(tf.matmul(features, weights[0]) + biases[0])
layer_2 = tf.nn.relu(tf.matmul(layer_1, weights[1]) + biases[1])
logits = tf.matmul(layer_2, weights[2]) + biases[2]
# Training loss
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
# Optimizer
optimizer = tf.train.AdamOptimizer(learn_rate).minimize(loss)
# Accuracy
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Measurements use for graphing loss
loss_batch = []
with tf.Session() as session:
session.run(tf.global_variables_initializer())
batch_count = int((dataset.train.num_examples / batch_size))
# The training cycle
for epoch_i in range(epochs):
for batch_i in range(batch_count):
batch_features, batch_labels = dataset.train.next_batch(batch_size)
# Run optimizer and get loss
session.run(
optimizer,
feed_dict={features: batch_features, labels: batch_labels, learn_rate: learning_rate})
l = session.run(
loss,
feed_dict={features: batch_features, labels: batch_labels, learn_rate: learning_rate})
loss_batch.append(l)
valid_acc = session.run(
accuracy,
feed_dict={features: dataset.validation.images, labels: dataset.validation.labels, learn_rate: 1.0})
# Hack to Reset batches
dataset.train._index_in_epoch = 0
dataset.train._epochs_completed = 0
return loss_batch, valid_acc
def compare_init_weights(
dataset,
title,
weight_init_list,
plot_n_batches=100):
"""
Plot loss and print stats of weights using an example neural network
"""
colors = ['r', 'b', 'g', 'c', 'y', 'k']
label_accs = []
label_loss = []
assert len(weight_init_list) <= len(colors), 'Too many inital weights to plot'
for i, (weights, label) in enumerate(weight_init_list):
loss, val_acc = _get_loss_acc(dataset, weights)
plt.plot(loss[:plot_n_batches], colors[i], label=label)
label_accs.append((label, val_acc))
label_loss.append((label, loss[-1]))
plt.title(title)
plt.xlabel('Batches')
plt.ylabel('Loss')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
print('After 858 Batches (2 Epochs):')
print('Validation Accuracy')
for label, val_acc in label_accs:
print(' {:7.3f}% -- {}'.format(val_acc*100, label))
print('Loss')
for label, loss in label_loss:
print(' {:7.3f} -- {}'.format(loss, label))
| mit |
voxlol/scikit-learn | sklearn/semi_supervised/tests/test_label_propagation.py | 307 | 1974 | """ test the label propagation module """
import nose
import numpy as np
from sklearn.semi_supervised import label_propagation
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
ESTIMATORS = [
(label_propagation.LabelPropagation, {'kernel': 'rbf'}),
(label_propagation.LabelPropagation, {'kernel': 'knn', 'n_neighbors': 2}),
(label_propagation.LabelSpreading, {'kernel': 'rbf'}),
(label_propagation.LabelSpreading, {'kernel': 'knn', 'n_neighbors': 2})
]
def test_fit_transduction():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
nose.tools.assert_equal(clf.transduction_[2], 1)
def test_distribution():
samples = [[1., 0.], [0., 1.], [1., 1.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
if parameters['kernel'] == 'knn':
continue # unstable test; changes in k-NN ordering break it
assert_array_almost_equal(clf.predict_proba([[1., 0.0]]),
np.array([[1., 0.]]), 2)
else:
assert_array_almost_equal(np.asarray(clf.label_distributions_[2]),
np.array([.5, .5]), 2)
def test_predict():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_equal(clf.predict([[0.5, 2.5]]), np.array([1]))
def test_predict_proba():
samples = [[1., 0.], [0., 1.], [1., 2.5]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_almost_equal(clf.predict_proba([[1., 1.]]),
np.array([[0.5, 0.5]]))
| bsd-3-clause |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/mpl_toolkits/axes_grid1/axes_grid.py | 10 | 29410 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import matplotlib.cbook as cbook
import matplotlib.axes as maxes
#import matplotlib.colorbar as mcolorbar
from . import colorbar as mcolorbar
import matplotlib as mpl
import matplotlib.patches as mpatches
import matplotlib.lines as mlines
import matplotlib.ticker as ticker
from matplotlib.gridspec import SubplotSpec
from .axes_divider import Size, SubplotDivider, LocatableAxes, Divider
def _extend_axes_pad(value):
# Check whether a list/tuple/array or scalar has been passed
ret = value
if not hasattr(ret, "__getitem__"):
ret = (value, value)
return ret
def _tick_only(ax, bottom_on, left_on):
bottom_off = not bottom_on
left_off = not left_on
# [l.set_visible(bottom_off) for l in ax.get_xticklabels()]
# [l.set_visible(left_off) for l in ax.get_yticklabels()]
# ax.xaxis.label.set_visible(bottom_off)
# ax.yaxis.label.set_visible(left_off)
ax.axis["bottom"].toggle(ticklabels=bottom_off, label=bottom_off)
ax.axis["left"].toggle(ticklabels=left_off, label=left_off)
class Colorbar(mcolorbar.Colorbar):
def _config_axes_deprecated(self, X, Y):
'''
Make an axes patch and outline.
'''
ax = self.ax
ax.set_frame_on(False)
ax.set_navigate(False)
xy = self._outline(X, Y)
ax.update_datalim(xy)
ax.set_xlim(*ax.dataLim.intervalx)
ax.set_ylim(*ax.dataLim.intervaly)
self.outline = mlines.Line2D(xy[:, 0], xy[:, 1],
color=mpl.rcParams['axes.edgecolor'],
linewidth=mpl.rcParams['axes.linewidth'])
ax.add_artist(self.outline)
self.outline.set_clip_box(None)
self.outline.set_clip_path(None)
c = mpl.rcParams['axes.facecolor']
self.patch = mpatches.Polygon(xy, edgecolor=c,
facecolor=c,
linewidth=0.01,
zorder=-1)
ax.add_artist(self.patch)
ticks, ticklabels, offset_string = self._ticker()
if self.orientation == 'vertical':
ax.set_yticks(ticks)
ax.set_yticklabels(ticklabels)
ax.yaxis.get_major_formatter().set_offset_string(offset_string)
else:
ax.set_xticks(ticks)
ax.set_xticklabels(ticklabels)
ax.xaxis.get_major_formatter().set_offset_string(offset_string)
class CbarAxesBase(object):
def colorbar(self, mappable, **kwargs):
locator = kwargs.pop("locator", None)
if locator is None:
if "ticks" not in kwargs:
kwargs["ticks"] = ticker.MaxNLocator(5)
if locator is not None:
if "ticks" in kwargs:
raise ValueError("Either *locator* or *ticks* need" +
" to be given, not both")
else:
kwargs["ticks"] = locator
self._hold = True
if self.orientation in ["top", "bottom"]:
orientation = "horizontal"
else:
orientation = "vertical"
cb = Colorbar(self, mappable, orientation=orientation, **kwargs)
self._config_axes()
def on_changed(m):
#print 'calling on changed', m.get_cmap().name
cb.set_cmap(m.get_cmap())
cb.set_clim(m.get_clim())
cb.update_bruteforce(m)
self.cbid = mappable.callbacksSM.connect('changed', on_changed)
mappable.colorbar = cb
self.locator = cb.cbar_axis.get_major_locator()
return cb
def _config_axes(self):
'''
Make an axes patch and outline.
'''
ax = self
ax.set_navigate(False)
ax.axis[:].toggle(all=False)
b = self._default_label_on
ax.axis[self.orientation].toggle(all=b)
# for axis in ax.axis.values():
# axis.major_ticks.set_visible(False)
# axis.minor_ticks.set_visible(False)
# axis.major_ticklabels.set_visible(False)
# axis.minor_ticklabels.set_visible(False)
# axis.label.set_visible(False)
# axis = ax.axis[self.orientation]
# axis.major_ticks.set_visible(True)
# axis.minor_ticks.set_visible(True)
#axis.major_ticklabels.set_size(
# int(axis.major_ticklabels.get_size()*.9))
#axis.major_tick_pad = 3
# axis.major_ticklabels.set_visible(b)
# axis.minor_ticklabels.set_visible(b)
# axis.label.set_visible(b)
def toggle_label(self, b):
self._default_label_on = b
axis = self.axis[self.orientation]
axis.toggle(ticklabels=b, label=b)
#axis.major_ticklabels.set_visible(b)
#axis.minor_ticklabels.set_visible(b)
#axis.label.set_visible(b)
class CbarAxes(CbarAxesBase, LocatableAxes):
def __init__(self, *kl, **kwargs):
orientation = kwargs.pop("orientation", None)
if orientation is None:
raise ValueError("orientation must be specified")
self.orientation = orientation
self._default_label_on = True
self.locator = None
super(LocatableAxes, self).__init__(*kl, **kwargs)
def cla(self):
super(LocatableAxes, self).cla()
self._config_axes()
class Grid(object):
"""
    A class that creates a grid of Axes. In matplotlib, the axes
    location (and size) is specified in the normalized figure
    coordinates. This may not be ideal for images that need to be
    displayed with a given aspect ratio. For example, displaying
    images of the same size with some fixed padding between them cannot
    be easily done in matplotlib. AxesGrid is used in such cases.
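    A minimal usage sketch (a 2 x 2 grid of equally sized axes with a small,
    fixed pad between them)::
        import matplotlib.pyplot as plt
        fig = plt.figure()
        grid = Grid(fig, 111, nrows_ncols=(2, 2), axes_pad=0.1)
        for ax in grid:
            ax.plot([0, 1], [0, 1])
        plt.show()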
"""
_defaultLocatableAxesClass = LocatableAxes
def __init__(self, fig,
rect,
nrows_ncols,
ngrids=None,
direction="row",
axes_pad=0.02,
add_all=True,
share_all=False,
share_x=True,
share_y=True,
#aspect=True,
label_mode="L",
axes_class=None,
):
"""
Build an :class:`Grid` instance with a grid nrows*ncols
:class:`~matplotlib.axes.Axes` in
:class:`~matplotlib.figure.Figure` *fig* with
*rect=[left, bottom, width, height]* (in
:class:`~matplotlib.figure.Figure` coordinates) or
the subplot position code (e.g., "121").
Optional keyword arguments:
================ ======== =========================================
Keyword Default Description
================ ======== =========================================
direction "row" [ "row" | "column" ]
axes_pad 0.02 float| pad between axes given in inches
or tuple-like of floats,
(horizontal padding, vertical padding)
add_all True [ True | False ]
share_all False [ True | False ]
share_x True [ True | False ]
share_y True [ True | False ]
label_mode "L" [ "L" | "1" | "all" ]
axes_class None a type object which must be a subclass
of :class:`~matplotlib.axes.Axes`
================ ======== =========================================
"""
self._nrows, self._ncols = nrows_ncols
if ngrids is None:
ngrids = self._nrows * self._ncols
else:
if (ngrids > self._nrows * self._ncols) or (ngrids <= 0):
raise Exception("")
self.ngrids = ngrids
self._init_axes_pad(axes_pad)
if direction not in ["column", "row"]:
raise Exception("")
self._direction = direction
if axes_class is None:
axes_class = self._defaultLocatableAxesClass
axes_class_args = {}
else:
if (type(axes_class)) == type and \
issubclass(axes_class,
self._defaultLocatableAxesClass.Axes):
axes_class_args = {}
else:
axes_class, axes_class_args = axes_class
self.axes_all = []
self.axes_column = [[] for _ in range(self._ncols)]
self.axes_row = [[] for _ in range(self._nrows)]
h = []
v = []
if cbook.is_string_like(rect) or cbook.is_numlike(rect):
self._divider = SubplotDivider(fig, rect, horizontal=h, vertical=v,
aspect=False)
elif isinstance(rect, SubplotSpec):
self._divider = SubplotDivider(fig, rect, horizontal=h, vertical=v,
aspect=False)
elif len(rect) == 3:
kw = dict(horizontal=h, vertical=v, aspect=False)
self._divider = SubplotDivider(fig, *rect, **kw)
elif len(rect) == 4:
self._divider = Divider(fig, rect, horizontal=h, vertical=v,
aspect=False)
else:
raise Exception("")
rect = self._divider.get_position()
# reference axes
self._column_refax = [None for _ in range(self._ncols)]
self._row_refax = [None for _ in range(self._nrows)]
self._refax = None
for i in range(self.ngrids):
col, row = self._get_col_row(i)
if share_all:
sharex = self._refax
sharey = self._refax
else:
if share_x:
sharex = self._column_refax[col]
else:
sharex = None
if share_y:
sharey = self._row_refax[row]
else:
sharey = None
ax = axes_class(fig, rect, sharex=sharex, sharey=sharey,
**axes_class_args)
if share_all:
if self._refax is None:
self._refax = ax
else:
if sharex is None:
self._column_refax[col] = ax
if sharey is None:
self._row_refax[row] = ax
self.axes_all.append(ax)
self.axes_column[col].append(ax)
self.axes_row[row].append(ax)
self.axes_llc = self.axes_column[0][-1]
self._update_locators()
if add_all:
for ax in self.axes_all:
fig.add_axes(ax)
self.set_label_mode(label_mode)
def _init_axes_pad(self, axes_pad):
axes_pad = _extend_axes_pad(axes_pad)
self._axes_pad = axes_pad
self._horiz_pad_size = Size.Fixed(axes_pad[0])
self._vert_pad_size = Size.Fixed(axes_pad[1])
def _update_locators(self):
h = []
h_ax_pos = []
for _ in self._column_refax:
#if h: h.append(Size.Fixed(self._axes_pad))
if h:
h.append(self._horiz_pad_size)
h_ax_pos.append(len(h))
sz = Size.Scaled(1)
h.append(sz)
v = []
v_ax_pos = []
for _ in self._row_refax[::-1]:
#if v: v.append(Size.Fixed(self._axes_pad))
if v:
v.append(self._vert_pad_size)
v_ax_pos.append(len(v))
sz = Size.Scaled(1)
v.append(sz)
for i in range(self.ngrids):
col, row = self._get_col_row(i)
locator = self._divider.new_locator(nx=h_ax_pos[col],
ny=v_ax_pos[self._nrows - 1 - row])
self.axes_all[i].set_axes_locator(locator)
self._divider.set_horizontal(h)
self._divider.set_vertical(v)
def _get_col_row(self, n):
if self._direction == "column":
col, row = divmod(n, self._nrows)
else:
row, col = divmod(n, self._ncols)
return col, row
# Good to propagate __len__ if we have __getitem__
def __len__(self):
return len(self.axes_all)
def __getitem__(self, i):
return self.axes_all[i]
def get_geometry(self):
"""
get geometry of the grid. Returns a tuple of two integer,
representing number of rows and number of columns.
"""
return self._nrows, self._ncols
def set_axes_pad(self, axes_pad):
"set axes_pad"
self._axes_pad = axes_pad
# These two lines actually differ from ones in _init_axes_pad
self._horiz_pad_size.fixed_size = axes_pad[0]
self._vert_pad_size.fixed_size = axes_pad[1]
def get_axes_pad(self):
"""
get axes_pad
Returns
-------
tuple
Padding in inches, (horizontal pad, vertical pad)
"""
return self._axes_pad
def set_aspect(self, aspect):
"set aspect"
self._divider.set_aspect(aspect)
def get_aspect(self):
"get aspect"
return self._divider.get_aspect()
def set_label_mode(self, mode):
"set label_mode"
if mode == "all":
for ax in self.axes_all:
_tick_only(ax, False, False)
elif mode == "L":
# left-most axes
for ax in self.axes_column[0][:-1]:
_tick_only(ax, bottom_on=True, left_on=False)
# lower-left axes
ax = self.axes_column[0][-1]
_tick_only(ax, bottom_on=False, left_on=False)
for col in self.axes_column[1:]:
# axes with no labels
for ax in col[:-1]:
_tick_only(ax, bottom_on=True, left_on=True)
# bottom
ax = col[-1]
_tick_only(ax, bottom_on=False, left_on=True)
elif mode == "1":
for ax in self.axes_all:
_tick_only(ax, bottom_on=True, left_on=True)
ax = self.axes_llc
_tick_only(ax, bottom_on=False, left_on=False)
def get_divider(self):
return self._divider
def set_axes_locator(self, locator):
self._divider.set_locator(locator)
def get_axes_locator(self):
return self._divider.get_locator()
def get_vsize_hsize(self):
return self._divider.get_vsize_hsize()
# from axes_size import AddList
# vsize = AddList(self._divider.get_vertical())
# hsize = AddList(self._divider.get_horizontal())
# return vsize, hsize
class ImageGrid(Grid):
"""
    A class that creates a grid of Axes. In matplotlib, the axes
    location (and size) is specified in the normalized figure
    coordinates. This may not be ideal for images that need to be
    displayed with a given aspect ratio. For example, displaying
    images of the same size with some fixed padding between them cannot
    be easily done in matplotlib. ImageGrid is used in such cases.
"""
_defaultCbarAxesClass = CbarAxes
def __init__(self, fig,
rect,
nrows_ncols,
ngrids=None,
direction="row",
axes_pad=0.02,
add_all=True,
share_all=False,
aspect=True,
label_mode="L",
cbar_mode=None,
cbar_location="right",
cbar_pad=None,
cbar_size="5%",
cbar_set_cax=True,
axes_class=None,
):
"""
Build an :class:`ImageGrid` instance with a grid nrows*ncols
:class:`~matplotlib.axes.Axes` in
:class:`~matplotlib.figure.Figure` *fig* with
*rect=[left, bottom, width, height]* (in
:class:`~matplotlib.figure.Figure` coordinates) or
the subplot position code (e.g., "121").
Optional keyword arguments:
================ ======== =========================================
Keyword Default Description
================ ======== =========================================
direction "row" [ "row" | "column" ]
axes_pad 0.02 float| pad between axes given in inches
or tuple-like of floats,
(horizontal padding, vertical padding)
add_all True [ True | False ]
share_all False [ True | False ]
aspect True [ True | False ]
label_mode "L" [ "L" | "1" | "all" ]
cbar_mode None [ "each" | "single" | "edge" ]
cbar_location "right" [ "left" | "right" | "bottom" | "top" ]
cbar_pad None
cbar_size "5%"
cbar_set_cax True [ True | False ]
axes_class None a type object which must be a subclass
of axes_grid's subclass of
:class:`~matplotlib.axes.Axes`
================ ======== =========================================
    *cbar_set_cax* : if True, each axes in the grid has a cax
      attribute that is bound to the associated cbar_axes.
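    A minimal usage sketch (a 1 x 2 grid of images sharing a single colorbar)::
        import numpy as np
        import matplotlib.pyplot as plt
        fig = plt.figure()
        grid = ImageGrid(fig, 111, nrows_ncols=(1, 2), axes_pad=0.1,
                         cbar_mode="single")
        for ax in grid:
            im = ax.imshow(np.random.rand(10, 10))
        grid.cbar_axes[0].colorbar(im)
        plt.show()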
"""
self._nrows, self._ncols = nrows_ncols
if ngrids is None:
ngrids = self._nrows * self._ncols
else:
if (ngrids > self._nrows * self._ncols) or (ngrids <= 0):
raise Exception("")
self.ngrids = ngrids
axes_pad = _extend_axes_pad(axes_pad)
self._axes_pad = axes_pad
self._colorbar_mode = cbar_mode
self._colorbar_location = cbar_location
if cbar_pad is None:
# horizontal or vertical arrangement?
if cbar_location in ("left", "right"):
self._colorbar_pad = axes_pad[0]
else:
self._colorbar_pad = axes_pad[1]
else:
self._colorbar_pad = cbar_pad
self._colorbar_size = cbar_size
self._init_axes_pad(axes_pad)
if direction not in ["column", "row"]:
raise Exception("")
self._direction = direction
if axes_class is None:
axes_class = self._defaultLocatableAxesClass
axes_class_args = {}
else:
if isinstance(axes_class, maxes.Axes):
axes_class_args = {}
else:
axes_class, axes_class_args = axes_class
self.axes_all = []
self.axes_column = [[] for _ in range(self._ncols)]
self.axes_row = [[] for _ in range(self._nrows)]
self.cbar_axes = []
h = []
v = []
if cbook.is_string_like(rect) or cbook.is_numlike(rect):
self._divider = SubplotDivider(fig, rect, horizontal=h, vertical=v,
aspect=aspect)
elif isinstance(rect, SubplotSpec):
self._divider = SubplotDivider(fig, rect, horizontal=h, vertical=v,
aspect=aspect)
elif len(rect) == 3:
kw = dict(horizontal=h, vertical=v, aspect=aspect)
self._divider = SubplotDivider(fig, *rect, **kw)
elif len(rect) == 4:
self._divider = Divider(fig, rect, horizontal=h, vertical=v,
aspect=aspect)
else:
raise Exception("")
rect = self._divider.get_position()
# reference axes
self._column_refax = [None for _ in range(self._ncols)]
self._row_refax = [None for _ in range(self._nrows)]
self._refax = None
for i in range(self.ngrids):
col, row = self._get_col_row(i)
if share_all:
if self.axes_all:
sharex = self.axes_all[0]
sharey = self.axes_all[0]
else:
sharex = None
sharey = None
else:
sharex = self._column_refax[col]
sharey = self._row_refax[row]
ax = axes_class(fig, rect, sharex=sharex, sharey=sharey,
**axes_class_args)
self.axes_all.append(ax)
self.axes_column[col].append(ax)
self.axes_row[row].append(ax)
if share_all:
if self._refax is None:
self._refax = ax
if sharex is None:
self._column_refax[col] = ax
if sharey is None:
self._row_refax[row] = ax
cax = self._defaultCbarAxesClass(fig, rect,
orientation=self._colorbar_location)
self.cbar_axes.append(cax)
self.axes_llc = self.axes_column[0][-1]
self._update_locators()
if add_all:
for ax in self.axes_all+self.cbar_axes:
fig.add_axes(ax)
if cbar_set_cax:
if self._colorbar_mode == "single":
for ax in self.axes_all:
ax.cax = self.cbar_axes[0]
elif self._colorbar_mode == "edge":
for index, ax in enumerate(self.axes_all):
col, row = self._get_col_row(index)
if self._colorbar_location in ("left", "right"):
ax.cax = self.cbar_axes[row]
else:
ax.cax = self.cbar_axes[col]
else:
for ax, cax in zip(self.axes_all, self.cbar_axes):
ax.cax = cax
self.set_label_mode(label_mode)
def _update_locators(self):
h = []
v = []
h_ax_pos = []
h_cb_pos = []
if (self._colorbar_mode == "single" and
self._colorbar_location in ('left', 'bottom')):
if self._colorbar_location == "left":
#sz = Size.Fraction(Size.AxesX(self.axes_llc), self._nrows)
sz = Size.Fraction(self._nrows, Size.AxesX(self.axes_llc))
h.append(Size.from_any(self._colorbar_size, sz))
h.append(Size.from_any(self._colorbar_pad, sz))
locator = self._divider.new_locator(nx=0, ny=0, ny1=-1)
elif self._colorbar_location == "bottom":
#sz = Size.Fraction(Size.AxesY(self.axes_llc), self._ncols)
sz = Size.Fraction(self._ncols, Size.AxesY(self.axes_llc))
v.append(Size.from_any(self._colorbar_size, sz))
v.append(Size.from_any(self._colorbar_pad, sz))
locator = self._divider.new_locator(nx=0, nx1=-1, ny=0)
for i in range(self.ngrids):
self.cbar_axes[i].set_visible(False)
self.cbar_axes[0].set_axes_locator(locator)
self.cbar_axes[0].set_visible(True)
for col, ax in enumerate(self.axes_row[0]):
if h:
h.append(self._horiz_pad_size) # Size.Fixed(self._axes_pad))
if ax:
sz = Size.AxesX(ax, aspect="axes", ref_ax=self.axes_all[0])
else:
sz = Size.AxesX(self.axes_all[0],
aspect="axes", ref_ax=self.axes_all[0])
if (self._colorbar_mode == "each" or
(self._colorbar_mode == 'edge' and
col == 0)) and self._colorbar_location == "left":
h_cb_pos.append(len(h))
h.append(Size.from_any(self._colorbar_size, sz))
h.append(Size.from_any(self._colorbar_pad, sz))
h_ax_pos.append(len(h))
h.append(sz)
if ((self._colorbar_mode == "each" or
(self._colorbar_mode == 'edge' and
col == self._ncols - 1)) and
self._colorbar_location == "right"):
h.append(Size.from_any(self._colorbar_pad, sz))
h_cb_pos.append(len(h))
h.append(Size.from_any(self._colorbar_size, sz))
v_ax_pos = []
v_cb_pos = []
for row, ax in enumerate(self.axes_column[0][::-1]):
if v:
v.append(self._vert_pad_size) # Size.Fixed(self._axes_pad))
if ax:
sz = Size.AxesY(ax, aspect="axes", ref_ax=self.axes_all[0])
else:
sz = Size.AxesY(self.axes_all[0],
aspect="axes", ref_ax=self.axes_all[0])
if (self._colorbar_mode == "each" or
(self._colorbar_mode == 'edge' and
row == 0)) and self._colorbar_location == "bottom":
v_cb_pos.append(len(v))
v.append(Size.from_any(self._colorbar_size, sz))
v.append(Size.from_any(self._colorbar_pad, sz))
v_ax_pos.append(len(v))
v.append(sz)
if ((self._colorbar_mode == "each" or
(self._colorbar_mode == 'edge' and
row == self._nrows - 1)) and
self._colorbar_location == "top"):
v.append(Size.from_any(self._colorbar_pad, sz))
v_cb_pos.append(len(v))
v.append(Size.from_any(self._colorbar_size, sz))
for i in range(self.ngrids):
col, row = self._get_col_row(i)
#locator = self._divider.new_locator(nx=4*col,
# ny=2*(self._nrows - row - 1))
locator = self._divider.new_locator(nx=h_ax_pos[col],
ny=v_ax_pos[self._nrows-1-row])
self.axes_all[i].set_axes_locator(locator)
if self._colorbar_mode == "each":
if self._colorbar_location in ("right", "left"):
locator = self._divider.new_locator(
nx=h_cb_pos[col], ny=v_ax_pos[self._nrows - 1 - row])
elif self._colorbar_location in ("top", "bottom"):
locator = self._divider.new_locator(
nx=h_ax_pos[col], ny=v_cb_pos[self._nrows - 1 - row])
self.cbar_axes[i].set_axes_locator(locator)
elif self._colorbar_mode == 'edge':
if ((self._colorbar_location == 'left' and col == 0) or
(self._colorbar_location == 'right'
and col == self._ncols-1)):
locator = self._divider.new_locator(
nx=h_cb_pos[0], ny=v_ax_pos[self._nrows -1 - row])
self.cbar_axes[row].set_axes_locator(locator)
elif ((self._colorbar_location == 'bottom' and
row == self._nrows - 1) or
(self._colorbar_location == 'top' and row == 0)):
locator = self._divider.new_locator(nx=h_ax_pos[col],
ny=v_cb_pos[0])
self.cbar_axes[col].set_axes_locator(locator)
if self._colorbar_mode == "single":
if self._colorbar_location == "right":
#sz = Size.Fraction(Size.AxesX(self.axes_llc), self._nrows)
sz = Size.Fraction(self._nrows, Size.AxesX(self.axes_llc))
h.append(Size.from_any(self._colorbar_pad, sz))
h.append(Size.from_any(self._colorbar_size, sz))
locator = self._divider.new_locator(nx=-2, ny=0, ny1=-1)
elif self._colorbar_location == "top":
#sz = Size.Fraction(Size.AxesY(self.axes_llc), self._ncols)
sz = Size.Fraction(self._ncols, Size.AxesY(self.axes_llc))
v.append(Size.from_any(self._colorbar_pad, sz))
v.append(Size.from_any(self._colorbar_size, sz))
locator = self._divider.new_locator(nx=0, nx1=-1, ny=-2)
if self._colorbar_location in ("right", "top"):
for i in range(self.ngrids):
self.cbar_axes[i].set_visible(False)
self.cbar_axes[0].set_axes_locator(locator)
self.cbar_axes[0].set_visible(True)
elif self._colorbar_mode == "each":
for i in range(self.ngrids):
self.cbar_axes[i].set_visible(True)
elif self._colorbar_mode == "edge":
if self._colorbar_location in ('right', 'left'):
count = self._nrows
else:
count = self._ncols
for i in range(count):
self.cbar_axes[i].set_visible(True)
for j in range(i + 1, self.ngrids):
self.cbar_axes[j].set_visible(False)
else:
for i in range(self.ngrids):
self.cbar_axes[i].set_visible(False)
self.cbar_axes[i].set_position([1., 1., 0.001, 0.001],
which="active")
self._divider.set_horizontal(h)
self._divider.set_vertical(v)
AxesGrid = ImageGrid
| gpl-3.0 |
markstoehr/spectral_features | filters/fulop_reassignment.py | 1 | 15179 | from __future__ import division
import filterbank as fb
import numpy as np
from scipy.io import wavfile
import matplotlib.pyplot as plt
import transforms
from scipy.special import gamma
from scipy.ndimage.filters import median_filter, convolve
taper_length=511
order=6
half_time_support=6
h_temp,dh_temp,ddh_temp, tt = fb.hermite_window(taper_length,
order,
half_time_support)
h = np.zeros((h_temp.shape[0],
h_temp.shape[1]+1))
h[:,:-1] = h_temp
dh = np.zeros((dh_temp.shape[0],
dh_temp.shape[1]+1))
dh[:,:-1] = dh_temp
ddh = np.zeros((ddh_temp.shape[0],
ddh_temp.shape[1]+1))
ddh[:,:-1] = ddh_temp
sr,x = wavfile.read('/home/mark/Research/phoneclassification/sa1.wav')
x = x.astype(float)/2**15
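# first-order pre-emphasis filter to boost the higher frequencies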
x[1:] = x[1:] - .95*x[:-1]
oversampling=3
N = x.size
N1 = 512
nframes = int(.5 + N/N1*2**oversampling)
greater_than_winlength = 386*np.ones((nframes,N1)) > np.arange(N1)
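# (nframes, N1) matrix of sample indices, one analysis frame per row with a
# hop of N1 / 2**oversampling samples; out-of-range indices are reflected at
# the signal boundaries below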
indices = (np.arange(N1,dtype=int)-int(N1/2))[np.newaxis, : ] + int(N1/2**oversampling)*np.arange(nframes,dtype=int)[:,np.newaxis]
indices *= (2*(indices > 0)-1)
# symmetrize the tail
tail_indices = indices > N-1
indices[tail_indices] = N-1 - (indices[tail_indices] - N+1)
frames = np.fft.fft((x[indices]*greater_than_winlength) * h[0])
frames_mv = frames * np.exp(2j*np.pi*np.outer(indices[:,0],np.arange(indices.shape[1]))/N1)
dframes = np.fft.fft((x[indices]*greater_than_winlength) * dh[0])
dframes_mv = dframes * np.exp(2j*np.pi*np.outer(indices[:,0],np.arange(indices.shape[1]))/N1)
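# time derivative of the short-time phase (the instantaneous-frequency
# correction used in reassignment), estimated as Im{X_dh * conj(X_h)} / |X_h|^2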
w_hat_delta = np.imag((dframes_mv * frames_mv.conj())/np.abs(frames)**2)
w_hat_delta_unnorm = np.imag((dframes_mv * frames_mv.conj()))
w_hat_delta_compressed = np.log(w_hat_delta * np.sign(w_hat_delta) +1e-8) * np.sign(w_hat_delta)
taper_length=255
order=6
half_time_support=6
h_temp, dh_temp, ddh_temp, tt = fb.hermite_window(taper_length,
                                                  order,
                                                  half_time_support)
h = np.zeros((h_temp.shape[0],
h_temp.shape[1]+1))
h[:,:-1] = h_temp
dh = np.zeros((dh_temp.shape[0],
dh_temp.shape[1]+1))
dh[:,:-1] = dh_temp
N1 = 256
oversampling=2
nframes = int(.5 + N/N1*2**oversampling)
greater_than_winlength = 256*np.ones((nframes,N1)) > np.arange(N1)
indices = (np.arange(N1,dtype=int)-int(N1/2))[np.newaxis, : ] + int(N1/2**oversampling)*np.arange(nframes,dtype=int)[:,np.newaxis]
indices *= (2*(indices > 0)-1)
# symmetrize the tail
tail_indices = indices > N-1
indices[tail_indices] = N-1 - (indices[tail_indices] - N+1)
frames = np.fft.fft((x[indices]*greater_than_winlength) * h[0])
frames_mv = frames * np.exp(2j*np.pi*np.outer(indices[:,0],np.arange(indices.shape[1]))/N1)
dframes = np.fft.fft((x[indices]*greater_than_winlength) * dh[0])
dframes_mv = dframes * np.exp(2j*np.pi*np.outer(indices[:,0],np.arange(indices.shape[1]))/N1)
w_hat_delta = np.imag((dframes_mv * frames_mv.conj())/np.abs(frames)**2)
w_hat_delta_unnorm = np.imag((dframes_mv * frames_mv.conj()))
w_hat_delta_compressed = np.log(w_hat_delta * np.sign(w_hat_delta) +1e-8) * np.sign(w_hat_delta)
plt.subplot(2,1,1);plt.imshow(w_hat_delta.T[:100],interpolation='nearest',origin='lower',aspect=2,vmin=-.03,vmax=.03,cmap='bone'); plt.subplot(2,1,2); plt.imshow(np.log(np.abs(frames)).T[:100],origin='lower',interpolation='nearest'); plt.show()
avg = np.zeros(frames.shape,dtype=np.complex128)
avg_d = np.zeros(dframes.shape,dtype=np.complex128)
abs_avg = np.zeros(frames.shape)
w_hat_delta_avg = np.zeros(frames.shape)
for i in xrange(5):
f = np.fft.fft((x[indices]*greater_than_winlength) * h[i])
f_mv = f * np.exp(2j*np.pi*np.outer(indices[:,0],np.arange(indices.shape[1]))/N1)
avg += f
abs_avg += np.abs(f_mv)
df = np.fft.fft((x[indices]*greater_than_winlength) * dh[i])
df_mv = df * np.exp(2j*np.pi*np.outer(indices[:,0],np.arange(indices.shape[1]))/N1)
avg_d += np.abs(df_mv)
w_hat_delta_avg += np.imag((df_mv * f_mv.conj())/np.abs(f_mv)**2)
avg/=5
avg_d/=5
abs_avg/=5
w_hat_delta_avg/=5
w_hat_delta = np.imag((avg_d * avg.conj())/np.abs(avg)**2)
w_hat_delta_unnorm = np.imag((avg_d * avg.conj()))
w_hat_delta_compressed = np.log(w_hat_delta * np.sign(w_hat_delta) +1e-8) * np.sign(w_hat_delta)
plt.subplot(2,1,1);plt.imshow(w_hat_delta.T[:100],interpolation='nearest',origin='lower',aspect=2,vmin=-.03,vmax=.03,cmap='bone'); plt.subplot(2,1,2); plt.imshow(np.log(np.abs(avg)).T[:100],origin='lower',interpolation='nearest'); plt.show()
plt.subplot(2,1,1);plt.imshow(w_hat_delta_avg.T[:100],interpolation='nearest',origin='lower',aspect=2,vmin=-.03,vmax=.03,cmap='bone'); plt.subplot(2,1,2); plt.imshow(np.log(np.abs(avg)).T[:100],origin='lower',interpolation='nearest'); plt.show()
# now getting the hessian
taper_length=255
order=6
half_time_support=6
h_temp, dh_temp, ddh_temp, tt_temp = fb.hermite_window(taper_length,
                                                       order,
                                                       half_time_support)
h = np.zeros((h_temp.shape[0],
h_temp.shape[1]+1))
h[:,:-1] = h_temp
dh = np.zeros((dh_temp.shape[0],
dh_temp.shape[1]+1))
dh[:,:-1] = dh_temp
ddh = np.zeros((ddh_temp.shape[0],
ddh_temp.shape[1]+1))
ddh[:,:-1] = ddh_temp
tt = (2*tt_temp[-1] -tt_temp[-2])*np.ones(tt_temp.shape[0]+1)
tt[:-1] = tt_temp
avg = np.zeros(frames.shape,dtype=np.complex128)
avg_d = np.zeros(dframes.shape,dtype=np.complex128)
abs_avg = np.zeros(frames.shape)
avg_dphi_dt = np.zeros(frames.shape)
avg_dphi_dw = np.zeros(frames.shape)
avg_d2phi_dtdw = np.zeros(frames.shape)
avg_d2phi_dw2 = np.zeros(frames.shape)
avg_d2phi_dt2 = np.zeros(frames.shape)
avg_sm_d2phi_dtdw = np.zeros(frames.shape)
avg_sm_d2phi_dw2 = np.zeros(frames.shape)
avg_sm_d2phi_dt2 = np.zeros(frames.shape)
avg_dlogM_dw = np.zeros(frames.shape)
avg_dlogM_dt = np.zeros(frames.shape)
avg_d2logM_dw2 = np.zeros(frames.shape)
avg_d2logM_dt2 = np.zeros(frames.shape)
avg_d2logM_dwdt = np.zeros(frames.shape)
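# 8x8 Gaussian smoothing kernel plus its frequency- (axis 1) and time- (axis 0)
# derivative kernels, used below to smooth and differentiate the log-magnitude
# and phase-derivative estimates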
gfilter = np.exp(-((np.mgrid[:8,:8]-3.5)**2).sum(0)/12)
gdwfilter = -(np.mgrid[:8,:8]-3.5)[1]/6 * gfilter
gdtfilter = -(np.mgrid[:8,:8]-3.5)[0]/6 * gfilter
for i in xrange(5):
f = np.fft.fft((x[indices]*greater_than_winlength) * h[i])
f_mv = f * np.exp(2j*np.pi*np.outer(indices[:,0],np.arange(indices.shape[1]))/N1)
avg += f
abs_avg += np.abs(f_mv)
df = np.fft.fft((x[indices]*greater_than_winlength) * dh[i])
df_mv = df * np.exp(2j*np.pi*np.outer(indices[:,0],np.arange(indices.shape[1]))/N1)
tf = np.fft.fft((x[indices]*greater_than_winlength) * (tt*h[i]))
tf_mv = tf * np.exp(2j*np.pi*np.outer(indices[:,0],np.arange(indices.shape[1]))/N1)
tdf = np.fft.fft((x[indices]*greater_than_winlength) * (tt*dh[i]))
tdf_mv = tdf * np.exp(2j*np.pi*np.outer(indices[:,0],np.arange(indices.shape[1]))/N1)
ddf = np.fft.fft((x[indices]*greater_than_winlength) * ddh[i])
ddf_mv = ddf * np.exp(2j*np.pi*np.outer(indices[:,0],np.arange(indices.shape[1]))/N1)
ttf = np.fft.fft((x[indices]*greater_than_winlength) * (tt*tt*h[i]))
ttf_mv = ttf * np.exp(2j*np.pi*np.outer(indices[:,0],np.arange(indices.shape[1]))/N1)
abs_f_mv = np.abs(f_mv)**2
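    # first- and second-order partial derivatives of the STFT phase and
    # log-magnitude, estimated from the auxiliary windows dh (derivative
    # window), tt*h (time-weighted window) and their combinations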
dphi_dt = np.imag((df_mv * f_mv.conj())/abs_f_mv)
dphi_dw = - np.real((tf_mv * f_mv.conj())/abs_f_mv)
dlogM_dt = convolve(np.real(df_mv * f_mv.conj()/abs_f_mv),gfilter)
dlogM_dw = convolve(np.imag(tf_mv * f_mv.conj()/abs_f_mv),gfilter)
d2logM_dt2 = convolve(np.real(df_mv * f_mv.conj()/abs_f_mv),gdtfilter)
d2logM_dw2 = convolve(np.imag(tf_mv * f_mv.conj()/abs_f_mv),gdwfilter)
d2logM_dwdt = convolve(np.imag(tf_mv * f_mv.conj()/abs_f_mv),gdtfilter)
d2phi_dtdw = np.real(tdf_mv * f_mv.conj()/ abs_f_mv) - np.real(tf_mv * df_mv/ f_mv**2)
d2phi_dw2 = np.imag((tf_mv * f_mv.conj()/abs_f_mv)**2) - np.imag(ttf_mv * f_mv.conj()/ abs_f_mv)
d2phi_dt2 = np.imag(ddf_mv * f_mv.conj()/ abs_f_mv) - np.imag((df_mv * f_mv.conj()/abs_f_mv)**2)
# sm_d2phi_dtdw = convolve(np.real(tdf_mv * f_mv.conj()/ abs_f_mv) - np.real(tf_mv * df_mv/ f_mv**2),gfilter)
# sm_d2phi_dw2 = convolve(np.imag((tf_mv * f_mv.conj()/abs_f_mv)**2) - np.imag(ttf_mv * f_mv.conj()/ abs_f_mv),gfilter)
# sm_d2phi_dt2 = convolve(np.imag(ddf_mv * f_mv.conj()/ abs_f_mv) - np.imag((df_mv * f_mv.conj()/abs_f_mv)**2),gfilter)
sm_d2phi_dtdw = median_filter(np.real(tdf_mv * f_mv.conj()/ abs_f_mv) - np.real(tf_mv * df_mv/ f_mv**2),size=(5,5))
sm_d2phi_dw2 =median_filter(np.imag((tf_mv * f_mv.conj()/abs_f_mv)**2) - np.imag(ttf_mv * f_mv.conj()/ abs_f_mv),size=(5,5))
sm_d2phi_dt2 = median_filter(np.imag(ddf_mv * f_mv.conj()/ abs_f_mv) - np.imag((df_mv * f_mv.conj()/abs_f_mv)**2),size=(5,5))
avg_dphi_dt += dphi_dt
avg_dphi_dw += dphi_dw
avg_d2phi_dtdw += d2phi_dtdw
avg_d2phi_dw2 += d2phi_dw2
avg_d2phi_dt2 += d2phi_dt2
avg_sm_d2phi_dtdw += sm_d2phi_dtdw
avg_sm_d2phi_dw2 += sm_d2phi_dw2
avg_sm_d2phi_dt2 += sm_d2phi_dt2
avg_dlogM_dw += dlogM_dw
    avg_dlogM_dt += dlogM_dt
avg_d2logM_dw2 += d2logM_dw2
    avg_d2logM_dt2 += d2logM_dt2
avg_d2logM_dwdt += d2logM_dwdt
avg/=5
abs_avg/=5
avg_dphi_dt /= 5
avg_dphi_dw /= 5
avg_dlogM_dt /= 5
avg_dlogM_dw /= 5
avg_d2logM_dt2 /= 5
avg_d2logM_dw2 /= 5
avg_d2logM_dwdt /= 5
avg_d2phi_dtdw /=5
avg_d2phi_dw2 /= 5
avg_d2phi_dt2 /= 5
avg_sm_d2phi_dtdw /=5
avg_sm_d2phi_dw2 /= 5
avg_sm_d2phi_dt2 /= 5
# get the eigenvectors
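# eigen-decomposition of the symmetric 2x2 phase Hessian via a single Jacobi
# rotation: tau parametrizes the rotation angle, (c, s) are its cosine/sine,
# and l1, l2 below are the two eigenvalues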
tau = (avg_d2phi_dw2 - avg_d2phi_dt2)/(2*avg_d2phi_dtdw)
t = np.sign(tau)/(np.abs(tau) + np.sqrt(1+tau**2))
c = 1/np.sqrt(1+t**2)
s = c*t
# compute the hessian eigenvectors
l1 = avg_d2phi_dt2 - t* avg_d2phi_dtdw
l2 = avg_d2phi_dw2 + t* avg_d2phi_dtdw
order = (l2 > l1).astype(int)
ltrue1 = l2*(1-order) + l1*order
etrue1_t = s*(1-order) + c*order
etrue1_w = c*(1-order) - s*order
inner_prod = etrue1_t * dphi_dt + etrue1_w * dphi_dw
#across frequency
lfreq = ltrue1 * (ltrue1 < 0) * order
E = np.zeros(avg_dphi_dt.T.shape)
E[:-1] = avg_dphi_dt.T[1:] - avg_dphi_dt.T[:-1]
plt.subplot(2,1,1); plt.imshow(avg_dphi_dt.T[:100],cmap='binary',origin='lower',vmin=-.05,vmax=.05); plt.subplot(2,1,2); plt.imshow(np.log(abs_avg).T[:100],origin='lower'); plt.show()
trinary_dlogM_dw = lambda t: np.sign(avg_dlogM_dw)*( np.abs(avg_dlogM_dw) > t)
plt.imshow(trinary_dlogM_dw(.5).T[:100],origin='lower',cmap='bone',vmin=-1,vmax=1); plt.show()
plt.imshow(avg_dlogM_dw.T[:100],origin='lower',cmap='bone',vmin=-1,vmax=1); plt.show()
from scipy.ndimage.filters import median_filter, convolve
E = median_filter(avg_dphi_dt,size=(3,3))
E_filtered = np.sign(E)*(np.abs(E) > .02)
filter_d2logM_dw2 = convolve(avg_dlogM_dw, gdwfilter)
filter_d2phi_dwdt = convolve(avg_dphi_dt,gdwfilter)
filter_d2phi_dt2 = convolve(avg_dphi_dt,gdtfilter)
filter_d2phi_dw2 = convolve(avg_dphi_dw,gdwfilter)
# get the eigenvectors
tau = (filter_d2phi_dw2 - filter_d2phi_dt2)/(2*filter_d2phi_dwdt)
t = np.sign(tau)/(np.abs(tau) + np.sqrt(1+tau**2))
c = np.nan_to_num(1/np.sqrt(1+t**2))
s = c*t
# eigenvalues
l1 = filter_d2phi_dt2 - t* filter_d2phi_dwdt
l2 = filter_d2phi_dw2 + t* filter_d2phi_dwdt
np.abs(s) > np.abs(c)
plt.subplot(2,1,1); plt.imshow(filter_d2phi_dwdt.T[:100]>.1,origin='lower',cmap='bone'); plt.subplot(2,1,2); plt.imshow(np.log(abs_avg).T[:100],origin='lower'); plt.show()
trinary_d2phi_dt2 = lambda t: np.sign(filter_d2phi_dt2)*( np.abs(filter_d2phi_dt2) > t)
plt.subplot(2,1,1); plt.imshow(((filter_d2phi_dwdt>.12)*trinary_d2phi_dt2(.01)).T[:100],origin='lower',cmap='bone'); plt.subplot(2,1,2); plt.imshow(np.log(abs_avg).T[:100],origin='lower'); plt.show()
hs = []
for i in xrange(4,10):
taper_length=255
order=6
half_time_support=i
    h_temp1, dh_temp1, ddh_temp1, tt_temp1 = fb.hermite_window(taper_length,
                                                               order,
                                                               half_time_support)
hs.append(h_temp1[0])
# now getting the hessian also get a wider filter
# this doesn
taper_length=255
order=6
half_time_support=12
h_temp, dh_temp, ddh_temp, tt_temp = fb.hermite_window(taper_length,
                                                       order,
                                                       half_time_support)
h = np.zeros((h_temp.shape[0],
h_temp.shape[1]+1))
h[:,:-1] = h_temp
dh = np.zeros((dh_temp.shape[0],
dh_temp.shape[1]+1))
dh[:,:-1] = dh_temp
ddh = np.zeros((ddh_temp.shape[0],
ddh_temp.shape[1]+1))
ddh[:,:-1] = ddh_temp
tt = (2*tt_temp[-1] -tt_temp[-2])*np.ones(tt_temp.shape[0]+1)
tt[:-1] = tt_temp
avg = np.zeros(frames.shape,dtype=np.complex128)
avg_d = np.zeros(dframes.shape,dtype=np.complex128)
abs_avg = np.zeros(frames.shape)
avg_dphi_dt = np.zeros(frames.shape)
avg_dphi_dw = np.zeros(frames.shape)
avg_d2phi_dtdw = np.zeros(frames.shape)
avg_d2phi_dw2 = np.zeros(frames.shape)
avg_d2phi_dt2 = np.zeros(frames.shape)
for i in xrange(5):
f = np.fft.fft((x[indices]*greater_than_winlength) * h[i])
f_mv = f * np.exp(2j*np.pi*np.outer(indices[:,0],np.arange(indices.shape[1]))/N1)
avg += f
abs_avg += np.abs(f_mv)
df = np.fft.fft((x[indices]*greater_than_winlength) * dh[i])
df_mv = df * np.exp(2j*np.pi*np.outer(indices[:,0],np.arange(indices.shape[1]))/N1)
tf = np.fft.fft((x[indices]*greater_than_winlength) * (tt*h[i]))
tf_mv = tf * np.exp(2j*np.pi*np.outer(indices[:,0],np.arange(indices.shape[1]))/N1)
tdf = np.fft.fft((x[indices]*greater_than_winlength) * (tt*dh[i]))
tdf_mv = tdf * np.exp(2j*np.pi*np.outer(indices[:,0],np.arange(indices.shape[1]))/N1)
ddf = np.fft.fft((x[indices]*greater_than_winlength) * ddh[i])
ddf_mv = ddf * np.exp(2j*np.pi*np.outer(indices[:,0],np.arange(indices.shape[1]))/N1)
ttf = np.fft.fft((x[indices]*greater_than_winlength) * (tt*tt*h[i]))
ttf_mv = ttf * np.exp(2j*np.pi*np.outer(indices[:,0],np.arange(indices.shape[1]))/N1)
abs_f_mv = np.abs(f_mv)**2
dphi_dt = np.imag((df_mv * f_mv.conj())/abs_f_mv)
dphi_dw = - np.real((tf_mv * f_mv.conj())/abs_f_mv)
d2phi_dtdw = np.real(tdf_mv * f_mv.conj()/ abs_f_mv) - np.real(tf_mv * df_mv/ f_mv**2)
d2phi_dw2 = np.imag((tf_mv * f_mv.conj()/abs_f_mv)**2) - np.imag(ttf_mv * f_mv.conj()/ abs_f_mv)
d2phi_dt2 = np.imag(ddf_mv * f_mv.conj()/ abs_f_mv) - np.imag((df_mv * f_mv.conj()/abs_f_mv)**2)
avg_dphi_dt += dphi_dt
avg_dphi_dw += dphi_dw
avg_d2phi_dtdw += d2phi_dtdw
avg_d2phi_dw2 += d2phi_dw2
avg_d2phi_dt2 += d2phi_dt2
avg/=5
abs_avg/=5
avg_dphi_dt /= 5
avg_dphi_dw /= 5
avg_d2phi_dtdw /=5
avg_d2phi_dw2 /= 5
avg_d2phi_dt2 /= 5
# get the eigenvectors
tau = (avg_d2phi_dw2 - avg_d2phi_dt2)/(2*avg_d2phi_dtdw)
t = np.sign(tau)/(np.abs(tau) + np.sqrt(1+tau**2))
c = 1/np.sqrt(1+t**2)
s = c*t
# compute the hessian eigenvalues
l1 = avg_d2phi_dt2 - t* avg_d2phi_dtdw
l2 = avg_d2phi_dw2 + t* avg_d2phi_dtdw
order = (l2 > l1).astype(int)
ltrue1 = l2*(1-order) + l1*order
etrue1_t = s*(1-order) + c*order
etrue1_w = c*(1-order) - s*order
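# NOTE: inner_prod uses dphi_dt/dphi_dw from the final loop iteration above,
# not the averaged avg_dphi_dt/avg_dphi_dw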
inner_prod = etrue1_t * dphi_dt + etrue1_w * dphi_dw
#across frequency
lfreq = ltrue1 * (ltrue1 < 0) * order
E = np.zeros(avg_dphi_dt.T.shape)
E[:-1] = avg_dphi_dt.T[1:] - avg_dphi_dt.T[:-1]
plt.subplot(2,1,1); plt.imshow(avg_dphi_dt.T[:100],cmap='binary',origin='lower',vmin=-.05,vmax=.05); plt.subplot(2,1,2); plt.imshow(np.log(abs_avg).T[:100],origin='lower'); plt.show()
| gpl-3.0 |
fabioticconi/scikit-learn | sklearn/feature_extraction/tests/test_text.py | 59 | 35604 | from __future__ import unicode_literals
import warnings
from sklearn.feature_extraction.text import strip_tags
from sklearn.feature_extraction.text import strip_accents_unicode
from sklearn.feature_extraction.text import strip_accents_ascii
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.base import clone
import numpy as np
from nose import SkipTest
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_almost_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_raises
from sklearn.utils.random import choice
from sklearn.utils.testing import (assert_in, assert_less, assert_greater,
assert_warns_message, assert_raise_message,
clean_warning_registry)
from collections import defaultdict, Mapping
from functools import partial
import pickle
from io import StringIO
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
"the salad celeri copyright",
"the salad salad sparkling water copyright",
"the the celeri celeri copyright",
"the tomato tomato salad water",
"the tomato salad water copyright",
)
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
def uppercase(s):
return strip_accents_unicode(s).upper()
def strip_eacute(s):
return s.replace('\xe9', 'e')
def split_tokenize(s):
return s.split()
def lazy_analyze(s):
return ['the_ultimate_feature']
def test_strip_accents():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_unicode(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_unicode(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '\u0627' # simple halef
assert_equal(strip_accents_unicode(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_unicode(a), expected)
def test_to_ascii():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_ascii(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_ascii(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '' # halef has no direct ascii match
assert_equal(strip_accents_ascii(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_ascii(a), expected)
def test_word_analyzer_unigrams():
for Vectorizer in (CountVectorizer, HashingVectorizer):
wa = Vectorizer(strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon']
assert_equal(wa(text), expected)
text = "This is a test, really.\n\n I met Harry yesterday."
expected = ['this', 'is', 'test', 'really', 'met', 'harry',
'yesterday']
assert_equal(wa(text), expected)
wa = Vectorizer(input='file').build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['this', 'is', 'test', 'with', 'file', 'like',
'object']
assert_equal(wa(text), expected)
# with custom preprocessor
wa = Vectorizer(preprocessor=uppercase).build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
" c'\xe9tait pas tr\xeas bon.")
expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI',
'ETAIT', 'PAS', 'TRES', 'BON']
assert_equal(wa(text), expected)
# with custom tokenizer
wa = Vectorizer(tokenizer=split_tokenize,
strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ["j'ai", 'mange', 'du', 'kangourou', 'ce', 'midi,',
"c'etait", 'pas', 'tres', 'bon.']
assert_equal(wa(text), expected)
def test_word_analyzer_unigrams_and_bigrams():
wa = CountVectorizer(analyzer="word", strip_accents='unicode',
ngram_range=(1, 2)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du',
'du kangourou', 'kangourou ce', 'ce midi', 'midi etait',
'etait pas', 'pas tres', 'tres bon']
assert_equal(wa(text), expected)
def test_unicode_decode_error():
# decode_error default to strict, so this should fail
# First, encode (as bytes) a unicode string.
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
text_bytes = text.encode('utf-8')
# Then let the Analyzer try to decode it as ascii. It should fail,
# because we have given it an incorrect encoding.
wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, wa, text_bytes)
ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, ca, text_bytes)
def test_char_ngram_analyzer():
cnga = CountVectorizer(analyzer='char', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon"
expected = ["j'a", "'ai", 'ai ', 'i m', ' ma']
assert_equal(cnga(text)[:5], expected)
expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon']
assert_equal(cnga(text)[-5:], expected)
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
def test_char_wb_ngram_analyzer():
cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = [' th', 'thi', 'his', 'is ', ' thi']
assert_equal(cnga(text)[:5], expected)
expected = ['yester', 'esterd', 'sterda', 'terday', 'erday ']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char_wb',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("A test with a file-like object!")
expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes']
assert_equal(cnga(text)[:6], expected)
def test_countvectorizer_custom_vocabulary():
vocab = {"pizza": 0, "beer": 1}
terms = set(vocab.keys())
# Try a few of the supported types.
for typ in [dict, list, iter, partial(defaultdict, int)]:
v = typ(vocab)
vect = CountVectorizer(vocabulary=v)
vect.fit(JUNK_FOOD_DOCS)
if isinstance(v, Mapping):
assert_equal(vect.vocabulary_, vocab)
else:
assert_equal(set(vect.vocabulary_), terms)
X = vect.transform(JUNK_FOOD_DOCS)
assert_equal(X.shape[1], len(terms))
def test_countvectorizer_custom_vocabulary_pipeline():
what_we_like = ["pizza", "beer"]
pipe = Pipeline([
('count', CountVectorizer(vocabulary=what_we_like)),
('tfidf', TfidfTransformer())])
X = pipe.fit_transform(ALL_FOOD_DOCS)
assert_equal(set(pipe.named_steps['count'].vocabulary_),
set(what_we_like))
assert_equal(X.shape[1], len(what_we_like))
def test_countvectorizer_custom_vocabulary_repeated_indeces():
vocab = {"pizza": 0, "beer": 0}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("vocabulary contains repeated indices", str(e).lower())
def test_countvectorizer_custom_vocabulary_gap_index():
vocab = {"pizza": 1, "beer": 2}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("doesn't contain index", str(e).lower())
def test_countvectorizer_stop_words():
cv = CountVectorizer()
cv.set_params(stop_words='english')
assert_equal(cv.get_stop_words(), ENGLISH_STOP_WORDS)
cv.set_params(stop_words='_bad_str_stop_')
assert_raises(ValueError, cv.get_stop_words)
cv.set_params(stop_words='_bad_unicode_stop_')
assert_raises(ValueError, cv.get_stop_words)
stoplist = ['some', 'other', 'words']
cv.set_params(stop_words=stoplist)
assert_equal(cv.get_stop_words(), set(stoplist))
def test_countvectorizer_empty_vocabulary():
try:
vect = CountVectorizer(vocabulary=[])
vect.fit(["foo"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
try:
v = CountVectorizer(max_df=1.0, stop_words="english")
# fit on stopwords only
v.fit(["to be or not to be", "and me too", "and so do you"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
def test_fit_countvectorizer_twice():
cv = CountVectorizer()
X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])
X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])
assert_not_equal(X1.shape[1], X2.shape[1])
def test_tf_idf_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# this is robust to features with only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
def test_tfidf_no_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
    # the lack of smoothing makes IDF fragile in the presence of features with
    # only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
1. / np.array([0.])
numpy_provides_div0_warning = len(w) == 1
in_warning_message = 'divide by zero'
tfidf = assert_warns_message(RuntimeWarning, in_warning_message,
tr.fit_transform, X).toarray()
if not numpy_provides_div0_warning:
raise SkipTest("Numpy does not provide div 0 warnings.")
def test_sublinear_tf():
X = [[1], [2], [3]]
tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
tfidf = tr.fit_transform(X).toarray()
assert_equal(tfidf[0], 1)
assert_greater(tfidf[1], tfidf[0])
assert_greater(tfidf[2], tfidf[1])
assert_less(tfidf[1], 2)
assert_less(tfidf[2], 3)
def test_vectorizer():
# raw documents as an iterator
train_data = iter(ALL_FOOD_DOCS[:-1])
test_data = [ALL_FOOD_DOCS[-1]]
n_train = len(ALL_FOOD_DOCS) - 1
# test without vocabulary
v1 = CountVectorizer(max_df=0.5)
counts_train = v1.fit_transform(train_data)
if hasattr(counts_train, 'tocsr'):
counts_train = counts_train.tocsr()
assert_equal(counts_train[0, v1.vocabulary_["pizza"]], 2)
    # build a vectorizer v2 with the same vocabulary as the one fitted by v1
v2 = CountVectorizer(vocabulary=v1.vocabulary_)
    # compare that the two vectorizers give the same output on the test sample
for v in (v1, v2):
counts_test = v.transform(test_data)
if hasattr(counts_test, 'tocsr'):
counts_test = counts_test.tocsr()
vocabulary = v.vocabulary_
assert_equal(counts_test[0, vocabulary["salad"]], 1)
assert_equal(counts_test[0, vocabulary["tomato"]], 1)
assert_equal(counts_test[0, vocabulary["water"]], 1)
# stop word from the fixed list
assert_false("the" in vocabulary)
# stop word found automatically by the vectorizer DF thresholding
        # words that are highly frequent across the complete corpus are likely
        # to be uninformative (either real stop words or extraction
        # artifacts)
assert_false("copyright" in vocabulary)
# not present in the sample
assert_equal(counts_test[0, vocabulary["coke"]], 0)
assert_equal(counts_test[0, vocabulary["burger"]], 0)
assert_equal(counts_test[0, vocabulary["beer"]], 0)
assert_equal(counts_test[0, vocabulary["pizza"]], 0)
# test tf-idf
t1 = TfidfTransformer(norm='l1')
tfidf = t1.fit(counts_train).transform(counts_train).toarray()
assert_equal(len(t1.idf_), len(v1.vocabulary_))
assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))
# test tf-idf with new data
tfidf_test = t1.transform(counts_test).toarray()
assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))
# test tf alone
t2 = TfidfTransformer(norm='l1', use_idf=False)
tf = t2.fit(counts_train).transform(counts_train).toarray()
assert_equal(t2.idf_, None)
# test idf transform with unlearned idf vector
t3 = TfidfTransformer(use_idf=True)
assert_raises(ValueError, t3.transform, counts_train)
# test idf transform with incompatible n_features
X = [[1, 1, 5],
[1, 1, 0]]
t3.fit(X)
X_incompt = [[1, 3],
[1, 3]]
assert_raises(ValueError, t3.transform, X_incompt)
# L1-normalized term frequencies sum to one
assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)
# test the direct tfidf vectorizer
# (equivalent to term count vectorizer + tfidf transformer)
train_data = iter(ALL_FOOD_DOCS[:-1])
tv = TfidfVectorizer(norm='l1')
tv.max_df = v1.max_df
tfidf2 = tv.fit_transform(train_data).toarray()
assert_false(tv.fixed_vocabulary_)
assert_array_almost_equal(tfidf, tfidf2)
# test the direct tfidf vectorizer with new data
tfidf_test2 = tv.transform(test_data).toarray()
assert_array_almost_equal(tfidf_test, tfidf_test2)
# test transform on unfitted vectorizer with empty vocabulary
v3 = CountVectorizer(vocabulary=None)
assert_raises(ValueError, v3.transform, train_data)
# ascii preprocessor?
v3.set_params(strip_accents='ascii', lowercase=False)
assert_equal(v3.build_preprocessor(), strip_accents_ascii)
# error on bad strip_accents param
v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)
assert_raises(ValueError, v3.build_preprocessor)
# error with bad analyzer type
    v3.set_params(analyzer='_invalid_analyzer_type_')
assert_raises(ValueError, v3.build_analyzer)
def test_tfidf_vectorizer_setters():
tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False,
sublinear_tf=False)
tv.norm = 'l1'
assert_equal(tv._tfidf.norm, 'l1')
tv.use_idf = True
assert_true(tv._tfidf.use_idf)
tv.smooth_idf = True
assert_true(tv._tfidf.smooth_idf)
tv.sublinear_tf = True
assert_true(tv._tfidf.sublinear_tf)
def test_hashing_vectorizer():
v = HashingVectorizer()
X = v.transform(ALL_FOOD_DOCS)
token_nnz = X.nnz
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# By default the hashed values receive a random sign and l2 normalization
# makes the feature values bounded
assert_true(np.min(X.data) > -1)
assert_true(np.min(X.data) < 0)
assert_true(np.max(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
assert_almost_equal(np.linalg.norm(X[0].data, 2), 1.0)
# Check vectorization with some non-default parameters
v = HashingVectorizer(ngram_range=(1, 2), non_negative=True, norm='l1')
X = v.transform(ALL_FOOD_DOCS)
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# ngrams generate more non zeros
ngrams_nnz = X.nnz
assert_true(ngrams_nnz > token_nnz)
assert_true(ngrams_nnz < 2 * token_nnz)
# makes the feature values bounded
assert_true(np.min(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
assert_almost_equal(np.linalg.norm(X[0].data, 1), 1.0)
def test_feature_names():
cv = CountVectorizer(max_df=0.5)
# test for Value error on unfitted/empty vocabulary
assert_raises(ValueError, cv.get_feature_names)
X = cv.fit_transform(ALL_FOOD_DOCS)
n_samples, n_features = X.shape
assert_equal(len(cv.vocabulary_), n_features)
feature_names = cv.get_feature_names()
assert_equal(len(feature_names), n_features)
assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'],
feature_names)
for idx, name in enumerate(feature_names):
assert_equal(idx, cv.vocabulary_.get(name))
def test_vectorizer_max_features():
vec_factories = (
CountVectorizer,
TfidfVectorizer,
)
expected_vocabulary = set(['burger', 'beer', 'salad', 'pizza'])
expected_stop_words = set([u'celeri', u'tomato', u'copyright', u'coke',
u'sparkling', u'water', u'the'])
for vec_factory in vec_factories:
# test bounded number of extracted features
vectorizer = vec_factory(max_df=0.6, max_features=4)
vectorizer.fit(ALL_FOOD_DOCS)
assert_equal(set(vectorizer.vocabulary_), expected_vocabulary)
assert_equal(vectorizer.stop_words_, expected_stop_words)
def test_count_vectorizer_max_features():
# Regression test: max_features didn't work correctly in 0.14.
cv_1 = CountVectorizer(max_features=1)
cv_3 = CountVectorizer(max_features=3)
cv_None = CountVectorizer(max_features=None)
counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
features_1 = cv_1.get_feature_names()
features_3 = cv_3.get_feature_names()
features_None = cv_None.get_feature_names()
# The most common feature is "the", with frequency 7.
assert_equal(7, counts_1.max())
assert_equal(7, counts_3.max())
assert_equal(7, counts_None.max())
# The most common feature should be the same
assert_equal("the", features_1[np.argmax(counts_1)])
assert_equal("the", features_3[np.argmax(counts_3)])
assert_equal("the", features_None[np.argmax(counts_None)])
def test_vectorizer_max_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', max_df=1.0)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.max_df = 0.5 # 0.5 * 3 documents -> max_doc_count == 1.5
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
vect.max_df = 1
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
def test_vectorizer_min_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', min_df=1)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.min_df = 2
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdt} ignored
assert_equal(len(vect.vocabulary_.keys()), 2) # {ae} remain
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 4)
vect.min_df = 0.8 # 0.8 * 3 documents -> min_doc_count == 2.4
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdet} ignored
assert_equal(len(vect.vocabulary_.keys()), 1) # {a} remains
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 5)
def test_count_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = CountVectorizer(analyzer='char', max_df=1.0)
X = vect.fit_transform(test_data).toarray()
assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names())
assert_array_equal([[3, 1, 1, 0, 0],
[1, 2, 0, 1, 1]], X)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True)
X = vect.fit_transform(test_data).toarray()
assert_array_equal([[1, 1, 1, 0, 0],
[1, 1, 0, 1, 1]], X)
# check the ability to change the dtype
vect = CountVectorizer(analyzer='char', max_df=1.0,
binary=True, dtype=np.float32)
X_sparse = vect.fit_transform(test_data)
assert_equal(X_sparse.dtype, np.float32)
def test_hashed_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = HashingVectorizer(analyzer='char', non_negative=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X[0:1].data), 3)
assert_equal(np.max(X[1:2].data), 2)
assert_equal(X.dtype, np.float64)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X.data), 1)
assert_equal(X.dtype, np.float64)
# check the ability to change the dtype
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None, dtype=np.float64)
X = vect.transform(test_data)
assert_equal(X.dtype, np.float64)
def test_vectorizer_inverse_transform():
# raw documents
data = ALL_FOOD_DOCS
for vectorizer in (TfidfVectorizer(), CountVectorizer()):
transformed_data = vectorizer.fit_transform(data)
inversed_data = vectorizer.inverse_transform(transformed_data)
analyze = vectorizer.build_analyzer()
for doc, inversed_terms in zip(data, inversed_data):
terms = np.sort(np.unique(analyze(doc)))
inversed_terms = np.sort(np.unique(inversed_terms))
assert_array_equal(terms, inversed_terms)
# Test that inverse_transform also works with numpy arrays
transformed_data = transformed_data.toarray()
inversed_data2 = vectorizer.inverse_transform(transformed_data)
for terms, terms2 in zip(inversed_data, inversed_data2):
assert_array_equal(np.sort(terms), np.sort(terms2))
def test_count_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.2, random_state=0)
pipeline = Pipeline([('vect', CountVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'svc__loss': ('hinge', 'squared_hinge')
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
# on this toy dataset bigram representation which is used in the last of
# the grid_search is considered the best estimator since they all converge
# to 100% accuracy models
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
def test_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.1, random_state=0)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'vect__norm': ('l1', 'l2'),
'svc__loss': ('hinge', 'squared_hinge'),
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
# on this toy dataset bigram representation which is used in the last of
# the grid_search is considered the best estimator since they all converge
# to 100% accuracy models
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
assert_equal(best_vectorizer.norm, 'l2')
assert_false(best_vectorizer.fixed_vocabulary_)
def test_vectorizer_pipeline_cross_validation():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
cv_scores = cross_val_score(pipeline, data, target, cv=3)
assert_array_equal(cv_scores, [1., 1., 1.])
def test_vectorizer_unicode():
# tests that the count vectorizer works with cyrillic.
document = (
"\xd0\x9c\xd0\xb0\xd1\x88\xd0\xb8\xd0\xbd\xd0\xbd\xd0\xbe\xd0"
"\xb5 \xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb5\xd0\xbd\xd0\xb8\xd0"
"\xb5 \xe2\x80\x94 \xd0\xbe\xd0\xb1\xd1\x88\xd0\xb8\xd1\x80\xd0\xbd"
"\xd1\x8b\xd0\xb9 \xd0\xbf\xd0\xbe\xd0\xb4\xd1\x80\xd0\xb0\xd0\xb7"
"\xd0\xb4\xd0\xb5\xd0\xbb \xd0\xb8\xd1\x81\xd0\xba\xd1\x83\xd1\x81"
"\xd1\x81\xd1\x82\xd0\xb2\xd0\xb5\xd0\xbd\xd0\xbd\xd0\xbe\xd0\xb3"
"\xd0\xbe \xd0\xb8\xd0\xbd\xd1\x82\xd0\xb5\xd0\xbb\xd0\xbb\xd0"
"\xb5\xd0\xba\xd1\x82\xd0\xb0, \xd0\xb8\xd0\xb7\xd1\x83\xd1\x87"
"\xd0\xb0\xd1\x8e\xd1\x89\xd0\xb8\xd0\xb9 \xd0\xbc\xd0\xb5\xd1\x82"
"\xd0\xbe\xd0\xb4\xd1\x8b \xd0\xbf\xd0\xbe\xd1\x81\xd1\x82\xd1\x80"
"\xd0\xbe\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f \xd0\xb0\xd0\xbb\xd0\xb3"
"\xd0\xbe\xd1\x80\xd0\xb8\xd1\x82\xd0\xbc\xd0\xbe\xd0\xb2, \xd1\x81"
"\xd0\xbf\xd0\xbe\xd1\x81\xd0\xbe\xd0\xb1\xd0\xbd\xd1\x8b\xd1\x85 "
"\xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb0\xd1\x82\xd1\x8c\xd1\x81\xd1"
"\x8f.")
vect = CountVectorizer()
X_counted = vect.fit_transform([document])
assert_equal(X_counted.shape, (1, 15))
vect = HashingVectorizer(norm=None, non_negative=True)
X_hashed = vect.transform([document])
assert_equal(X_hashed.shape, (1, 2 ** 20))
# No collisions on such a small dataset
assert_equal(X_counted.nnz, X_hashed.nnz)
# When norm is None and non_negative, the tokens are counted up to
# collisions
assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))
def test_tfidf_vectorizer_with_fixed_vocabulary():
# non regression smoke test for inheritance issues
vocabulary = ['pizza', 'celeri']
vect = TfidfVectorizer(vocabulary=vocabulary)
X_1 = vect.fit_transform(ALL_FOOD_DOCS)
X_2 = vect.transform(ALL_FOOD_DOCS)
assert_array_almost_equal(X_1.toarray(), X_2.toarray())
assert_true(vect.fixed_vocabulary_)
def test_pickling_vectorizer():
instances = [
HashingVectorizer(),
HashingVectorizer(norm='l1'),
HashingVectorizer(binary=True),
HashingVectorizer(ngram_range=(1, 2)),
CountVectorizer(),
CountVectorizer(preprocessor=strip_tags),
CountVectorizer(analyzer=lazy_analyze),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
TfidfVectorizer(),
TfidfVectorizer(analyzer=lazy_analyze),
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
]
for orig in instances:
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_equal(copy.get_params(), orig.get_params())
assert_array_equal(
copy.fit_transform(JUNK_FOOD_DOCS).toarray(),
orig.fit_transform(JUNK_FOOD_DOCS).toarray())
def test_countvectorizer_vocab_sets_when_pickling():
# ensure that vocabulary of type set is coerced to a list to
# preserve iteration ordering after deserialization
rng = np.random.RandomState(0)
vocab_words = np.array(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'])
for x in range(0, 100):
vocab_set = set(choice(vocab_words, size=5, replace=False,
random_state=rng))
cv = CountVectorizer(vocabulary=vocab_set)
unpickled_cv = pickle.loads(pickle.dumps(cv))
cv.fit(ALL_FOOD_DOCS)
unpickled_cv.fit(ALL_FOOD_DOCS)
assert_equal(cv.get_feature_names(), unpickled_cv.get_feature_names())
def test_countvectorizer_vocab_dicts_when_pickling():
rng = np.random.RandomState(0)
vocab_words = np.array(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'])
for x in range(0, 100):
vocab_dict = dict()
words = choice(vocab_words, size=5, replace=False, random_state=rng)
for y in range(0, 5):
vocab_dict[words[y]] = y
cv = CountVectorizer(vocabulary=vocab_dict)
unpickled_cv = pickle.loads(pickle.dumps(cv))
cv.fit(ALL_FOOD_DOCS)
unpickled_cv.fit(ALL_FOOD_DOCS)
assert_equal(cv.get_feature_names(), unpickled_cv.get_feature_names())
def test_stop_words_removal():
# Ensure that deleting the stop_words_ attribute doesn't affect transform
fitted_vectorizers = (
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS)
)
for vect in fitted_vectorizers:
vect_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
vect.stop_words_ = None
stop_None_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
delattr(vect, 'stop_words_')
stop_del_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
assert_array_equal(stop_None_transform, vect_transform)
assert_array_equal(stop_del_transform, vect_transform)
def test_pickling_transformer():
X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
orig = TfidfTransformer().fit(X)
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_array_equal(
copy.fit_transform(X).toarray(),
orig.fit_transform(X).toarray())
def test_non_unique_vocab():
vocab = ['a', 'b', 'c', 'a', 'a']
vect = CountVectorizer(vocabulary=vocab)
assert_raises(ValueError, vect.fit, [])
def test_hashingvectorizer_nan_in_docs():
# np.nan can appear when using pandas to load text fields from a csv file
# with missing values.
message = "np.nan is an invalid document, expected byte or unicode string."
exception = ValueError
def func():
hv = HashingVectorizer()
hv.fit_transform(['hello world', np.nan, 'hello hello'])
assert_raise_message(exception, message, func)
def test_tfidfvectorizer_binary():
# Non-regression test: TfidfVectorizer used to ignore its "binary" param.
v = TfidfVectorizer(binary=True, use_idf=False, norm=None)
assert_true(v.binary)
X = v.fit_transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X.ravel(), [1, 1, 1, 0])
X2 = v.transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X2.ravel(), [1, 1, 1, 0])
def test_tfidfvectorizer_export_idf():
vect = TfidfVectorizer(use_idf=True)
vect.fit(JUNK_FOOD_DOCS)
assert_array_almost_equal(vect.idf_, vect._tfidf.idf_)
def test_vectorizer_vocab_clone():
vect_vocab = TfidfVectorizer(vocabulary=["the"])
vect_vocab_clone = clone(vect_vocab)
vect_vocab.fit(ALL_FOOD_DOCS)
vect_vocab_clone.fit(ALL_FOOD_DOCS)
assert_equal(vect_vocab_clone.vocabulary_, vect_vocab.vocabulary_)
| bsd-3-clause |
ScienceStacks/JViz | mysite/scisheets/core/test_api.py | 2 | 8433 | '''Tests for formulas API'''
from api import API, APIFormulas, APIPlugin, APIAdmin
from column import Column
from table import Table
import helpers_test as ht
#from helpers.trinary import Trinary
import table_evaluator as te
import numpy as np
import os
import pandas as pd
import random
import unittest
COLUMN1 = "Col_1"
COLUMN2 = "Col_2"
COLUMN3 = "Col_3"
TRUTH_COLUMNS = ['A', 'B']
COLUMN1_VALUES = range(10)
TEST_FILE1 = "test_api_1"
IGNORE_TEST = False
#############################
# Tests
#############################
# pylint: disable=W0212,C0111,R0904
class TestAPI(unittest.TestCase):
def setUp(self):
self.api = API()
self.api._table = ht.createTable("test", column_name=COLUMN1)
self.column1 = self.api._table.columnFromName(COLUMN1,
is_relative=False)
self.column1.addCells(COLUMN1_VALUES, replace=True)
self.api.setColumnVariables(nodenms=[COLUMN1])
ht.setupTableInitialization(self)
def testGetColumnValues(self):
if IGNORE_TEST:
return
values = self.api.getColumnValue(COLUMN1)
self.assertTrue(all(values == COLUMN1_VALUES))
def testSetColumnValues(self):
if IGNORE_TEST:
return
new_column1_values = list(COLUMN1_VALUES)
new_column1_values.extend(range(5))
self.api.setColumnValue(COLUMN1, new_column1_values)
values = self.api.getColumnValue(COLUMN1)
self.assertTrue(all(values == np.array(new_column1_values)))
def testColumnVisibility(self):
if IGNORE_TEST:
return
names = ['row']
column = self.api._table.columnFromName(names[0],
is_relative=False)
self.assertEqual(len(self.api._table._hidden_children), 0)
self.api.setColumnVisibility(names, is_visible=False)
self.assertEqual(len(self.api._table._hidden_children), 1)
self.assertTrue(column in self.api._table._hidden_children)
self.api.setColumnVisibility(names, is_visible=True)
self.assertEqual(len(self.api._table._hidden_children), 0)
def testSetColumnVariables(self):
if IGNORE_TEST:
return
table = self.api.getTable()
self.api.setColumnVariables()
columns = [c for c in table.getColumns(is_attached=False)
if not Table.isNameColumn(c)]
for column in columns:
if not column.getName(is_global_name=False) \
in table.getNamespace():
import pdb; pdb.set_trace()
self.assertTrue(column.getName(is_global_name=False)
in table.getNamespace())
new_column_name = "New_Column"
new_column = Column(new_column_name)
table.addColumn(new_column)
self.api.setColumnVariables()
self.assertTrue(new_column_name in table.getNamespace())
def testSetColumnVariablesWithColnmsOption(self):
if IGNORE_TEST:
return
table = self.api.getTable()
self.api.setColumnVariables()
old_cv_dict = {cv.getName(): cv
for cv in self.api.getColumnVariables()}
self.api.setColumnVariables(nodenms=[COLUMN1])
cv_names = [cv.getName() for cv in self.api.getColumnVariables()]
self.assertTrue(COLUMN1 in cv_names)
for cv_name in cv_names:
if cv_name != COLUMN1:
self.assertEqual(old_cv_dict[cv_name],
self.api.getColumnVariable(cv_name))
# pylint: disable=W0212,C0111,R0904
class TestAPIFormulas(unittest.TestCase):
def setUp(self):
table = ht.createTable("test", column_name=COLUMN1)
self.api = APIFormulas(table)
ht.setupTableInitialization(self)
def testGetValidatedColumn(self):
if IGNORE_TEST:
return
column = self.api.getColumn(COLUMN1)
self.assertEqual(column.getName(), COLUMN1)
def _createColumn(self):
self.api.createColumn(COLUMN2)
return self.api.getColumn(COLUMN2)
def testCreateColumn(self):
if IGNORE_TEST:
return
column = self._createColumn()
self.assertEqual(column.getName(), COLUMN2)
def testDeleteColumn(self):
if IGNORE_TEST:
return
_ = self._createColumn()
self.api.deleteColumn(COLUMN2)
is_absent = all([c.getName() != COLUMN2 \
for c in self.api._table.getColumns()])
self.assertTrue(is_absent)
_ = self._createColumn()
self.api.deleteColumn(2)
is_absent = all([c.getName() != COLUMN2 \
for c in self.api._table.getColumns()])
self.assertTrue(is_absent)
def testCreateTruthTable(self):
return # Don't test TruthTable since not completed
self._createTruthTable()
for n in range(len(TRUTH_COLUMNS)):
self.assertTrue(any([c.getName() == TRUTH_COLUMNS[n]
for c in self.api._table.getColumns()]))
def _OldcreateDataframe(self, prefix="", names=None):
df = pd.DataFrame()
data = {}
if names is None:
names = [c.getNames() for c in self.api.getTable().getColumns()]
if len(names) >= 3:
data[names[2]] = [100.0, 200.0, 300.0]
if len(names) >= 2:
data[names[1]] = [10.1, 20.0, 30.0]
if len(names) >= 1:
data[names[0]] = ["one", "two", "three"]
for name in names:
df[name] = data[name]
return df
def _createDataframe(self, prefix="", names=None):
if names is None:
data = self.api.getTable().getData()
if 'row' in data:
import pdb; pdb.set_trace()
pass
else:
data = {}
if len(names) >= 3:
data[names[2]] = [100.0, 200.0, 300.0, 400.0, 500.0]
if len(names) >= 2:
data[names[1]] = [10.1, 20.0, 30.0, 40.0, 50.0]
if len(names) >= 1:
data[names[0]] = ["one", "two", "three", "four", "five"]
df = pd.DataFrame(data)
return df
def _TableContainsDataframe(self, table, dataframe, names=None):
if names is None:
names = list(set(dataframe.columns).union( \
table.getColumnNames()))
for name in dataframe.columns:
column = table.columnFromName(name, is_relative=False)
self.assertTrue([dataframe[name].tolist() == column.getCells()])
def testCreateFromDataframe(self):
if IGNORE_TEST:
return
df = self._createDataframe()
table = self.api.dataframeToTable("NewTable", df)
num = len(df.columns)
for name in df.columns:
column = table.columnFromName(name, is_relative=False)
b = all([df[name][n] == column.getCells()[n] \
for n in range(num)])
self.assertTrue(b)
def _testAddColumnsToTableFromDataframe(self, table):
df_col1 = 'A%d' % random.randint(1,100)
df_col2 = 'B%d' % random.randint(1,100)
data = range(10)
df = pd.DataFrame({df_col1: data, df_col2: data})
self.api = API()
self.api.setTable(table)
names = [df_col2, df_col1]
self.api.addColumnsToTableFromDataframe(df, names=names)
column_names = [c.getName(is_global_name=False)
for c in self.table.getColumns()]
for name in names:
self.assertTrue(name in column_names)
column = self.api.getTable().columnFromName(name)
self.assertIsNotNone(column.getParent())
def testAddColumnsToTableFromDataframe(self):
if IGNORE_TEST:
return
ht.setupTableInitialization(self)
self._testAddColumnsToTableFromDataframe(self.table)
self._testAddColumnsToTableFromDataframe(self.subtable)
def _testAddToDataframe(self, names=None):
df = self.api.tableToDataframe(colnms=names)
if names is None:
columns = self.api.getTable().getDataColumns()
else:
columns = \
[self.api.getTable().columnFromName(n, is_relative=False)
for n in names]
self.assertEqual(len(df.columns), len(columns))
for name in df.columns:
column = self.api.getTable().columnFromName(name,
is_relative=False)
self.assertTrue(list(df[name]) == column.getCells())
def testAddToDataframe(self):
if IGNORE_TEST:
return
self.api._table = self.table
self._testAddToDataframe()
self._testAddToDataframe(names=['DUMMY1_COLUMN'])
# pylint: disable=W0212,C0111,R0904
class TestAPIPlugin(unittest.TestCase):
def setUp(self):
table = ht.createTable("test", column_name=COLUMN1)
self.api = APIPlugin(table.getFilepath())
self.api.initialize()
ht.setupTableInitialization(self)
# pylint: disable=W0212,C0111,R0904
class TestAPIAdmin(unittest.TestCase):
def setUp(self):
table = ht.createTable("test", column_name=[COLUMN1, COLUMN2])
self.api = APIAdmin(table.getFilepath())
self.api.initialize()
ht.setupTableInitialization(self)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python-packages/mne-python-0.10/mne/viz/tests/test_epochs.py | 1 | 5564 | # Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
# Jaakko Leppakangas <[email protected]>
#
# License: Simplified BSD
import os.path as op
import warnings
from nose.tools import assert_raises
import numpy as np
from mne import io, read_events, Epochs
from mne import pick_types
from mne.utils import run_tests_if_main, requires_version
from mne.channels import read_layout
from mne.viz import plot_drop_log, plot_epochs_image, plot_image_epochs
from mne.viz.utils import _fake_click
# Set our plotters to test mode
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
warnings.simplefilter('always') # enable b/c these tests throw warnings
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
evoked_fname = op.join(base_dir, 'test-ave.fif')
raw_fname = op.join(base_dir, 'test_raw.fif')
cov_fname = op.join(base_dir, 'test-cov.fif')
event_name = op.join(base_dir, 'test-eve.fif')
event_id, tmin, tmax = 1, -0.1, 1.0
n_chan = 15
layout = read_layout('Vectorview-all')
def _get_raw():
return io.Raw(raw_fname, preload=False)
def _get_events():
return read_events(event_name)
def _get_picks(raw):
return pick_types(raw.info, meg=True, eeg=False, stim=False,
ecg=False, eog=False, exclude='bads')
def _get_epochs():
raw = _get_raw()
events = _get_events()
picks = _get_picks(raw)
# Use a subset of channels for plotting speed
picks = np.round(np.linspace(0, len(picks) + 1, n_chan)).astype(int)
epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
return epochs
def _get_epochs_delayed_ssp():
raw = _get_raw()
events = _get_events()
picks = _get_picks(raw)
reject = dict(mag=4e-12)
epochs_delayed_ssp = Epochs(raw, events[:10], event_id, tmin, tmax,
picks=picks, baseline=(None, 0),
proj='delayed', reject=reject)
return epochs_delayed_ssp
def test_plot_epochs():
"""Test epoch plotting"""
import matplotlib.pyplot as plt
epochs = _get_epochs()
epochs.plot(scalings=None, title='Epochs')
plt.close('all')
fig = epochs[0].plot(picks=[0, 2, 3], scalings=None)
fig.canvas.key_press_event('escape')
plt.close('all')
fig = epochs.plot()
fig.canvas.key_press_event('left')
fig.canvas.key_press_event('right')
fig.canvas.scroll_event(0.5, 0.5, -0.5) # scroll down
fig.canvas.scroll_event(0.5, 0.5, 0.5) # scroll up
fig.canvas.key_press_event('up')
fig.canvas.key_press_event('down')
fig.canvas.key_press_event('pageup')
fig.canvas.key_press_event('pagedown')
fig.canvas.key_press_event('-')
fig.canvas.key_press_event('+')
fig.canvas.key_press_event('=')
fig.canvas.key_press_event('b')
fig.canvas.key_press_event('f11')
fig.canvas.key_press_event('home')
fig.canvas.key_press_event('?')
fig.canvas.key_press_event('h')
fig.canvas.key_press_event('o')
fig.canvas.key_press_event('end')
fig.canvas.resize_event()
fig.canvas.close_event() # closing and epoch dropping
plt.close('all')
assert_raises(RuntimeError, epochs.plot, picks=[])
plt.close('all')
with warnings.catch_warnings(record=True):
fig = epochs.plot()
# test mouse clicks
x = fig.get_axes()[0].get_xlim()[1] / 2
y = fig.get_axes()[0].get_ylim()[0] / 2
data_ax = fig.get_axes()[0]
n_epochs = len(epochs)
_fake_click(fig, data_ax, [x, y], xform='data') # mark a bad epoch
_fake_click(fig, data_ax, [x, y], xform='data') # unmark a bad epoch
_fake_click(fig, data_ax, [0.5, 0.999]) # click elsewhere in 1st axes
_fake_click(fig, data_ax, [-0.1, 0.9]) # click on y-label
_fake_click(fig, data_ax, [-0.1, 0.9], button=3)
_fake_click(fig, fig.get_axes()[2], [0.5, 0.5]) # change epochs
_fake_click(fig, fig.get_axes()[3], [0.5, 0.5]) # change channels
fig.canvas.close_event() # closing and epoch dropping
assert(n_epochs - 1 == len(epochs))
plt.close('all')
def test_plot_epochs_image():
"""Test plotting of epochs image
"""
import matplotlib.pyplot as plt
epochs = _get_epochs()
plot_epochs_image(epochs, picks=[1, 2])
plt.close('all')
with warnings.catch_warnings(record=True):
plot_image_epochs(epochs, picks=[1, 2])
plt.close('all')
def test_plot_drop_log():
"""Test plotting a drop log
"""
import matplotlib.pyplot as plt
epochs = _get_epochs()
assert_raises(ValueError, epochs.plot_drop_log)
epochs.drop_bad_epochs()
warnings.simplefilter('always', UserWarning)
with warnings.catch_warnings(record=True):
epochs.plot_drop_log()
plot_drop_log([['One'], [], []])
plot_drop_log([['One'], ['Two'], []])
plot_drop_log([['One'], ['One', 'Two'], []])
plt.close('all')
@requires_version('scipy', '0.12')
def test_plot_psd_epochs():
"""Test plotting epochs psd (+topomap)
"""
import matplotlib.pyplot as plt
epochs = _get_epochs()
epochs.plot_psd()
assert_raises(RuntimeError, epochs.plot_psd_topomap,
bands=[(0, 0.01, 'foo')]) # no freqs in range
epochs.plot_psd_topomap()
plt.close('all')
run_tests_if_main()
| bsd-3-clause |
arahuja/scikit-learn | examples/cluster/plot_affinity_propagation.py | 349 | 2304 | """
=================================================
Demo of affinity propagation clustering algorithm
=================================================
Reference:
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
print(__doc__)
from sklearn.cluster import AffinityPropagation
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=300, centers=centers, cluster_std=0.5,
random_state=0)
##############################################################################
# Compute Affinity Propagation
af = AffinityPropagation(preference=-50).fit(X)
cluster_centers_indices = af.cluster_centers_indices_
labels = af.labels_
n_clusters_ = len(cluster_centers_indices)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels, metric='sqeuclidean'))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.close('all')
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
class_members = labels == k
cluster_center = X[cluster_centers_indices[k]]
plt.plot(X[class_members, 0], X[class_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
for x in X[class_members]:
plt.plot([cluster_center[0], x[0]], [cluster_center[1], x[1]], col)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
cosurgi/trunk | doc/sphinx/ipython_directive013.py | 2 | 27515 | # -*- coding: utf-8 -*-
"""Sphinx directive to support embedded IPython code.
From: https://github.com/ipython/ipython/blob/master/docs/sphinxext/ipython_directive.py
This directive allows pasting of entire interactive IPython sessions, prompts
and all, and their code will actually get re-executed at doc build time, with
all prompts renumbered sequentially. It also allows you to input code as a pure
python input by giving the argument python to the directive. The output looks
like an interactive ipython session.
To enable this directive, simply list it in your Sphinx ``conf.py`` file
(making sure the directory where you placed it is visible to sphinx, as is
needed for all Sphinx directives).
By default this directive assumes that your prompts are unchanged IPython ones,
but this can be customized. The configurable options that can be placed in
conf.py are
ipython_savefig_dir:
The directory in which to save the figures. This is relative to the
Sphinx source directory. The default is `html_static_path`.
ipython_rgxin:
The compiled regular expression to denote the start of IPython input
lines. The default is re.compile('In \[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_rgxout:
The compiled regular expression to denote the start of IPython output
lines. The default is re.compile('Out\[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_promptin:
The string to represent the IPython input prompt in the generated ReST.
The default is 'In [%d]:'. This expects that the line numbers are used
in the prompt.
ipython_promptout:
    The string to represent the IPython output prompt in the generated ReST. The
default is 'Out [%d]:'. This expects that the line numbers are used
in the prompt.
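For example, a project's ``conf.py`` might contain something along these
lines (a purely illustrative sketch; the option names come from the list
above, while the values and the extension name are hypothetical)::
    extensions.append('ipython_directive')  # use the module's import name
    ipython_savefig_dir = 'savefig'
    ipython_promptin = 'In [%d]:'
    ipython_promptout = 'Out [%d]:'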
ToDo
----
- Turn the ad-hoc test() function into a real test suite.
- Break up ipython-specific functionality from matplotlib stuff into better
separated code.
Authors
-------
- John D Hunter: original author.
- Fernando Perez: refactoring, documentation, cleanups, port to 0.11.
- Václav Šmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations.
- Skipper Seabold, refactoring, cleanups, pure python addition
"""
from __future__ import print_function
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib
from future import standard_library
standard_library.install_aliases()
from builtins import range
from builtins import object
import io
import os
import re
import sys
import tempfile
import ast
# To keep compatibility with various python versions
try:
from hashlib import md5
except ImportError:
from md5 import md5
# Third-party
import matplotlib
import sphinx
from docutils.parsers.rst import directives
from docutils import nodes
try:
from sphinx.util.compat import Directive
except ImportError:
from docutils.parsers.rst import Directive
matplotlib.use('Agg')
# Our own
from IPython import Config, InteractiveShell
from IPython.core.profiledir import ProfileDir
from IPython.utils import io
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# for tokenizing blocks
COMMENT, INPUT, OUTPUT = list(range(3))
#-----------------------------------------------------------------------------
# Functions and class declarations
#-----------------------------------------------------------------------------
def block_parser(part, rgxin, rgxout, fmtin, fmtout):
"""
part is a string of ipython text, comprised of at most one
    input, one output, comments, and blank lines. The block parser
parses the text into a list of::
blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]
where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and
data is, depending on the type of token::
COMMENT : the comment string
INPUT: the (DECORATOR, INPUT_LINE, REST) where
DECORATOR: the input decorator (or None)
INPUT_LINE: the input as string (possibly multi-line)
REST : any stdout generated by the input line (not OUTPUT)
OUTPUT: the output string, possibly multi-line
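    As a purely illustrative sketch (not part of the original docstring), a
    part such as::
        In [1]: x = 2*3
        Out[1]: 6
    would come back roughly as::
        [(INPUT, (None, 'x = 2*3', '')), (OUTPUT, '6')]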
"""
block = []
lines = part.split('\n')
N = len(lines)
i = 0
decorator = None
while 1:
if i==N:
# nothing left to parse -- the last line
break
line = lines[i]
i += 1
line_stripped = line.strip()
if line_stripped.startswith('#'):
block.append((COMMENT, line))
continue
if line_stripped.startswith('@'):
# we're assuming at most one decorator -- may need to
# rethink
decorator = line_stripped
continue
# does this look like an input line?
matchin = rgxin.match(line)
if matchin:
lineno, inputline = int(matchin.group(1)), matchin.group(2)
# the ....: continuation string
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
Nc = len(continuation)
# input lines can continue on for more than one line, if
# we have a '\' line continuation char or a function call
# echo line 'print'. The input line can only be
# terminated by the end of the block or an output line, so
# we parse out the rest of the input line if it is
# multiline as well as any echo text
rest = []
while i<N:
# look ahead; if the next line is blank, or a comment, or
# an output line, we're done
nextline = lines[i]
matchout = rgxout.match(nextline)
#print "nextline=%s, continuation=%s, starts=%s"%(nextline, continuation, nextline.startswith(continuation))
if matchout or nextline.startswith('#'):
break
elif nextline.startswith(continuation):
inputline += '\n' + nextline[Nc:]
else:
rest.append(nextline)
i+= 1
block.append((INPUT, (decorator, inputline, '\n'.join(rest))))
continue
# if it looks like an output line grab all the text to the end
# of the block
matchout = rgxout.match(line)
if matchout:
lineno, output = int(matchout.group(1)), matchout.group(2)
if i<N-1:
output = '\n'.join([output] + lines[i:])
block.append((OUTPUT, output))
break
return block
class EmbeddedSphinxShell(object):
"""An embedded IPython instance to run inside Sphinx"""
def __init__(self):
self.cout = io.StringIO()
# Create config object for IPython
config = Config()
config.Global.display_banner = False
config.Global.exec_lines = ['import numpy as np',
'from pylab import *'
]
config.InteractiveShell.autocall = False
config.InteractiveShell.autoindent = False
config.InteractiveShell.colors = 'NoColor'
# create a profile so instance history isn't saved
tmp_profile_dir = tempfile.mkdtemp(prefix='profile_')
profname = 'auto_profile_sphinx_build'
pdir = os.path.join(tmp_profile_dir,profname)
profile = ProfileDir.create_profile_dir(pdir)
# Create and initialize ipython, but don't start its mainloop
IP = InteractiveShell.instance(config=config, profile_dir=profile)
# io.stdout redirect must be done *after* instantiating InteractiveShell
io.stdout = self.cout
io.stderr = self.cout
# For debugging, so we can see normal output, use this:
#from IPython.utils.io import Tee
#io.stdout = Tee(self.cout, channel='stdout') # dbg
#io.stderr = Tee(self.cout, channel='stderr') # dbg
# Store a few parts of IPython we'll need.
self.IP = IP
self.user_ns = self.IP.user_ns
self.user_global_ns = self.IP.user_global_ns
self.input = ''
self.output = ''
self.is_verbatim = False
self.is_doctest = False
self.is_suppress = False
# on the first call to the savefig decorator, we'll import
# pyplot as plt so we can make a call to the plt.gcf().savefig
self._pyplot_imported = False
def clear_cout(self):
self.cout.seek(0)
self.cout.truncate(0)
def process_input_line(self, line, store_history=True):
"""process the input, capturing stdout"""
#print "input='%s'"%self.input
stdout = sys.stdout
splitter = self.IP.input_splitter
try:
sys.stdout = self.cout
splitter.push(line)
more = splitter.push_accepts_more()
if not more:
source_raw = splitter.source_raw_reset()[1]
self.IP.run_cell(source_raw, store_history=store_history)
finally:
sys.stdout = stdout
def process_image(self, decorator):
"""
# build out an image directive like
# .. image:: somefile.png
# :width 4in
#
# from an input like
# savefig somefile.png width=4in
"""
savefig_dir = self.savefig_dir
source_dir = self.source_dir
saveargs = decorator.split(' ')
filename = saveargs[1]
# insert relative path to image file in source
outfile = os.path.relpath(os.path.join(savefig_dir,filename),
source_dir)
imagerows = ['.. image:: %s'%outfile]
for kwarg in saveargs[2:]:
arg, val = kwarg.split('=')
arg = arg.strip()
val = val.strip()
imagerows.append(' :%s: %s'%(arg, val))
image_file = os.path.basename(outfile) # only return file name
image_directive = '\n'.join(imagerows)
return image_file, image_directive
# Callbacks for each type of token
def process_input(self, data, input_prompt, lineno):
"""Process data block for INPUT token."""
decorator, input, rest = data
image_file = None
image_directive = None
#print 'INPUT:', data # dbg
is_verbatim = decorator=='@verbatim' or self.is_verbatim
is_doctest = decorator=='@doctest' or self.is_doctest
is_suppress = decorator=='@suppress' or self.is_suppress
is_savefig = decorator is not None and \
decorator.startswith('@savefig')
input_lines = input.split('\n')
if len(input_lines) > 1:
if input_lines[-1] != "":
input_lines.append('') # make sure there's a blank line
# so splitter buffer gets reset
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
Nc = len(continuation)
if is_savefig:
image_file, image_directive = self.process_image(decorator)
ret = []
is_semicolon = False
for i, line in enumerate(input_lines):
if line.endswith(';'):
is_semicolon = True
if i==0:
# process the first input line
if is_verbatim:
self.process_input_line('')
self.IP.execution_count += 1 # increment it anyway
else:
# only submit the line in non-verbatim mode
self.process_input_line(line, store_history=True)
formatted_line = '%s %s'%(input_prompt, line)
else:
# process a continuation line
if not is_verbatim:
self.process_input_line(line, store_history=True)
formatted_line = '%s %s'%(continuation, line)
if not is_suppress:
ret.append(formatted_line)
if not is_suppress and len(rest.strip()) and is_verbatim:
# the "rest" is the standard output of the
# input, which needs to be added in
# verbatim mode
ret.append(rest)
self.cout.seek(0)
output = self.cout.read()
if not is_suppress and not is_semicolon:
ret.append(output)
elif is_semicolon: # get spacing right
ret.append('')
self.cout.truncate(0)
return (ret, input_lines, output, is_doctest, image_file,
image_directive)
#print 'OUTPUT', output # dbg
def process_output(self, data, output_prompt,
input_lines, output, is_doctest, image_file):
"""Process data block for OUTPUT token."""
if is_doctest:
submitted = data.strip()
found = output
if found is not None:
found = found.strip()
# XXX - fperez: in 0.11, 'output' never comes with the prompt
# in it, just the actual output text. So I think all this code
# can be nuked...
# the above comment does not appear to be accurate... (minrk)
ind = found.find(output_prompt)
if ind<0:
e='output prompt="%s" does not match out line=%s' % \
(output_prompt, found)
raise RuntimeError(e)
found = found[len(output_prompt):].strip()
if found!=submitted:
e = ('doctest failure for input_lines="%s" with '
'found_output="%s" and submitted output="%s"' %
(input_lines, found, submitted) )
raise RuntimeError(e)
#print 'doctest PASSED for input_lines="%s" with found_output="%s" and submitted output="%s"'%(input_lines, found, submitted)
def process_comment(self, data):
"""Process data fPblock for COMMENT token."""
if not self.is_suppress:
return [data]
def save_image(self, image_file):
"""
Saves the image file to disk.
"""
self.ensure_pyplot()
command = 'plt.gcf().savefig("%s")'%image_file
#print 'SAVEFIG', command # dbg
self.process_input_line('bookmark ipy_thisdir', store_history=False)
self.process_input_line('cd -b ipy_savedir', store_history=False)
self.process_input_line(command, store_history=False)
self.process_input_line('cd -b ipy_thisdir', store_history=False)
self.process_input_line('bookmark -d ipy_thisdir', store_history=False)
self.clear_cout()
def process_block(self, block):
"""
process block from the block_parser and return a list of processed lines
"""
ret = []
output = None
input_lines = None
lineno = self.IP.execution_count
input_prompt = self.promptin%lineno
output_prompt = self.promptout%lineno
image_file = None
image_directive = None
for token, data in block:
if token==COMMENT:
out_data = self.process_comment(data)
elif token==INPUT:
(out_data, input_lines, output, is_doctest, image_file,
image_directive) = \
self.process_input(data, input_prompt, lineno)
elif token==OUTPUT:
out_data = \
self.process_output(data, output_prompt,
input_lines, output, is_doctest,
image_file)
if out_data:
ret.extend(out_data)
# save the image files
if image_file is not None:
self.save_image(image_file)
return ret, image_directive
def ensure_pyplot(self):
if self._pyplot_imported:
return
self.process_input_line('import matplotlib.pyplot as plt',
store_history=False)
def process_pure_python(self, content):
"""
content is a list of strings. it is unedited directive conent
This runs it line by line in the InteractiveShell, prepends
prompts as needed capturing stderr and stdout, then returns
the content as a list as if it were ipython code
"""
output = []
savefig = False # keep up with this to clear figure
multiline = False # to handle line continuation
multiline_start = None
fmtin = self.promptin
ct = 0
for lineno, line in enumerate(content):
line_stripped = line.strip()
if not len(line):
output.append(line)
continue
# handle decorators
if line_stripped.startswith('@'):
output.extend([line])
if 'savefig' in line:
savefig = True # and need to clear figure
continue
# handle comments
if line_stripped.startswith('#'):
output.extend([line])
continue
# deal with lines checking for multiline
continuation = u' %s:'% ''.join(['.']*(len(str(ct))+2))
if not multiline:
modified = u"%s %s" % (fmtin % ct, line_stripped)
output.append(modified)
ct += 1
try:
ast.parse(line_stripped)
output.append(u'')
except Exception: # on a multiline
multiline = True
multiline_start = lineno
else: # still on a multiline
modified = u'%s %s' % (continuation, line)
output.append(modified)
try:
mod = ast.parse(
'\n'.join(content[multiline_start:lineno+1]))
if isinstance(mod.body[0], ast.FunctionDef):
# check to see if we have the whole function
for element in mod.body[0].body:
if isinstance(element, ast.Return):
multiline = False
else:
output.append(u'')
multiline = False
except Exception:
pass
if savefig: # clear figure if plotted
self.ensure_pyplot()
self.process_input_line('plt.clf()', store_history=False)
self.clear_cout()
savefig = False
return output
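# Illustrative note (not in the original source): plain directive content like
#   a = 2
#   print(a)
# comes back as prompt-prefixed lines (``In [N]: ...``, each complete
# statement followed by a blank line, with ``...:`` continuation markers for
# multi-line statements), ready to be run through the block parser
# (``block_parser``/``process_block``).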
class IpythonDirective(Directive):
has_content = True
required_arguments = 0
optional_arguments = 4 # python, suppress, verbatim, doctest
final_argument_whitespace = True
option_spec = { 'python': directives.unchanged,
'suppress' : directives.flag,
'verbatim' : directives.flag,
'doctest' : directives.flag,
}
shell = EmbeddedSphinxShell()
def get_config_options(self):
# contains sphinx configuration variables
config = self.state.document.settings.env.config
# get config variables to set figure output directory
confdir = self.state.document.settings.env.app.confdir
savefig_dir = config.ipython_savefig_dir
source_dir = os.path.dirname(self.state.document.current_source)
if savefig_dir is None:
savefig_dir = config.html_static_path
if isinstance(savefig_dir, list):
savefig_dir = savefig_dir[0] # safe to assume only one path?
savefig_dir = os.path.join(confdir, savefig_dir)
# get regex and prompt stuff
rgxin = config.ipython_rgxin
rgxout = config.ipython_rgxout
promptin = config.ipython_promptin
promptout = config.ipython_promptout
return savefig_dir, source_dir, rgxin, rgxout, promptin, promptout
def setup(self):
# reset the execution count if we haven't processed this doc
#NOTE: this may be borked if there are multiple seen_doc tmp files
#check time stamp?
seen_docs = [i for i in os.listdir(tempfile.tempdir)
if i.startswith('seen_doc')]
if seen_docs:
fname = os.path.join(tempfile.tempdir, seen_docs[0])
docs = open(fname).read().split('\n')
if self.state.document.current_source not in docs:
self.shell.IP.history_manager.reset()
self.shell.IP.execution_count = 1
else: # haven't processed any docs yet
docs = []
# get config values
(savefig_dir, source_dir, rgxin,
rgxout, promptin, promptout) = self.get_config_options()
# and attach to shell so we don't have to pass them around
self.shell.rgxin = rgxin
self.shell.rgxout = rgxout
self.shell.promptin = promptin
self.shell.promptout = promptout
self.shell.savefig_dir = savefig_dir
self.shell.source_dir = source_dir
# setup bookmark for saving figures directory
self.shell.process_input_line('bookmark ipy_savedir %s'%savefig_dir,
store_history=False)
self.shell.clear_cout()
# write the filename to a tempfile because it's been "seen" now
if self.state.document.current_source not in docs:
fd, fname = tempfile.mkstemp(prefix="seen_doc", text=True)
fout = open(fname, 'a')
fout.write(self.state.document.current_source+'\n')
fout.close()
return rgxin, rgxout, promptin, promptout
def teardown(self):
# delete last bookmark
self.shell.process_input_line('bookmark -d ipy_savedir',
store_history=False)
self.shell.clear_cout()
def run(self):
debug = False
#TODO, any reason block_parser can't be a method of embeddable shell
# then we wouldn't have to carry these around
rgxin, rgxout, promptin, promptout = self.setup()
options = self.options
self.shell.is_suppress = 'suppress' in options
self.shell.is_doctest = 'doctest' in options
self.shell.is_verbatim = 'verbatim' in options
# handle pure python code
if 'python' in self.arguments:
content = self.content
self.content = self.shell.process_pure_python(content)
parts = '\n'.join(self.content).split('\n\n')
lines = ['.. code-block:: ipython','']
figures = []
for part in parts:
block = block_parser(part, rgxin, rgxout, promptin, promptout)
if len(block):
rows, figure = self.shell.process_block(block)
for row in rows:
lines.extend([' %s'%line for line in row.split('\n')])
if figure is not None:
figures.append(figure)
#text = '\n'.join(lines)
#figs = '\n'.join(figures)
for figure in figures:
lines.append('')
lines.extend(figure.split('\n'))
lines.append('')
#print lines
if len(lines)>2:
if debug:
print('\n'.join(lines))
else: #NOTE: this raises some errors, what's it for?
#print 'INSERTING %d lines'%len(lines)
self.state_machine.insert_input(
lines, self.state_machine.input_lines.source(0))
text = '\n'.join(lines)
txtnode = nodes.literal_block(text, text)
txtnode['language'] = 'ipython'
#imgnode = nodes.image(figs)
# cleanup
self.teardown()
return []#, imgnode]
# Enable as a proper Sphinx directive
def setup(app):
setup.app = app
app.add_directive('ipython', IpythonDirective)
app.add_config_value('ipython_savefig_dir', None, True)
app.add_config_value('ipython_rgxin',
re.compile('In \[(\d+)\]:\s?(.*)\s*'), True)
app.add_config_value('ipython_rgxout',
re.compile('Out\[(\d+)\]:\s?(.*)\s*'), True)
app.add_config_value('ipython_promptin', 'In [%d]:', True)
app.add_config_value('ipython_promptout', 'Out[%d]:', True)
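# Illustrative usage sketch (not in the original source): in a Sphinx conf.py,
# assuming this module is importable as ``ipython_directive``:
#   extensions = [..., 'ipython_directive']
#   ipython_savefig_dir = '_static'   # where @savefig figures are written
#   ipython_promptin = 'In [%d]:'     # defaults registered in setup() above
#   ipython_promptout = 'Out[%d]:'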
# Simple smoke test, needs to be converted to a proper automatic test.
def test():
examples = [
r"""
In [9]: pwd
Out[9]: '/home/jdhunter/py4science/book'
In [10]: cd bookdata/
/home/jdhunter/py4science/book/bookdata
In [2]: from pylab import *
In [2]: ion()
In [3]: im = imread('stinkbug.png')
@savefig mystinkbug.png width=4in
In [4]: imshow(im)
Out[4]: <matplotlib.image.AxesImage object at 0x39ea850>
""",
r"""
In [1]: x = 'hello world'
# string methods can be
# used to alter the string
@doctest
In [2]: x.upper()
Out[2]: 'HELLO WORLD'
@verbatim
In [3]: x.st<TAB>
x.startswith x.strip
""",
r"""
In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\
.....: &d=9&e=22&f=2009&g=d&a=1&br=8&c=2006&ignore=.csv'
In [131]: print url.split('&')
['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv']
In [60]: import urllib
""",
r"""\
In [133]: import numpy.random
@suppress
In [134]: numpy.random.seed(2358)
@doctest
In [135]: numpy.random.rand(10,2)
Out[135]:
array([[ 0.64524308, 0.59943846],
[ 0.47102322, 0.8715456 ],
[ 0.29370834, 0.74776844],
[ 0.99539577, 0.1313423 ],
[ 0.16250302, 0.21103583],
[ 0.81626524, 0.1312433 ],
[ 0.67338089, 0.72302393],
[ 0.7566368 , 0.07033696],
[ 0.22591016, 0.77731835],
[ 0.0072729 , 0.34273127]])
""",
r"""
In [106]: print x
jdh
In [109]: for i in range(10):
.....: print i
.....:
.....:
0
1
2
3
4
5
6
7
8
9
""",
r"""
In [144]: from pylab import *
In [145]: ion()
# use a semicolon to suppress the output
@savefig test_hist.png width=4in
In [151]: hist(np.random.randn(10000), 100);
@savefig test_plot.png width=4in
In [151]: plot(np.random.randn(10000), 'o');
""",
r"""
# use a semicolon to suppress the output
In [151]: plt.clf()
@savefig plot_simple.png width=4in
In [151]: plot([1,2,3])
@savefig hist_simple.png width=4in
In [151]: hist(np.random.randn(10000), 100);
""",
r"""
# update the current fig
In [151]: ylabel('number')
In [152]: title('normal distribution')
@savefig hist_with_text.png
In [153]: grid(True)
""",
]
# skip local-file depending first example:
examples = examples[1:]
#ipython_directive.DEBUG = True # dbg
#options = dict(suppress=True) # dbg
options = dict()
for example in examples:
content = example.split('\n')
ipython_directive('debug', arguments=None, options=options,
content=content, lineno=0,
content_offset=None, block_text=None,
state=None, state_machine=None,
)
# Run test suite as a script
if __name__=='__main__':
if not os.path.isdir('_static'):
os.mkdir('_static')
test()
print('All OK? Check figures in _static/')
| gpl-2.0 |
wmvanvliet/psychic | psychic/tests/testchain.py | 1 | 4479 | import unittest
import numpy as np
from ..nodes import BaseNode, Chain
from .. import DataSet
try:
from sklearn import svm
from sklearn import linear_model
from sklearn import preprocessing
sklearn_present = True
except ImportError:
sklearn_present = False
class NOPNode(BaseNode):
def train_(self, d):
self.d = d
def apply_(self, d):
return d
class AddSumNode(BaseNode):
def __init__(self):
BaseNode.__init__(self)
self.train_calls = 0
self.test_calls = 0
self.sum = None
def train_(self, d):
self.sum = np.sum(d.data)
self.train_calls += 1
def apply_(self, d):
self.test_calls += 1
return DataSet(data=d.data + self.sum, default=d)
class TestChain(unittest.TestCase):
def setUp(self):
self.d = DataSet(data=np.ones((1, 10)), labels=np.random.rand(2, 10))
self.nodes = [AddSumNode() for n in range(3)]
self.c = Chain(self.nodes)
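# Note (added for clarity): each AddSumNode stores the sum of the data it is
# trained on and adds that sum when applied, so later nodes in the chain are
# trained on data offset by all earlier sums; the assertions below check this
# cascading behaviour.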
def test_chain(self):
d = self.d
self.c.train(d)
np.testing.assert_equal([n.train_calls for n in self.nodes], [1, 1, 1])
np.testing.assert_equal([n.test_calls for n in self.nodes], [1, 1, 0])
np.testing.assert_equal([n.sum for n in self.nodes],
[10, (1 + 10) * 10, (1 + 10 + 110) * 10])
np.testing.assert_equal(self.c.apply(d).data,
1 + 10 + (1 + 10) * 10 + (1 + 10 + (1 + 10) * 10) * 10 * d.data)
np.testing.assert_equal([n.test_calls for n in self.nodes], [2, 2, 1])
def test_train_apply(self):
d = self.d
self.c.train_apply(d)
np.testing.assert_equal([n.train_calls for n in self.nodes], [1, 1, 1])
np.testing.assert_equal([n.test_calls for n in self.nodes], [1, 1, 1])
def test_train_sklearn(self):
if not sklearn_present:
return
ch = Chain([NOPNode(), svm.LinearSVC()])
ch.train(self.d)
self.assertTrue(hasattr(ch.nodes[1], 'coef_'))
ch = Chain([svm.LinearSVC(), NOPNode()])
ch.train(self.d)
self.assertTrue(hasattr(ch.nodes[0], 'coef_'))
self.assertEqual(ch.nodes[1].d.data.shape, (1, 10))
def test_apply_sklearn(self):
if not sklearn_present:
return
labels = np.zeros((2,10))
labels[0,:5] = 1
labels[1,5:] = 1
d = DataSet(np.ones((5,10,5)), labels=labels[:,:5])
d += DataSet(np.zeros((5,10,5)), labels=labels[:,5:],
ids=np.arange(5,10))
# Node that predicts integers (SVM)
ch = Chain([NOPNode(), svm.LinearSVC()])
d2 = ch.train_apply(d)
np.testing.assert_equal(d2.data, d2.labels)
# Node that predicts floats (OLS)
ch = Chain([NOPNode(), linear_model.LinearRegression()])
d2 = ch.train_apply(d)
self.assertEqual(d2.data.shape, (1, 10))
# Node that predicts probabilities
ch = Chain([NOPNode(), linear_model.LogisticRegression()])
d2 = ch.train_apply(d)
self.assertEqual(d2.data.shape, (2, 10))
self.assertTrue(np.all(d2.data > 0))
self.assertTrue(np.all(d2.data < 1))
np.testing.assert_equal(d2.y, np.r_[np.zeros(5), np.ones(5)])
# Node that only implements a transform
ch = Chain([NOPNode(), preprocessing.StandardScaler()])
d2 = ch.train_apply(d)
np.testing.assert_equal(np.mean(d2.data, axis=1), 0)
np.testing.assert_equal(np.std(d2.data, axis=1), 1.)
# When node is not the last node, transform function should be applied,
ch = Chain([preprocessing.StandardScaler(), NOPNode()])
self.assertEqual(ch.train_apply(d), d2)
# When node is not the last node, transform function should be applied,
# but LinearRegression does not have a transform. Predict function
# should be called in this case.
ch = Chain([linear_model.LinearRegression()])
d2 = ch.train_apply(d)
ch = Chain([linear_model.LinearRegression(), NOPNode()])
self.assertEqual(ch.train_apply(d), d2)
def test_str(self):
ch = Chain([AddSumNode(), NOPNode()])
self.assertEqual(str(ch), 'Chain (AddSumNode ->\nNOPNode)')
if sklearn_present:
ch = Chain([linear_model.LinearRegression()])
self.assertEqual(str(ch),
'Chain (LinearRegression(copy_X=True, fit_intercept=True, '+
'n_jobs=1, normalize=False))')
| bsd-3-clause |
fergusbarratt/PhysicsAlgorithms | SmallWorldNetworks/SWN.py | 1 | 4073 | '''does small world networks like in Entropy Order Parameters & Complexity, JP Sethna'''
import numpy as np
import matplotlib.pyplot as plt
import numpy.random as rd
import random
import networkx as nx
from copy import copy
class Network(object):
def __init__(self, n_nodes):
self.nodes = list(range(n_nodes))
self.edges = {node:set() for node in self.nodes}
def has_node(self, node):
return node in self.nodes
def add_node(self, node):
    # self.nodes is a list, so use append (it has no .add) and give the new
    # node an empty edge set so add_edge can use it
    if node not in self.nodes:
        self.nodes.append(node)
        self.edges[node] = set()
def add_edge(self, node1, node2):
    if node1 in self.nodes and node2 in self.nodes:
        self.edges[node1].add(node2)
        self.edges[node2].add(node1)
    else:
        self.add_node(node1)
        self.add_node(node2)
        self.add_edge(node1, node2)
def get_nodes(self):
return self.nodes
def get_edges(self):
return [(node1, node2) for node1, other_nodes in self.edges.items() for node2 in other_nodes]
def get_neighbours(self, node):
    return [other_node for other_node in self.edges[node]]
def find_path_lengths_from_node(self, target_node):
distances = {(target_node, target_node):0}
currentShell = [target_node]
dep=0
while currentShell!=[]:
nextShell=[]
for node in currentShell:
for neighbour in self.edges[node]:
if (target_node, neighbour) not in distances:
nextShell.append(neighbour)
distances[(target_node, neighbour)] = dep+1
dep+=1
currentShell=copy(nextShell)
return distances
def find_all_path_lengths(self):
all_distances={}
for node in self.edges:
all_distances.update(self.find_path_lengths_from_node(node))
return all_distances
def find_average_path_length(self):
return np.mean(list(self.find_all_path_lengths().values()))
def draw(self):
G = nx.Graph()
G.add_nodes_from(self.get_nodes())
G.add_edges_from(self.get_edges())
nx.draw(G)
plt.show()
class SmallWorldNetwork(Network):
def __init__(self, N_nodes, N_joins, p):
super().__init__(N_nodes)
# connect n to it's n nearest neighbours (global)
for node_skip in range(1, N_joins+1):
for ind, node in enumerate(self.nodes):
self.edges[node].add(self.nodes[(ind+node_skip)%len(self.nodes)])
# add number of random edges
for _ in range(int(p*N_nodes*(N_joins/2))):
self.add_random_edge()
def add_random_edge(self):
'''add a new unique random edge (possible speedup: select from nodes with the old node removed; handle recursion)'''
rand_node_1 = random.choice(self.nodes)
rand_node_2 = copy(rand_node_1)
while rand_node_2==rand_node_1:
rand_node_2 = random.choice(self.nodes)
if rand_node_2 in self.edges[rand_node_1] or rand_node_1 in self.edges[rand_node_2]:
self.add_random_edge()
else:
self.edges[rand_node_1].add(rand_node_2)
self.edges[rand_node_2].add(rand_node_1)
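# Minimal usage sketch (illustrative, not in the original source):
#   net = SmallWorldNetwork(100, 2, 0.1)   # L=100 nodes, Z=2 joins, p=0.1
#   ring = SmallWorldNetwork(100, 2, 0.0)  # unrewired reference ring
#   ratio = net.find_average_path_length() / ring.find_average_path_length()
# Even a small rewiring probability p should shrink this ratio noticeably.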
def test_small_world_network(L=100, Z=2, p=0.1, draw=False):
test_net = SmallWorldNetwork(L, Z, p)
assert len(test_net.get_edges()) == L*Z+int(p*L*(Z))
print(test_net.find_average_path_length())
if draw:
# draw the network
test_net.draw()
# histogram
vals = np.array(list(test_net.find_all_path_lengths().values()))
plt.hist(vals, 20)
# strogatz plot
Y = np.linspace(0.001, 1, 100)
X = np.array([SmallWorldNetwork(
L, Z, p).find_average_path_length()/
SmallWorldNetwork(L, Z, 0).find_average_path_length() for p in Y])
plt.semilogx(Y, X)
plt.ylim([0, 1.5])
plt.show()
return test_net
def test_network():
new_net = Network(5)
new_net.add_edge(1, 2)
new_net.draw()
test_small_world_network(draw=True)
| mit |
pravsripad/mne-python | mne/epochs.py | 2 | 146518 | # -*- coding: utf-8 -*-
"""Tools for working with epoched data."""
# Authors: Alexandre Gramfort <[email protected]>
# Matti Hämäläinen <[email protected]>
# Daniel Strohmeier <[email protected]>
# Denis Engemann <[email protected]>
# Mainak Jas <[email protected]>
# Stefan Appelhoff <[email protected]>
#
# License: BSD (3-clause)
from functools import partial
from collections import Counter
from copy import deepcopy
import json
import operator
import os.path as op
import numpy as np
from .io.write import (start_file, start_block, end_file, end_block,
write_int, write_float, write_float_matrix,
write_double_matrix, write_complex_float_matrix,
write_complex_double_matrix, write_id, write_string,
_get_split_size, _NEXT_FILE_BUFFER, INT32_MAX)
from .io.meas_info import read_meas_info, write_meas_info, _merge_info
from .io.open import fiff_open, _get_next_fname
from .io.tree import dir_tree_find
from .io.tag import read_tag, read_tag_info
from .io.constants import FIFF
from .io.fiff.raw import _get_fname_rep
from .io.pick import (channel_indices_by_type, channel_type,
pick_channels, pick_info, _pick_data_channels,
_DATA_CH_TYPES_SPLIT, _picks_to_idx)
from .io.proj import setup_proj, ProjMixin, _proj_equal
from .io.base import BaseRaw, TimeMixin
from .bem import _check_origin
from .evoked import EvokedArray, _check_decim
from .baseline import rescale, _log_rescale, _check_baseline
from .channels.channels import (ContainsMixin, UpdateChannelsMixin,
SetChannelsMixin, InterpolationMixin)
from .filter import detrend, FilterMixin, _check_fun
from .parallel import parallel_func
from .event import _read_events_fif, make_fixed_length_events
from .fixes import _get_args, rng_uniform
from .viz import (plot_epochs, plot_epochs_psd, plot_epochs_psd_topomap,
plot_epochs_image, plot_topo_image_epochs, plot_drop_log)
from .utils import (_check_fname, check_fname, logger, verbose,
_time_mask, check_random_state, warn, _pl,
sizeof_fmt, SizeMixin, copy_function_doc_to_method_doc,
_check_pandas_installed, _check_preload, GetEpochsMixin,
_prepare_read_metadata, _prepare_write_metadata,
_check_event_id, _gen_events, _check_option,
_check_combine, ShiftTimeMixin, _build_data_frame,
_check_pandas_index_arguments, _convert_times,
_scale_dataframe_data, _check_time_format, object_size,
_on_missing, _validate_type, _ensure_events)
from .utils.docs import fill_doc
from .data.html_templates import epochs_template
def _pack_reject_params(epochs):
reject_params = dict()
for key in ('reject', 'flat', 'reject_tmin', 'reject_tmax'):
val = getattr(epochs, key, None)
if val is not None:
reject_params[key] = val
return reject_params
def _save_split(epochs, fname, part_idx, n_parts, fmt):
"""Split epochs.
Anything new added to this function also needs to be added to
BaseEpochs.save to account for new file sizes.
"""
# insert index in filename
path, base = op.split(fname)
idx = base.find('.')
if part_idx > 0:
fname = op.join(path, '%s-%d.%s' % (base[:idx], part_idx,
base[idx + 1:]))
next_fname = None
if part_idx < n_parts - 1:
next_fname = op.join(path, '%s-%d.%s' % (base[:idx], part_idx + 1,
base[idx + 1:]))
next_idx = part_idx + 1
else:
next_idx = None
with start_file(fname) as fid:
_save_part(fid, epochs, fmt, n_parts, next_fname, next_idx)
def _save_part(fid, epochs, fmt, n_parts, next_fname, next_idx):
info = epochs.info
meas_id = info['meas_id']
start_block(fid, FIFF.FIFFB_MEAS)
write_id(fid, FIFF.FIFF_BLOCK_ID)
if info['meas_id'] is not None:
write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, info['meas_id'])
# Write measurement info
write_meas_info(fid, info)
# One or more evoked data sets
start_block(fid, FIFF.FIFFB_PROCESSED_DATA)
start_block(fid, FIFF.FIFFB_MNE_EPOCHS)
# write events out after getting data to ensure bad events are dropped
data = epochs.get_data()
_check_option('fmt', fmt, ['single', 'double'])
if np.iscomplexobj(data):
if fmt == 'single':
write_function = write_complex_float_matrix
elif fmt == 'double':
write_function = write_complex_double_matrix
else:
if fmt == 'single':
write_function = write_float_matrix
elif fmt == 'double':
write_function = write_double_matrix
start_block(fid, FIFF.FIFFB_MNE_EVENTS)
write_int(fid, FIFF.FIFF_MNE_EVENT_LIST, epochs.events.T)
write_string(fid, FIFF.FIFF_DESCRIPTION, _event_id_string(epochs.event_id))
end_block(fid, FIFF.FIFFB_MNE_EVENTS)
# Metadata
if epochs.metadata is not None:
start_block(fid, FIFF.FIFFB_MNE_METADATA)
metadata = _prepare_write_metadata(epochs.metadata)
write_string(fid, FIFF.FIFF_DESCRIPTION, metadata)
end_block(fid, FIFF.FIFFB_MNE_METADATA)
# First and last sample
first = int(round(epochs.tmin * info['sfreq'])) # round just to be safe
last = first + len(epochs.times) - 1
write_int(fid, FIFF.FIFF_FIRST_SAMPLE, first)
write_int(fid, FIFF.FIFF_LAST_SAMPLE, last)
# save baseline
if epochs.baseline is not None:
bmin, bmax = epochs.baseline
write_float(fid, FIFF.FIFF_MNE_BASELINE_MIN, bmin)
write_float(fid, FIFF.FIFF_MNE_BASELINE_MAX, bmax)
# The epochs itself
decal = np.empty(info['nchan'])
for k in range(info['nchan']):
decal[k] = 1.0 / (info['chs'][k]['cal'] *
info['chs'][k].get('scale', 1.0))
data *= decal[np.newaxis, :, np.newaxis]
write_function(fid, FIFF.FIFF_EPOCH, data)
# undo modifications to data
data /= decal[np.newaxis, :, np.newaxis]
write_string(fid, FIFF.FIFF_MNE_EPOCHS_DROP_LOG,
json.dumps(epochs.drop_log))
reject_params = _pack_reject_params(epochs)
if reject_params:
write_string(fid, FIFF.FIFF_MNE_EPOCHS_REJECT_FLAT,
json.dumps(reject_params))
write_int(fid, FIFF.FIFF_MNE_EPOCHS_SELECTION,
epochs.selection)
# And now write the next file info in case epochs are split on disk
if next_fname is not None and n_parts > 1:
start_block(fid, FIFF.FIFFB_REF)
write_int(fid, FIFF.FIFF_REF_ROLE, FIFF.FIFFV_ROLE_NEXT_FILE)
write_string(fid, FIFF.FIFF_REF_FILE_NAME, op.basename(next_fname))
if meas_id is not None:
write_id(fid, FIFF.FIFF_REF_FILE_ID, meas_id)
write_int(fid, FIFF.FIFF_REF_FILE_NUM, next_idx)
end_block(fid, FIFF.FIFFB_REF)
end_block(fid, FIFF.FIFFB_MNE_EPOCHS)
end_block(fid, FIFF.FIFFB_PROCESSED_DATA)
end_block(fid, FIFF.FIFFB_MEAS)
end_file(fid)
def _event_id_string(event_id):
return ';'.join([k + ':' + str(v) for k, v in event_id.items()])
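# e.g. _event_id_string({'auditory': 1, 'visual': 3}) -> 'auditory:1;visual:3'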
def _merge_events(events, event_id, selection):
"""Merge repeated events."""
event_id = event_id.copy()
new_events = events.copy()
event_idxs_to_delete = list()
unique_events, counts = np.unique(events[:, 0], return_counts=True)
for ev in unique_events[counts > 1]:
# indices at which the non-unique events happened
idxs = (events[:, 0] == ev).nonzero()[0]
# Figure out new value for events[:, 1]. Set to 0, if mixed vals exist
unique_priors = np.unique(events[idxs, 1])
new_prior = unique_priors[0] if len(unique_priors) == 1 else 0
# If duplicate time samples have same event val, "merge" == "drop"
# and no new event_id key will be created
ev_vals = np.unique(events[idxs, 2])
if len(ev_vals) <= 1:
new_event_val = ev_vals[0]
# Else, make a new event_id for the merged event
else:
# Find all event_id keys involved in duplicated events. These
# keys will be merged to become a new entry in "event_id"
event_id_keys = list(event_id.keys())
event_id_vals = list(event_id.values())
new_key_comps = [event_id_keys[event_id_vals.index(value)]
for value in ev_vals]
# Check if we already have an entry for merged keys of duplicate
# events ... if yes, reuse it
for key in event_id:
if set(key.split('/')) == set(new_key_comps):
new_event_val = event_id[key]
break
# Else, find an unused value for the new key and make an entry into
# the event_id dict
else:
ev_vals = np.unique(
np.concatenate((list(event_id.values()),
events[:, 1:].flatten()),
axis=0))
if ev_vals[0] > 1:
new_event_val = 1
else:
diffs = np.diff(ev_vals)
idx = np.where(diffs > 1)[0]
idx = -1 if len(idx) == 0 else idx[0]
new_event_val = ev_vals[idx] + 1
new_event_id_key = '/'.join(sorted(new_key_comps))
event_id[new_event_id_key] = int(new_event_val)
# Replace duplicate event times with merged event and remember which
# duplicate indices to delete later
new_events[idxs[0], 1] = new_prior
new_events[idxs[0], 2] = new_event_val
event_idxs_to_delete.extend(idxs[1:])
# Delete duplicate event idxs
new_events = np.delete(new_events, event_idxs_to_delete, 0)
new_selection = np.delete(selection, event_idxs_to_delete, 0)
return new_events, event_id, new_selection
def _handle_event_repeated(events, event_id, event_repeated, selection,
drop_log):
"""Handle repeated events.
Note that drop_log will be modified inplace
"""
assert len(events) == len(selection)
selection = np.asarray(selection)
unique_events, u_ev_idxs = np.unique(events[:, 0], return_index=True)
# Return early if no duplicates
if len(unique_events) == len(events):
return events, event_id, selection, drop_log
# Else, we have duplicates. Triage ...
_check_option('event_repeated', event_repeated, ['error', 'drop', 'merge'])
drop_log = list(drop_log)
if event_repeated == 'error':
raise RuntimeError('Event time samples were not unique. Consider '
'setting the `event_repeated` parameter.')
elif event_repeated == 'drop':
logger.info('Multiple event values for single event times found. '
'Keeping the first occurrence and dropping all others.')
new_events = events[u_ev_idxs]
new_selection = selection[u_ev_idxs]
drop_ev_idxs = np.setdiff1d(selection, new_selection)
for idx in drop_ev_idxs:
drop_log[idx] = drop_log[idx] + ('DROP DUPLICATE',)
selection = new_selection
elif event_repeated == 'merge':
logger.info('Multiple event values for single event times found. '
'Creating new event value to reflect simultaneous events.')
new_events, event_id, new_selection = \
_merge_events(events, event_id, selection)
drop_ev_idxs = np.setdiff1d(selection, new_selection)
for idx in drop_ev_idxs:
drop_log[idx] = drop_log[idx] + ('MERGE DUPLICATE',)
selection = new_selection
drop_log = tuple(drop_log)
# Remove obsolete kv-pairs from event_id after handling
keys = new_events[:, 1:].flatten()
event_id = {k: v for k, v in event_id.items() if v in keys}
return new_events, event_id, selection, drop_log
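# Illustrative example: with event_id={'aud': 1, 'vis': 2} and two events
# sharing the same time sample, event_repeated='drop' keeps only the first
# occurrence (logging 'DROP DUPLICATE'), while 'merge' replaces the pair with
# a single event under a new key 'aud/vis' mapped to an unused integer value
# (logging 'MERGE DUPLICATE').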
@fill_doc
class BaseEpochs(ProjMixin, ContainsMixin, UpdateChannelsMixin, ShiftTimeMixin,
SetChannelsMixin, InterpolationMixin, FilterMixin,
TimeMixin, SizeMixin, GetEpochsMixin):
"""Abstract base class for `~mne.Epochs`-type classes.
.. warning:: This class provides basic functionality and should never be
instantiated directly.
Parameters
----------
info : dict
A copy of the `~mne.Info` dictionary from the raw object.
data : ndarray | None
If ``None``, data will be read from the Raw object. If ndarray, must be
of shape (n_epochs, n_channels, n_times).
%(epochs_events_event_id)s
%(epochs_tmin_tmax)s
%(baseline_epochs)s
Defaults to ``(None, 0)``, i.e. beginning of the data until
time point zero.
%(epochs_raw)s
%(picks_all)s
%(reject_epochs)s
%(flat)s
%(decim)s
%(epochs_reject_tmin_tmax)s
%(epochs_detrend)s
%(proj_epochs)s
%(epochs_on_missing)s
preload_at_end : bool
%(epochs_preload)s
selection : iterable | None
Iterable of indices of selected epochs. If ``None``, will be
automatically generated, corresponding to all non-zero events.
drop_log : tuple | None
Tuple of tuple of strings indicating which epochs have been marked to
be ignored.
filename : str | None
The filename (if the epochs are read from disk).
%(epochs_metadata)s
%(epochs_event_repeated)s
%(verbose)s
Notes
-----
The ``BaseEpochs`` class is public to allow for stable type-checking in
user code (i.e., ``isinstance(my_epochs, BaseEpochs)``) but should not be
used as a constructor for Epochs objects (use instead :class:`mne.Epochs`).
"""
@verbose
def __init__(self, info, data, events, event_id=None, tmin=-0.2, tmax=0.5,
baseline=(None, 0), raw=None, picks=None, reject=None,
flat=None, decim=1, reject_tmin=None, reject_tmax=None,
detrend=None, proj=True, on_missing='raise',
preload_at_end=False, selection=None, drop_log=None,
filename=None, metadata=None, event_repeated='error',
verbose=None): # noqa: D102
self.verbose = verbose
if events is not None: # RtEpochs can have events=None
events = _ensure_events(events)
events_max = events.max()
if events_max > INT32_MAX:
raise ValueError(
f'events array values must not exceed {INT32_MAX}, '
f'got {events_max}')
event_id = _check_event_id(event_id, events)
self.event_id = event_id
del event_id
if events is not None: # RtEpochs can have events=None
for key, val in self.event_id.items():
if val not in events[:, 2]:
msg = ('No matching events found for %s '
'(event id %i)' % (key, val))
_on_missing(on_missing, msg)
# ensure metadata matches original events size
self.selection = np.arange(len(events))
self.events = events
self.metadata = metadata
del events
values = list(self.event_id.values())
selected = np.where(np.in1d(self.events[:, 2], values))[0]
if selection is None:
selection = selected
else:
selection = np.array(selection, int)
if selection.shape != (len(selected),):
raise ValueError('selection must be shape %s got shape %s'
% (selected.shape, selection.shape))
self.selection = selection
if drop_log is None:
self.drop_log = tuple(
() if k in self.selection else ('IGNORED',)
for k in range(max(len(self.events),
max(self.selection) + 1)))
else:
self.drop_log = drop_log
self.events = self.events[selected]
self.events, self.event_id, self.selection, self.drop_log = \
_handle_event_repeated(
self.events, self.event_id, event_repeated,
self.selection, self.drop_log)
# then subselect
sub = np.where(np.in1d(selection, self.selection))[0]
if isinstance(metadata, list):
metadata = [metadata[s] for s in sub]
elif metadata is not None:
metadata = metadata.iloc[sub]
self.metadata = metadata
del metadata
n_events = len(self.events)
if n_events > 1:
if np.diff(self.events.astype(np.int64)[:, 0]).min() <= 0:
warn('The events passed to the Epochs constructor are not '
'chronologically ordered.', RuntimeWarning)
if n_events > 0:
logger.info('%d matching events found' % n_events)
else:
raise ValueError('No desired events found.')
else:
self.drop_log = tuple()
self.selection = np.array([], int)
self.metadata = metadata
# do not set self.events here, let subclass do it
if (detrend not in [None, 0, 1]) or isinstance(detrend, bool):
raise ValueError('detrend must be None, 0, or 1')
self.detrend = detrend
self._raw = raw
info._check_consistency()
self.picks = _picks_to_idx(info, picks, none='all', exclude=(),
allow_empty=False)
self.info = pick_info(info, self.picks)
del info
self._current = 0
if data is None:
self.preload = False
self._data = None
self._do_baseline = True
else:
assert decim == 1
if data.ndim != 3 or data.shape[2] != \
round((tmax - tmin) * self.info['sfreq']) + 1:
raise RuntimeError('bad data shape')
if data.shape[0] != len(self.events):
raise ValueError(
'The number of epochs and the number of events must match')
self.preload = True
self._data = data
self._do_baseline = False
self._offset = None
if tmin > tmax:
raise ValueError('tmin has to be less than or equal to tmax')
# Handle times
sfreq = float(self.info['sfreq'])
start_idx = int(round(tmin * sfreq))
self._raw_times = np.arange(start_idx,
int(round(tmax * sfreq)) + 1) / sfreq
self._set_times(self._raw_times)
# check reject_tmin and reject_tmax
if reject_tmin is not None:
if (np.isclose(reject_tmin, tmin)):
# adjust for potential small deviations due to sampling freq
reject_tmin = self.tmin
elif reject_tmin < tmin:
raise ValueError(f'reject_tmin needs to be None or >= tmin '
f'(got {reject_tmin})')
if reject_tmax is not None:
if (np.isclose(reject_tmax, tmax)):
# adjust for potential small deviations due to sampling freq
reject_tmax = self.tmax
elif reject_tmax > tmax:
raise ValueError(f'reject_tmax needs to be None or <= tmax '
f'(got {reject_tmax})')
if (reject_tmin is not None) and (reject_tmax is not None):
if reject_tmin >= reject_tmax:
raise ValueError(f'reject_tmin ({reject_tmin}) needs to be '
f' < reject_tmax ({reject_tmax})')
self.reject_tmin = reject_tmin
self.reject_tmax = reject_tmax
# decimation
self._decim = 1
self.decimate(decim)
# baseline correction: replace `None` tuple elements with actual times
self.baseline = _check_baseline(baseline, times=self.times,
sfreq=self.info['sfreq'])
if self.baseline is not None and self.baseline != baseline:
logger.info(f'Setting baseline interval to '
f'[{self.baseline[0]}, {self.baseline[1]}] sec')
logger.info(_log_rescale(self.baseline))
# setup epoch rejection
self.reject = None
self.flat = None
self._reject_setup(reject, flat)
# do the rest
valid_proj = [True, 'delayed', False]
if proj not in valid_proj:
raise ValueError('"proj" must be one of %s, not %s'
% (valid_proj, proj))
if proj == 'delayed':
self._do_delayed_proj = True
logger.info('Entering delayed SSP mode.')
else:
self._do_delayed_proj = False
activate = False if self._do_delayed_proj else proj
self._projector, self.info = setup_proj(self.info, False,
activate=activate)
if preload_at_end:
assert self._data is None
assert self.preload is False
self.load_data() # this will do the projection
elif proj is True and self._projector is not None and data is not None:
# let's make sure we project if data was provided and proj
# requested
# we could do this with np.einsum, but iteration should be
# more memory safe in most instances
for ii, epoch in enumerate(self._data):
self._data[ii] = np.dot(self._projector, epoch)
self._filename = str(filename) if filename is not None else filename
self._check_consistency()
def _check_consistency(self):
"""Check invariants of epochs object."""
if hasattr(self, 'events'):
assert len(self.selection) == len(self.events)
assert len(self.drop_log) >= len(self.events)
assert len(self.selection) == sum(
(len(dl) == 0 for dl in self.drop_log))
assert hasattr(self, '_times_readonly')
assert not self.times.flags['WRITEABLE']
assert isinstance(self.drop_log, tuple)
assert all(isinstance(log, tuple) for log in self.drop_log)
assert all(isinstance(s, str) for log in self.drop_log for s in log)
def reset_drop_log_selection(self):
"""Reset the drop_log and selection entries.
This method will simplify ``self.drop_log`` and ``self.selection``
so that they are meaningless (tuple of empty tuples and increasing
integers, respectively). This can be useful when concatenating
many Epochs instances, as ``drop_log`` can accumulate many entries
which can become problematic when saving.
"""
self.selection = np.arange(len(self.events))
self.drop_log = (tuple(),) * len(self.events)
self._check_consistency()
def load_data(self):
"""Load the data if not already preloaded.
Returns
-------
epochs : instance of Epochs
The epochs object.
Notes
-----
This function operates in-place.
.. versionadded:: 0.10.0
"""
if self.preload:
return self
self._data = self._get_data()
self.preload = True
self._do_baseline = False
self._decim_slice = slice(None, None, None)
self._decim = 1
self._raw_times = self.times
assert self._data.shape[-1] == len(self.times)
self._raw = None # shouldn't need it anymore
return self
@verbose
def decimate(self, decim, offset=0, verbose=None):
"""Decimate the epochs.
Parameters
----------
%(decim)s
%(decim_offset)s
%(verbose_meth)s
Returns
-------
epochs : instance of Epochs
The decimated Epochs object.
See Also
--------
mne.Evoked.decimate
mne.Epochs.resample
mne.io.Raw.resample
Notes
-----
%(decim_notes)s
If ``decim`` is 1, this method does not copy the underlying data.
.. versionadded:: 0.10.0
References
----------
.. footbibliography::
"""
decim, offset, new_sfreq = _check_decim(self.info, decim, offset)
start_idx = int(round(-self._raw_times[0] * (self.info['sfreq'] *
self._decim)))
self._decim *= decim
i_start = start_idx % self._decim + offset
decim_slice = slice(i_start, None, self._decim)
self.info['sfreq'] = new_sfreq
if self.preload:
if decim != 1:
self._data = self._data[:, :, decim_slice].copy()
self._raw_times = self._raw_times[decim_slice].copy()
else:
self._data = np.ascontiguousarray(self._data)
self._decim_slice = slice(None)
self._decim = 1
else:
self._decim_slice = decim_slice
self._set_times(self._raw_times[self._decim_slice])
return self
@verbose
def apply_baseline(self, baseline=(None, 0), *, verbose=None):
"""Baseline correct epochs.
Parameters
----------
%(baseline_epochs)s
Defaults to ``(None, 0)``, i.e. beginning of the data until
time point zero.
%(verbose_meth)s
Returns
-------
epochs : instance of Epochs
The baseline-corrected Epochs object.
Notes
-----
Baseline correction can be done multiple times, but can never be
reverted once the data has been loaded.
.. versionadded:: 0.10.0
"""
baseline = _check_baseline(baseline, times=self.times,
sfreq=self.info['sfreq'])
if self.preload:
if self.baseline is not None and baseline is None:
raise RuntimeError('You cannot remove baseline correction '
'from preloaded data once it has been '
'applied.')
self._do_baseline = True
picks = self._detrend_picks
rescale(self._data, self.times, baseline, copy=False, picks=picks)
self._do_baseline = False
else: # logging happens in "rescale" in "if" branch
logger.info(_log_rescale(baseline))
assert self._do_baseline is True
self.baseline = baseline
return self
def _reject_setup(self, reject, flat):
"""Set self._reject_time and self._channel_type_idx."""
idx = channel_indices_by_type(self.info)
reject = deepcopy(reject) if reject is not None else dict()
flat = deepcopy(flat) if flat is not None else dict()
for rej, kind in zip((reject, flat), ('reject', 'flat')):
if not isinstance(rej, dict):
raise TypeError('reject and flat must be dict or None, not %s'
% type(rej))
bads = set(rej.keys()) - set(idx.keys())
if len(bads) > 0:
raise KeyError('Unknown channel types found in %s: %s'
% (kind, bads))
for key in idx.keys():
# don't throw an error if rejection/flat would do nothing
if len(idx[key]) == 0 and (np.isfinite(reject.get(key, np.inf)) or
flat.get(key, -1) >= 0):
# This is where we could eventually add e.g.
# self.allow_missing_reject_keys check to allow users to
# provide keys that don't exist in data
raise ValueError("No %s channel found. Cannot reject based on "
"%s." % (key.upper(), key.upper()))
# check for invalid values
for rej, kind in zip((reject, flat), ('Rejection', 'Flat')):
for key, val in rej.items():
if val is None or val < 0:
raise ValueError('%s value must be a number >= 0, not "%s"'
% (kind, val))
# now check to see if our rejection and flat are getting more
# restrictive
old_reject = self.reject if self.reject is not None else dict()
old_flat = self.flat if self.flat is not None else dict()
bad_msg = ('{kind}["{key}"] == {new} {op} {old} (old value), new '
'{kind} values must be at least as stringent as '
'previous ones')
for key in set(reject.keys()).union(old_reject.keys()):
old = old_reject.get(key, np.inf)
new = reject.get(key, np.inf)
if new > old:
raise ValueError(bad_msg.format(kind='reject', key=key,
new=new, old=old, op='>'))
for key in set(flat.keys()).union(old_flat.keys()):
old = old_flat.get(key, -np.inf)
new = flat.get(key, -np.inf)
if new < old:
raise ValueError(bad_msg.format(kind='flat', key=key,
new=new, old=old, op='<'))
# after validation, set parameters
self._bad_dropped = False
self._channel_type_idx = idx
self.reject = reject if len(reject) > 0 else None
self.flat = flat if len(flat) > 0 else None
if (self.reject_tmin is None) and (self.reject_tmax is None):
self._reject_time = None
else:
if self.reject_tmin is None:
reject_imin = None
else:
idxs = np.nonzero(self.times >= self.reject_tmin)[0]
reject_imin = idxs[0]
if self.reject_tmax is None:
reject_imax = None
else:
idxs = np.nonzero(self.times <= self.reject_tmax)[0]
reject_imax = idxs[-1]
self._reject_time = slice(reject_imin, reject_imax)
@verbose # verbose is used by mne-realtime
def _is_good_epoch(self, data, verbose=None):
"""Determine if epoch is good."""
if isinstance(data, str):
return False, (data,)
if data is None:
return False, ('NO_DATA',)
n_times = len(self.times)
if data.shape[1] < n_times:
# epoch is too short ie at the end of the data
return False, ('TOO_SHORT',)
if self.reject is None and self.flat is None:
return True, None
else:
if self._reject_time is not None:
data = data[:, self._reject_time]
return _is_good(data, self.ch_names, self._channel_type_idx,
self.reject, self.flat, full_report=True,
ignore_chs=self.info['bads'])
@verbose
def _detrend_offset_decim(self, epoch, picks, verbose=None):
"""Aux Function: detrend, baseline correct, offset, decim.
Note: operates inplace
"""
if (epoch is None) or isinstance(epoch, str):
return epoch
# Detrend
if self.detrend is not None:
# We explicitly detrend just data channels (not EMG, ECG, EOG which
# are processed by baseline correction)
use_picks = _pick_data_channels(self.info, exclude=())
epoch[use_picks] = detrend(epoch[use_picks], self.detrend, axis=1)
# Baseline correct
if self._do_baseline:
rescale(
epoch, self._raw_times, self.baseline, picks=picks, copy=False,
verbose=False)
# Decimate if necessary (i.e., epoch not preloaded)
epoch = epoch[:, self._decim_slice]
# handle offset
if self._offset is not None:
epoch += self._offset
return epoch
def iter_evoked(self, copy=False):
"""Iterate over epochs as a sequence of Evoked objects.
The Evoked objects yielded will each contain a single epoch (i.e., no
averaging is performed).
This method resets the object iteration state to the first epoch.
Parameters
----------
copy : bool
If False copies of data and measurement info will be omitted
to save time.
"""
self.__iter__()
while True:
try:
out = self.__next__(True)
except StopIteration:
break
data, event_id = out
tmin = self.times[0]
info = self.info
if copy:
info = deepcopy(self.info)
data = data.copy()
yield EvokedArray(data, info, tmin, comment=str(event_id))
def subtract_evoked(self, evoked=None):
"""Subtract an evoked response from each epoch.
Can be used to exclude the evoked response when analyzing induced
activity, see e.g. [1]_.
Parameters
----------
evoked : instance of Evoked | None
The evoked response to subtract. If None, the evoked response
is computed from Epochs itself.
Returns
-------
self : instance of Epochs
The modified instance (instance is also modified inplace).
References
----------
.. [1] David et al. "Mechanisms of evoked and induced responses in
MEG/EEG", NeuroImage, vol. 31, no. 4, pp. 1580-1591, July 2006.
"""
logger.info('Subtracting Evoked from Epochs')
if evoked is None:
picks = _pick_data_channels(self.info, exclude=[])
evoked = self.average(picks)
# find the indices of the channels to use
picks = pick_channels(evoked.ch_names, include=self.ch_names)
# make sure the omitted channels are not data channels
if len(picks) < len(self.ch_names):
sel_ch = [evoked.ch_names[ii] for ii in picks]
diff_ch = list(set(self.ch_names).difference(sel_ch))
diff_idx = [self.ch_names.index(ch) for ch in diff_ch]
diff_types = [channel_type(self.info, idx) for idx in diff_idx]
bad_idx = [diff_types.index(t) for t in diff_types if t in
_DATA_CH_TYPES_SPLIT]
if len(bad_idx) > 0:
bad_str = ', '.join([diff_ch[ii] for ii in bad_idx])
raise ValueError('The following data channels are missing '
'in the evoked response: %s' % bad_str)
logger.info(' The following channels are not included in the '
'subtraction: %s' % ', '.join(diff_ch))
# make sure the times match
if (len(self.times) != len(evoked.times) or
np.max(np.abs(self.times - evoked.times)) >= 1e-7):
raise ValueError('Epochs and Evoked object do not contain '
'the same time points.')
# handle SSPs
if not self.proj and evoked.proj:
warn('Evoked has SSP applied while Epochs has not.')
if self.proj and not evoked.proj:
evoked = evoked.copy().apply_proj()
# find the indices of the channels to use in Epochs
ep_picks = [self.ch_names.index(evoked.ch_names[ii]) for ii in picks]
# do the subtraction
if self.preload:
self._data[:, ep_picks, :] -= evoked.data[picks][None, :, :]
else:
if self._offset is None:
self._offset = np.zeros((len(self.ch_names), len(self.times)),
dtype=np.float64)
self._offset[ep_picks] -= evoked.data[picks]
logger.info('[done]')
return self
@fill_doc
def average(self, picks=None, method="mean"):
"""Compute an average over epochs.
Parameters
----------
%(picks_all_data)s
method : str | callable
How to combine the data. If "mean"/"median", the mean/median
are returned.
Otherwise, must be a callable which, when passed an array of shape
(n_epochs, n_channels, n_time) returns an array of shape
(n_channels, n_time).
Note that due to file type limitations, the kind for all
these will be "average".
Returns
-------
evoked : instance of Evoked | dict of Evoked
The averaged epochs.
Notes
-----
Computes an average of all epochs in the instance, even if
they correspond to different conditions. To average by condition,
do ``epochs[condition].average()`` for each condition separately.
When picks is None and epochs contain only ICA channels, no channels
are selected, resulting in an error. This is because ICA channels
are not considered data channels (they are of misc type) and only data
channels are selected when picks is None.
The ``method`` parameter allows e.g. robust averaging.
For example, one could do:
>>> from scipy.stats import trim_mean # doctest:+SKIP
>>> trim = lambda x: trim_mean(x, 0.1, axis=0) # doctest:+SKIP
>>> epochs.average(method=trim) # doctest:+SKIP
This would compute the trimmed mean.
"""
return self._compute_aggregate(picks=picks, mode=method)
@fill_doc
def standard_error(self, picks=None):
"""Compute standard error over epochs.
Parameters
----------
%(picks_all_data)s
Returns
-------
evoked : instance of Evoked
The standard error over epochs.
"""
return self._compute_aggregate(picks, "std")
def _compute_aggregate(self, picks, mode='mean'):
"""Compute the mean, median, or std over epochs and return Evoked."""
# if instance contains ICA channels they won't be included unless picks
# is specified
if picks is None:
check_ICA = [x.startswith('ICA') for x in self.ch_names]
if np.all(check_ICA):
raise TypeError('picks must be specified (i.e. not None) for '
'ICA channel data')
elif np.any(check_ICA):
warn('ICA channels will not be included unless explicitly '
'selected in picks')
n_channels = len(self.ch_names)
n_times = len(self.times)
if self.preload:
n_events = len(self.events)
fun = _check_combine(mode, valid=('mean', 'median', 'std'))
data = fun(self._data)
assert len(self.events) == len(self._data)
if data.shape != self._data.shape[1:]:
raise RuntimeError(
'You passed a function that resulted in data of shape {}, '
'but it should be {}.'.format(
data.shape, self._data.shape[1:]))
else:
if mode not in {"mean", "std"}:
raise ValueError("If data are not preloaded, can only compute "
"mean or standard deviation.")
data = np.zeros((n_channels, n_times))
n_events = 0
for e in self:
if np.iscomplexobj(e):
data = data.astype(np.complex128)
data += e
n_events += 1
if n_events > 0:
data /= n_events
else:
data.fill(np.nan)
# convert to stderr if requested, could do in one pass but do in
# two (slower) in case there are large numbers
if mode == "std":
data_mean = data.copy()
data.fill(0.)
for e in self:
data += (e - data_mean) ** 2
data = np.sqrt(data / n_events)
if mode == "std":
kind = 'standard_error'
data /= np.sqrt(n_events)
else:
kind = "average"
return self._evoked_from_epoch_data(data, self.info, picks, n_events,
kind, self._name)
@property
def _name(self):
"""Give a nice string representation based on event ids."""
if len(self.event_id) == 1:
comment = next(iter(self.event_id.keys()))
else:
count = Counter(self.events[:, 2])
comments = list()
for key, value in self.event_id.items():
comments.append('%.2f × %s' % (
float(count[value]) / len(self.events), key))
comment = ' + '.join(comments)
return comment
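# Illustrative example: with a single condition the comment is just its key
# (e.g. 'aud'); with event_id={'aud': 1, 'vis': 3} and equal counts it is
# something like '0.50 × aud + 0.50 × vis'.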
def _evoked_from_epoch_data(self, data, info, picks, n_events, kind,
comment):
"""Create an evoked object from epoch data."""
info = deepcopy(info)
# don't apply baseline correction; we'll set evoked.baseline manually
evoked = EvokedArray(data, info, tmin=self.times[0], comment=comment,
nave=n_events, kind=kind, baseline=None,
verbose=self.verbose)
evoked.baseline = self.baseline
# XXX: above constructor doesn't recreate the times object precisely
evoked.times = self.times.copy()
# pick channels
picks = _picks_to_idx(self.info, picks, 'data_or_ica', ())
ch_names = [evoked.ch_names[p] for p in picks]
evoked.pick_channels(ch_names)
if len(evoked.info['ch_names']) == 0:
raise ValueError('No data channel found when averaging.')
if evoked.nave < 1:
warn('evoked object is empty (based on less than 1 epoch)')
return evoked
@property
def ch_names(self):
"""Channel names."""
return self.info['ch_names']
@copy_function_doc_to_method_doc(plot_epochs)
def plot(self, picks=None, scalings=None, n_epochs=20, n_channels=20,
title=None, events=None, event_color=None,
order=None, show=True, block=False, decim='auto', noise_cov=None,
butterfly=False, show_scrollbars=True, epoch_colors=None,
event_id=None, group_by='type'):
return plot_epochs(self, picks=picks, scalings=scalings,
n_epochs=n_epochs, n_channels=n_channels,
title=title, events=events, event_color=event_color,
order=order, show=show, block=block, decim=decim,
noise_cov=noise_cov, butterfly=butterfly,
show_scrollbars=show_scrollbars,
epoch_colors=epoch_colors, event_id=event_id,
group_by=group_by)
@copy_function_doc_to_method_doc(plot_epochs_psd)
def plot_psd(self, fmin=0, fmax=np.inf, tmin=None, tmax=None,
proj=False, bandwidth=None, adaptive=False, low_bias=True,
normalization='length', picks=None, ax=None, color='black',
xscale='linear', area_mode='std', area_alpha=0.33,
dB=True, estimate='auto', show=True, n_jobs=1,
average=False, line_alpha=None, spatial_colors=True,
sphere=None, verbose=None):
return plot_epochs_psd(self, fmin=fmin, fmax=fmax, tmin=tmin,
tmax=tmax, proj=proj, bandwidth=bandwidth,
adaptive=adaptive, low_bias=low_bias,
normalization=normalization, picks=picks, ax=ax,
color=color, xscale=xscale, area_mode=area_mode,
area_alpha=area_alpha, dB=dB, estimate=estimate,
show=show, n_jobs=n_jobs, average=average,
line_alpha=line_alpha,
spatial_colors=spatial_colors, sphere=sphere,
verbose=verbose)
@copy_function_doc_to_method_doc(plot_epochs_psd_topomap)
def plot_psd_topomap(self, bands=None, tmin=None,
tmax=None, proj=False, bandwidth=None, adaptive=False,
low_bias=True, normalization='length', ch_type=None,
cmap=None, agg_fun=None, dB=True,
n_jobs=1, normalize=False, cbar_fmt='auto',
outlines='head', axes=None, show=True,
sphere=None, vlim=(None, None), verbose=None):
return plot_epochs_psd_topomap(
self, bands=bands, tmin=tmin, tmax=tmax,
proj=proj, bandwidth=bandwidth, adaptive=adaptive,
low_bias=low_bias, normalization=normalization, ch_type=ch_type,
cmap=cmap, agg_fun=agg_fun, dB=dB, n_jobs=n_jobs,
normalize=normalize, cbar_fmt=cbar_fmt, outlines=outlines,
axes=axes, show=show, sphere=sphere, vlim=vlim, verbose=verbose)
@copy_function_doc_to_method_doc(plot_topo_image_epochs)
def plot_topo_image(self, layout=None, sigma=0., vmin=None, vmax=None,
colorbar=None, order=None, cmap='RdBu_r',
layout_scale=.95, title=None, scalings=None,
border='none', fig_facecolor='k', fig_background=None,
font_color='w', show=True):
return plot_topo_image_epochs(
self, layout=layout, sigma=sigma, vmin=vmin, vmax=vmax,
colorbar=colorbar, order=order, cmap=cmap,
layout_scale=layout_scale, title=title, scalings=scalings,
border=border, fig_facecolor=fig_facecolor,
fig_background=fig_background, font_color=font_color, show=show)
@verbose
def drop_bad(self, reject='existing', flat='existing', verbose=None):
"""Drop bad epochs without retaining the epochs data.
Should be used before slicing operations.
.. warning:: This operation is slow since all epochs have to be read
from disk. To avoid reading epochs from disk multiple
times, use :meth:`mne.Epochs.load_data()`.
Parameters
----------
%(reject_drop_bad)s
%(flat_drop_bad)s
%(verbose_meth)s
Returns
-------
epochs : instance of Epochs
The epochs with bad epochs dropped. Operates in-place.
Notes
-----
Dropping bad epochs can be done multiple times with different
``reject`` and ``flat`` parameters. However, once an epoch is
dropped, it is dropped forever, so if more lenient thresholds may
subsequently be applied, `epochs.copy <mne.Epochs.copy>` should be
used.
"""
if reject == 'existing':
if flat == 'existing' and self._bad_dropped:
return
reject = self.reject
if flat == 'existing':
flat = self.flat
if any(isinstance(rej, str) and rej != 'existing' for
rej in (reject, flat)):
raise ValueError('reject and flat, if strings, must be "existing"')
self._reject_setup(reject, flat)
self._get_data(out=False, verbose=verbose)
return self
def drop_log_stats(self, ignore=('IGNORED',)):
"""Compute the channel stats based on a drop_log from Epochs.
Parameters
----------
ignore : list
The drop reasons to ignore.
Returns
-------
perc : float
Total percentage of epochs dropped.
See Also
--------
plot_drop_log
"""
return _drop_log_stats(self.drop_log, ignore)
@copy_function_doc_to_method_doc(plot_drop_log)
def plot_drop_log(self, threshold=0, n_max_plot=20, subject='Unknown subj',
color=(0.9, 0.9, 0.9), width=0.8, ignore=('IGNORED',),
show=True):
if not self._bad_dropped:
raise ValueError("You cannot use plot_drop_log since bad "
"epochs have not yet been dropped. "
"Use epochs.drop_bad().")
return plot_drop_log(self.drop_log, threshold, n_max_plot, subject,
color=color, width=width, ignore=ignore,
show=show)
@copy_function_doc_to_method_doc(plot_epochs_image)
def plot_image(self, picks=None, sigma=0., vmin=None, vmax=None,
colorbar=True, order=None, show=True, units=None,
scalings=None, cmap=None, fig=None, axes=None,
overlay_times=None, combine=None, group_by=None,
evoked=True, ts_args=None, title=None, clear=False):
return plot_epochs_image(self, picks=picks, sigma=sigma, vmin=vmin,
vmax=vmax, colorbar=colorbar, order=order,
show=show, units=units, scalings=scalings,
cmap=cmap, fig=fig, axes=axes,
overlay_times=overlay_times, combine=combine,
group_by=group_by, evoked=evoked,
ts_args=ts_args, title=title, clear=clear)
@verbose
def drop(self, indices, reason='USER', verbose=None):
"""Drop epochs based on indices or boolean mask.
.. note:: The indices refer to the current set of undropped epochs
rather than the complete set of dropped and undropped epochs.
They are therefore not necessarily consistent with any
external indices (e.g., behavioral logs). To drop epochs
based on external criteria, do not use the ``preload=True``
flag when constructing an Epochs object, and call this
method before calling the :meth:`mne.Epochs.drop_bad` or
:meth:`mne.Epochs.load_data` methods.
Parameters
----------
indices : array of int or bool
Set epochs to remove by specifying indices to remove or a boolean
mask to apply (where True values get removed). Events are
correspondingly modified.
reason : str
Reason for dropping the epochs ('ECG', 'timeout', 'blink' etc).
Default: 'USER'.
%(verbose_meth)s
Returns
-------
epochs : instance of Epochs
The epochs with indices dropped. Operates in-place.
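Examples
--------
A sketch assuming an existing ``epochs`` instance; the indices and
reason string are illustrative assumptions.
>>> epochs.drop([0, 1], reason='blink')  # doctest: +SKIP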
"""
indices = np.atleast_1d(indices)
if indices.ndim > 1:
raise ValueError("indices must be a scalar or a 1-d array")
if indices.dtype == bool:
indices = np.where(indices)[0]
try_idx = np.where(indices < 0, indices + len(self.events), indices)
out_of_bounds = (try_idx < 0) | (try_idx >= len(self.events))
if out_of_bounds.any():
first = indices[out_of_bounds][0]
raise IndexError("Epoch index %d is out of bounds" % first)
keep = np.setdiff1d(np.arange(len(self.events)), try_idx)
self._getitem(keep, reason, copy=False, drop_event_id=False)
count = len(try_idx)
logger.info('Dropped %d epoch%s: %s' %
(count, _pl(count), ', '.join(map(str, np.sort(try_idx)))))
return self
def _get_epoch_from_raw(self, idx, verbose=None):
"""Get a given epoch from disk."""
raise NotImplementedError
def _project_epoch(self, epoch):
"""Process a raw epoch based on the delayed param."""
# whenever requested, the first epoch is being projected.
if (epoch is None) or isinstance(epoch, str):
# can happen if t < 0 or reject based on annotations
return epoch
proj = self._do_delayed_proj or self.proj
if self._projector is not None and proj is True:
epoch = np.dot(self._projector, epoch)
return epoch
@verbose
def _get_data(self, out=True, picks=None, item=None, verbose=None):
"""Load all data, dropping bad epochs along the way.
Parameters
----------
out : bool
Return the data. Setting this to False is used to reject bad
epochs without caching all the data, which saves memory.
%(picks_all)s
%(verbose_meth)s
"""
if item is None:
item = slice(None)
elif not self._bad_dropped:
raise ValueError(
'item must be None in epochs.get_data() unless bads have been '
'dropped. Consider using epochs.drop_bad().')
select = self._item_to_select(item) # indices or slice
use_idx = np.arange(len(self.events))[select]
n_events = len(use_idx)
# in case there are no good events
if self.preload:
# we will store our result in our existing array
data = self._data
else:
# we start out with an empty array, allocate only if necessary
data = np.empty((0, len(self.info['ch_names']), len(self.times)))
logger.info('Loading data for %s events and %s original time '
'points ...' % (n_events, len(self._raw_times)))
if self._bad_dropped:
if not out:
return
if self.preload:
data = data[select]
if picks is None:
return data
else:
picks = _picks_to_idx(self.info, picks)
return data[:, picks]
# we need to load from disk, drop, and return data
detrend_picks = self._detrend_picks
for ii, idx in enumerate(use_idx):
# faster to pre-allocate memory here
epoch_noproj = self._get_epoch_from_raw(idx)
epoch_noproj = self._detrend_offset_decim(
epoch_noproj, detrend_picks)
if self._do_delayed_proj:
epoch_out = epoch_noproj
else:
epoch_out = self._project_epoch(epoch_noproj)
if ii == 0:
data = np.empty((n_events, len(self.ch_names),
len(self.times)), dtype=epoch_out.dtype)
data[ii] = epoch_out
else:
# bads need to be dropped, this might occur after a preload
# e.g., when calling drop_bad w/new params
good_idx = []
n_out = 0
drop_log = list(self.drop_log)
assert n_events == len(self.selection)
if not self.preload:
detrend_picks = self._detrend_picks
for idx, sel in enumerate(self.selection):
if self.preload: # from memory
if self._do_delayed_proj:
epoch_noproj = self._data[idx]
epoch = self._project_epoch(epoch_noproj)
else:
epoch_noproj = None
epoch = self._data[idx]
else: # from disk
epoch_noproj = self._get_epoch_from_raw(idx)
epoch_noproj = self._detrend_offset_decim(
epoch_noproj, detrend_picks)
epoch = self._project_epoch(epoch_noproj)
epoch_out = epoch_noproj if self._do_delayed_proj else epoch
is_good, bad_tuple = self._is_good_epoch(
epoch, verbose=verbose)
if not is_good:
assert isinstance(bad_tuple, tuple)
assert all(isinstance(x, str) for x in bad_tuple)
drop_log[sel] = drop_log[sel] + bad_tuple
continue
good_idx.append(idx)
# store the epoch if there is a reason to (output or update)
if out or self.preload:
# faster to pre-allocate, then trim as necessary
if n_out == 0 and not self.preload:
data = np.empty((n_events, epoch_out.shape[0],
epoch_out.shape[1]),
dtype=epoch_out.dtype, order='C')
data[n_out] = epoch_out
n_out += 1
self.drop_log = tuple(drop_log)
del drop_log
self._bad_dropped = True
logger.info("%d bad epochs dropped" % (n_events - len(good_idx)))
# adjust the data size if there is a reason to (output or update)
if out or self.preload:
if data.flags['OWNDATA'] and data.flags['C_CONTIGUOUS']:
data.resize((n_out,) + data.shape[1:], refcheck=False)
else:
data = data[:n_out]
if self.preload:
self._data = data
# Now update our properties (except data, which is already fixed)
self._getitem(good_idx, None, copy=False, drop_event_id=False,
select_data=False)
if out:
if picks is None:
return data
else:
picks = _picks_to_idx(self.info, picks)
return data[:, picks]
else:
return None
@property
def _detrend_picks(self):
if self._do_baseline:
return _pick_data_channels(
self.info, with_ref_meg=True, with_aux=True, exclude=())
else:
return []
@fill_doc
def get_data(self, picks=None, item=None):
"""Get all epochs as a 3D array.
Parameters
----------
%(picks_all)s
item : slice | array-like | str | list | None
The items to get. See :meth:`mne.Epochs.__getitem__` for
a description of valid options. This can be substantially faster
for obtaining an ndarray than :meth:`~mne.Epochs.__getitem__`
for repeated access on large Epochs objects.
None (default) is an alias for ``slice(None)``.
.. versionadded:: 0.20
Returns
-------
data : array of shape (n_epochs, n_channels, n_times)
A view on epochs data.
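Examples
--------
A sketch assuming ``epochs`` is an existing instance containing EEG
channels (both assumptions).
>>> data = epochs.get_data(picks='eeg', item=slice(0, 10))  # doctest: +SKIP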
"""
return self._get_data(picks=picks, item=item)
@verbose
def apply_function(self, fun, picks=None, dtype=None, n_jobs=1,
channel_wise=True, verbose=None, **kwargs):
"""Apply a function to a subset of channels.
%(applyfun_summary_epochs)s
Parameters
----------
%(applyfun_fun)s
%(picks_all_data_noref)s
%(applyfun_dtype)s
%(n_jobs)s
%(applyfun_chwise_epo)s
%(verbose_meth)s
%(kwarg_fun)s
Returns
-------
self : instance of Epochs
The epochs object with transformed data.
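Examples
--------
A sketch assuming preloaded ``epochs`` with EEG channels; the
demeaning function is purely illustrative.
>>> epochs.apply_function(lambda x: x - x.mean(), picks='eeg')  # doctest: +SKIP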
"""
_check_preload(self, 'epochs.apply_function')
picks = _picks_to_idx(self.info, picks, exclude=(), with_ref_meg=False)
if not callable(fun):
raise ValueError('fun needs to be a function')
data_in = self._data
if dtype is not None and dtype != self._data.dtype:
self._data = self._data.astype(dtype)
if channel_wise:
if n_jobs == 1:
_fun = partial(_check_fun, fun, **kwargs)
# modify data inplace to save memory
for idx in picks:
self._data[:, idx, :] = np.apply_along_axis(
_fun, -1, data_in[:, idx, :])
else:
# use parallel function
parallel, p_fun, _ = parallel_func(_check_fun, n_jobs)
data_picks_new = parallel(p_fun(
fun, data_in[:, p, :], **kwargs) for p in picks)
for pp, p in enumerate(picks):
self._data[:, p, :] = data_picks_new[pp]
else:
self._data = _check_fun(fun, data_in, **kwargs)
return self
@property
def times(self):
"""Time vector in seconds."""
return self._times_readonly
def _set_times(self, times):
"""Set self._times_readonly (and make it read only)."""
# naming used to indicate that it shouldn't be
# changed directly, but rather via this method
self._times_readonly = times.copy()
self._times_readonly.flags['WRITEABLE'] = False
@property
def tmin(self):
"""First time point."""
return self.times[0]
@property
def filename(self):
"""The filename."""
return self._filename
@property
def tmax(self):
"""Last time point."""
return self.times[-1]
def __repr__(self):
"""Build string representation."""
s = ' %s events ' % len(self.events)
s += '(all good)' if self._bad_dropped else '(good & bad)'
s += ', %g - %g sec' % (self.tmin, self.tmax)
s += ', baseline '
if self.baseline is None:
s += 'off'
else:
s += f'{self.baseline[0]:g} – {self.baseline[1]:g} sec'
if self.baseline != _check_baseline(
self.baseline, times=self.times, sfreq=self.info['sfreq'],
on_baseline_outside_data='adjust'):
s += ' (baseline period was cropped after baseline correction)'
s += ', ~%s' % (sizeof_fmt(self._size),)
s += ', data%s loaded' % ('' if self.preload else ' not')
s += ', with metadata' if self.metadata is not None else ''
counts = ['%r: %i' % (k, sum(self.events[:, 2] == v))
for k, v in sorted(self.event_id.items())]
if len(self.event_id) > 0:
s += ',' + '\n '.join([''] + counts)
class_name = self.__class__.__name__
class_name = 'Epochs' if class_name == 'BaseEpochs' else class_name
return '<%s | %s>' % (class_name, s)
def _repr_html_(self):
if self.baseline is None:
baseline = 'off'
else:
baseline = tuple([f'{b:.3f}' for b in self.baseline])
baseline = f'{baseline[0]} – {baseline[1]} sec'
if isinstance(self.event_id, dict):
events = ''
for k, v in sorted(self.event_id.items()):
n_events = sum(self.events[:, 2] == v)
events += f'{k}: {n_events}<br>'
elif isinstance(self.event_id, list):
events = ''
for k in self.event_id:
n_events = sum(self.events[:, 2] == k)
events += f'{k}: {n_events}<br>'
elif isinstance(self.event_id, int):
n_events = len(self.events[:, 2])
events = f'{self.event_id}: {n_events}<br>'
else:
events = None
return epochs_template.substitute(epochs=self, baseline=baseline,
events=events)
@verbose
def crop(self, tmin=None, tmax=None, include_tmax=True, verbose=None):
"""Crop a time interval from the epochs.
Parameters
----------
tmin : float | None
Start time of selection in seconds.
tmax : float | None
End time of selection in seconds.
%(include_tmax)s
%(verbose_meth)s
Returns
-------
epochs : instance of Epochs
The cropped epochs object, modified in-place.
Notes
-----
%(notes_tmax_included_by_default)s
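Examples
--------
A sketch assuming a preloaded ``epochs`` instance whose time range
covers 0 to 0.4 s (assumptions).
>>> epochs.crop(tmin=0., tmax=0.4)  # doctest: +SKIP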
"""
# XXX this could be made to work on non-preloaded data...
_check_preload(self, 'Modifying data of epochs')
if tmin is None:
tmin = self.tmin
elif tmin < self.tmin:
warn('tmin is not in epochs time interval. tmin is set to '
'epochs.tmin')
tmin = self.tmin
if tmax is None:
tmax = self.tmax
elif tmax > self.tmax:
warn('tmax is not in epochs time interval. tmax is set to '
'epochs.tmax')
tmax = self.tmax
tmask = _time_mask(self.times, tmin, tmax, sfreq=self.info['sfreq'],
include_tmax=include_tmax)
self._set_times(self.times[tmask])
self._raw_times = self._raw_times[tmask]
self._data = self._data[:, :, tmask]
# Adjust rejection period
if self.reject_tmin is not None and self.reject_tmin < self.tmin:
logger.info(
f'reject_tmin is not in epochs time interval. '
f'Setting reject_tmin to epochs.tmin ({self.tmin} sec)')
self.reject_tmin = self.tmin
if self.reject_tmax is not None and self.reject_tmax > self.tmax:
logger.info(
f'reject_tmax is not in epochs time interval. '
f'Setting reject_tmax to epochs.tmax ({self.tmax} sec)')
self.reject_tmax = self.tmax
return self
def copy(self):
"""Return copy of Epochs instance.
Returns
-------
epochs : instance of Epochs
A copy of the object.
"""
return deepcopy(self)
def __deepcopy__(self, memodict):
"""Make a deepcopy."""
cls = self.__class__
result = cls.__new__(cls)
for k, v in self.__dict__.items():
# drop_log is immutable and _raw is private (and problematic to
# deepcopy)
if k in ('drop_log', '_raw', '_times_readonly'):
memodict[id(v)] = v
else:
v = deepcopy(v, memodict)
result.__dict__[k] = v
return result
@verbose
def save(self, fname, split_size='2GB', fmt='single', overwrite=False,
verbose=True):
"""Save epochs in a fif file.
Parameters
----------
fname : str
The name of the file, which should end with -epo.fif or
-epo.fif.gz.
split_size : str | int
Large raw files are automatically split into multiple pieces. This
parameter specifies the maximum size of each piece. If the
parameter is an integer, it specifies the size in Bytes. It is
also possible to pass a human-readable string, e.g., 100MB.
Note: Due to FIFF file limitations, the maximum split size is 2GB.
.. versionadded:: 0.10.0
fmt : str
Format to save data. Valid options are 'double' or
'single' for 64- or 32-bit float, or for 128- or
64-bit complex numbers respectively. Note: Data are processed with
double precision. If single precision is chosen, the saved data
will differ slightly due to the reduction in precision.
.. versionadded:: 0.17
%(overwrite)s
To overwrite original file (the same one that was loaded),
data must be preloaded upon reading. This defaults to True in 0.18
but will change to False in 0.19.
.. versionadded:: 0.18
%(verbose_meth)s
Notes
-----
Bad epochs will be dropped before saving the epochs to disk.
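Examples
--------
A sketch; the filename is a hypothetical placeholder.
>>> epochs.save('sample-epo.fif', overwrite=True)  # doctest: +SKIP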
"""
check_fname(fname, 'epochs', ('-epo.fif', '-epo.fif.gz',
'_epo.fif', '_epo.fif.gz'))
# check for file existence
_check_fname(fname, overwrite)
split_size_bytes = _get_split_size(split_size)
_check_option('fmt', fmt, ['single', 'double'])
# to know the length accurately. The get_data() call would drop
# bad epochs anyway
self.drop_bad()
# total_size tracks sizes that get split
# over_size tracks overhead (tags, things that get written to each)
if len(self) == 0:
warn('Saving epochs with no data')
total_size = 0
else:
d = self[0].get_data()
# this should be guaranteed by subclasses
assert d.dtype in ('>f8', '<f8', '>c16', '<c16')
total_size = d.nbytes * len(self)
self._check_consistency()
over_size = 0
if fmt == "single":
total_size //= 2 # 64bit data converted to 32bit before writing.
over_size += 32 # FIF tags
# Account for all the other things we write, too
# 1. meas_id block plus main epochs block
over_size += 132
# 2. measurement info (likely slight overestimate, but okay)
over_size += object_size(self.info) + 16 * len(self.info)
# 3. events and event_id in its own block
total_size += self.events.size * 4
over_size += len(_event_id_string(self.event_id)) + 72
# 4. Metadata in a block of its own
if self.metadata is not None:
total_size += len(_prepare_write_metadata(self.metadata))
over_size += 56
# 5. first sample, last sample, baseline
over_size += 40 * (self.baseline is not None) + 40
# 6. drop log: gets written to each, with IGNORE for ones that are
# not part of it. So make a fake one with all having entries.
drop_size = len(json.dumps(self.drop_log)) + 16
drop_size += 8 * (len(self.selection) - 1) # worst case: all but one
over_size += drop_size
# 7. reject params
reject_params = _pack_reject_params(self)
if reject_params:
over_size += len(json.dumps(reject_params)) + 16
# 8. selection
total_size += self.selection.size * 4
over_size += 16
# 9. end of file tags
over_size += _NEXT_FILE_BUFFER
logger.debug(f' Overhead size: {str(over_size).rjust(15)}')
logger.debug(f' Splittable size: {str(total_size).rjust(15)}')
logger.debug(f' Split size: {str(split_size_bytes).rjust(15)}')
# need at least one per
n_epochs = len(self)
n_per = total_size // n_epochs if n_epochs else 0
min_size = n_per + over_size
if split_size_bytes < min_size:
raise ValueError(
f'The split size {split_size} is too small to safely write '
'the epochs contents, minimum split size is '
f'{sizeof_fmt(min_size)} ({min_size} bytes)')
# This is like max(int(ceil(total_size / split_size)), 1) but cleaner
n_parts = max(
(total_size - 1) // (split_size_bytes - over_size) + 1, 1)
assert n_parts >= 1, n_parts
if n_parts > 1:
logger.info(f'Splitting into {n_parts} parts')
if n_parts > 100: # This must be an error
raise ValueError(
f'Split size {split_size} would result in writing '
f'{n_parts} files')
if len(self.drop_log) > 100000:
warn(f'epochs.drop_log contains {len(self.drop_log)} entries '
f'which will incur up to a {sizeof_fmt(drop_size)} writing '
f'overhead (per split file), consider using '
f'epochs.reset_drop_log_selection() prior to writing')
epoch_idxs = np.array_split(np.arange(n_epochs), n_parts)
for part_idx, epoch_idx in enumerate(epoch_idxs):
this_epochs = self[epoch_idx] if n_parts > 1 else self
# avoid missing event_ids in splits
this_epochs.event_id = self.event_id
_save_split(this_epochs, fname, part_idx, n_parts, fmt)
def equalize_event_counts(self, event_ids, method='mintime'):
"""Equalize the number of trials in each condition.
It tries to make the remaining epochs occur as close as possible in
time. This method works based on the idea that if there happened to be
some time-varying (like on the scale of minutes) noise characteristics
during a recording, they could be compensated for (to some extent) in
the equalization process. This method thus seeks to reduce any of
those effects by minimizing the differences in the times of the events
in the two sets of epochs. For example, if one had event times
[1, 2, 3, 4, 120, 121] and the other one had [3.5, 4.5, 120.5, 121.5],
it would remove events at times [1, 2] in the first epochs and not
[120, 121].
Parameters
----------
event_ids : list
The event types to equalize. Each entry in the list can either be
a str (single event) or a list of str. In the case where one of
the entries is a list of str, event_ids in that list will be
grouped together before equalizing trial counts across conditions.
In the case where partial matching is used (using '/' in
``event_ids``), ``event_ids`` will be matched according to the
provided tags, that is, processing works as if the event_ids
matched by the provided tags had been supplied instead.
The event_ids must identify nonoverlapping subsets of the epochs.
method : str
If 'truncate', events will be truncated from the end of each event
list. If 'mintime', timing differences between each event list
will be minimized.
Returns
-------
epochs : instance of Epochs
The modified Epochs instance.
indices : array of int
Indices from the original events list that were dropped.
Notes
-----
For example (if epochs.event_id was {'Left': 1, 'Right': 2,
'Nonspatial': 3}):
epochs.equalize_event_counts([['Left', 'Right'], 'Nonspatial'])
would equalize the number of trials in the 'Nonspatial' condition with
the total number of trials in the 'Left' and 'Right' conditions.
If multiple indices are provided (e.g. 'Left' and 'Right' in the
example above), it is not guaranteed that after equalization, the
conditions will contribute evenly. E.g., it is possible to end up
with 70 'Nonspatial' trials, 69 'Left' and 1 'Right'.
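Examples
--------
A sketch reusing the hypothetical event names from the note above.
>>> epochs.equalize_event_counts([['Left', 'Right'], 'Nonspatial'])  # doctest: +SKIP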
"""
if len(event_ids) == 0:
raise ValueError('event_ids must have at least one element')
if not self._bad_dropped:
self.drop_bad()
# figure out how to equalize
eq_inds = list()
# deal with hierarchical tags
ids = self.event_id
orig_ids = list(event_ids)
tagging = False
if "/" in "".join(ids):
# make string inputs a list of length 1
event_ids = [[x] if isinstance(x, str) else x
for x in event_ids]
for ids_ in event_ids: # check if tagging is attempted
if any([id_ not in ids for id_ in ids_]):
tagging = True
# 1. treat everything that's not in event_id as a tag
# 2a. for tags, find all the event_ids matched by the tags
# 2b. for non-tag ids, just pass them directly
# 3. do this for every input
event_ids = [[k for k in ids
if all((tag in k.split("/")
for tag in id_))] # ids matching all tags
if all(id__ not in ids for id__ in id_)
else id_ # straight pass for non-tag inputs
for id_ in event_ids]
for ii, id_ in enumerate(event_ids):
if len(id_) == 0:
raise KeyError(orig_ids[ii] + "not found in the "
"epoch object's event_id.")
elif len({sub_id in ids for sub_id in id_}) != 1:
err = ("Don't mix hierarchical and regular event_ids"
" like in \'%s\'." % ", ".join(id_))
raise ValueError(err)
# raise for non-orthogonal tags
if tagging is True:
events_ = [set(self[x].events[:, 0]) for x in event_ids]
doubles = events_[0].intersection(events_[1])
if len(doubles):
raise ValueError("The two sets of epochs are "
"overlapping. Provide an "
"orthogonal selection.")
for eq in event_ids:
eq_inds.append(self._keys_to_idx(eq))
event_times = [self.events[e, 0] for e in eq_inds]
indices = _get_drop_indices(event_times, method)
# need to re-index indices
indices = np.concatenate([e[idx] for e, idx in zip(eq_inds, indices)])
self.drop(indices, reason='EQUALIZED_COUNT')
# actually remove the indices
return self, indices
@fill_doc
def to_data_frame(self, picks=None, index=None,
scalings=None, copy=True, long_format=False,
time_format='ms'):
"""Export data in tabular structure as a pandas DataFrame.
Channels are converted to columns in the DataFrame. By default,
additional columns "time", "epoch" (epoch number), and "condition"
(epoch event description) are added, unless ``index`` is not ``None``
(in which case the columns specified in ``index`` will be used to form
the DataFrame's index instead).
Parameters
----------
%(picks_all)s
%(df_index_epo)s
Valid string values are 'time', 'epoch', and 'condition'.
Defaults to ``None``.
%(df_scalings)s
%(df_copy)s
%(df_longform_epo)s
%(df_time_format)s
.. versionadded:: 0.20
Returns
-------
%(df_return)s
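Examples
--------
A sketch assuming an existing ``epochs`` instance (assumption).
>>> df = epochs.to_data_frame(index=['condition', 'epoch', 'time'])  # doctest: +SKIP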
"""
# check pandas once here, instead of in each private utils function
pd = _check_pandas_installed() # noqa
# arg checking
valid_index_args = ['time', 'epoch', 'condition']
valid_time_formats = ['ms', 'timedelta']
index = _check_pandas_index_arguments(index, valid_index_args)
time_format = _check_time_format(time_format, valid_time_formats)
# get data
picks = _picks_to_idx(self.info, picks, 'all', exclude=())
data = self.get_data()[:, picks, :]
times = self.times
n_epochs, n_picks, n_times = data.shape
data = np.hstack(data).T # (time*epochs) x signals
if copy:
data = data.copy()
data = _scale_dataframe_data(self, data, picks, scalings)
# prepare extra columns / multiindex
mindex = list()
times = np.tile(times, n_epochs)
times = _convert_times(self, times, time_format)
mindex.append(('time', times))
rev_event_id = {v: k for k, v in self.event_id.items()}
conditions = [rev_event_id[k] for k in self.events[:, 2]]
mindex.append(('condition', np.repeat(conditions, n_times)))
mindex.append(('epoch', np.repeat(self.selection, n_times)))
assert all(len(mdx) == len(mindex[0]) for mdx in mindex)
# build DataFrame
df = _build_data_frame(self, data, picks, long_format, mindex, index,
default_index=['condition', 'epoch', 'time'])
return df
def as_type(self, ch_type='grad', mode='fast'):
"""Compute virtual epochs using interpolated fields.
.. Warning:: Using virtual epochs to compute inverse can yield
unexpected results. The virtual channels have ``'_v'`` appended
at the end of the names to emphasize that the data contained in
them are interpolated.
Parameters
----------
ch_type : str
The destination channel type. It can be 'mag' or 'grad'.
mode : str
Either ``'accurate'`` or ``'fast'``, determines the quality of the
Legendre polynomial expansion used. ``'fast'`` should be sufficient
for most applications.
Returns
-------
epochs : instance of mne.EpochsArray
The transformed epochs object containing only virtual channels.
Notes
-----
This method returns a copy and does not modify the data it
operates on. It also returns an EpochsArray instance.
.. versionadded:: 0.20.0
"""
from .forward import _as_meg_type_inst
return _as_meg_type_inst(self, ch_type=ch_type, mode=mode)
def _drop_log_stats(drop_log, ignore=('IGNORED',)):
"""Compute drop log stats.
Parameters
----------
drop_log : list of list
Epoch drop log from Epochs.drop_log.
ignore : list
The drop reasons to ignore.
Returns
-------
perc : float
Total percentage of epochs dropped.
"""
if not isinstance(drop_log, tuple) or \
not all(isinstance(d, tuple) for d in drop_log) or \
not all(isinstance(s, str) for d in drop_log for s in d):
raise TypeError('drop_log must be a tuple of tuple of str')
perc = 100 * np.mean([len(d) > 0 for d in drop_log
if not any(r in ignore for r in d)])
return perc
def make_metadata(events, event_id, tmin, tmax, sfreq,
row_events=None, keep_first=None, keep_last=None):
"""Generate metadata from events for use with `mne.Epochs`.
This function mimics the epoching process (it constructs time windows
around time-locked "events of interest") and collates information about
any other events that occurred within those time windows. The information
is returned as a :class:`pandas.DataFrame` suitable for use as
`~mne.Epochs` metadata: one row per time-locked event, and columns
indicating presence/absence and latency of each ancillary event type.
The function will also return a new ``events`` array and ``event_id``
dictionary that correspond to the generated metadata.
Parameters
----------
events : array, shape (m, 3)
The :term:`events array <events>`. By default, the returned metadata
:class:`~pandas.DataFrame` will have as many rows as the events array.
To create rows for only a subset of events, pass the ``row_events``
parameter.
event_id : dict
A mapping from event names (keys) to event IDs (values). The event
names will be incorporated as columns of the returned metadata
:class:`~pandas.DataFrame`.
tmin, tmax : float
Start and end of the time interval for metadata generation in seconds,
relative to the time-locked event of the respective time window.
.. note::
If you are planning to attach the generated metadata to
`~mne.Epochs` and intend to include only events that fall inside
your epochs time interval, pass the same ``tmin`` and ``tmax``
values here as you use for your epochs.
sfreq : float
The sampling frequency of the data from which the events array was
extracted.
row_events : list of str | str | None
Event types around which to create the time windows / for which to
create **rows** in the returned metadata :class:`pandas.DataFrame`. If
provided, the string(s) must be keys of ``event_id``. If ``None``
(default), rows are created for **all** event types present in
``event_id``.
keep_first : str | list of str | None
Specify subsets of :term:`hierarchical event descriptors` (HEDs,
inspired by :footcite:`BigdelyShamloEtAl2013`) matching events of which
the **first occurrence** within each time window shall be stored in
addition to the original events.
.. note::
There is currently no way to retain **all** occurrences of a
repeated event. The ``keep_first`` parameter can be used to specify
subsets of HEDs, effectively creating a new event type that is the
union of all events types described by the matching HED pattern.
Only the very first event of this set will be kept.
For example, you might have two response events types,
``response/left`` and ``response/right``; and in trials with both
responses occurring, you want to keep only the first response. In this
case, you can pass ``keep_first='response'``. This will add two new
columns to the metadata: ``response``, indicating at what **time** the
event occurred, relative to the time-locked event; and
``first_response``, stating which **type** (``'left'`` or ``'right'``)
of event occurred.
To match specific subsets of HEDs describing different sets of events,
pass a list of these subsets, e.g.
``keep_first=['response', 'stimulus']``. If ``None`` (default), no
event aggregation will take place and no new columns will be created.
.. note::
By default, this function will always retain the first instance
of any event in each time window. For example, if a time window
contains two ``'response'`` events, the generated ``response``
column will automatically refer to the first of the two events. In
this specific case, it is therefore **not** necessary to make use of
the ``keep_first`` parameter – unless you need to differentiate
between two types of responses, like in the example above.
keep_last : list of str | None
Same as ``keep_first``, but for keeping only the **last** occurrence
of matching events. The column indicating the **type** of an event
``myevent`` will be named ``last_myevent``.
Returns
-------
metadata : pandas.DataFrame
Metadata for each row event, with the following columns:
- ``event_name``, with strings indicating the name of the time-locked
event ("row event") for that specific time window
- one column per event type in ``event_id``, with the same name; floats
indicating the latency of the event in seconds, relative to the
time-locked event
- if applicable, additional columns named after the ``keep_first`` and
``keep_last`` event types; floats indicating the latency of the
event in seconds, relative to the time-locked event
- if applicable, additional columns ``first_{event_type}`` and
``last_{event_type}`` for ``keep_first`` and ``keep_last`` event
types, respectively; the values will be strings indicating which event
types were matched by the provided HED patterns
events : array, shape (n, 3)
The events corresponding to the generated metadata, i.e. one
time-locked event per row.
event_id : dict
The event dictionary corresponding to the new events array. This will
be identical to the input dictionary unless ``row_events`` is supplied,
in which case it will only contain the events provided there.
Notes
-----
The time window used for metadata generation need not correspond to the
time window used to create the `~mne.Epochs`, to which the metadata will
be attached; it may well be much shorter or longer, or not overlap at all,
if desired. This can be useful, for example, to include events that occurred
before or after an epoch, e.g. during the inter-trial interval.
.. versionadded:: 0.23
References
----------
.. footbibliography::
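Examples
--------
A sketch; ``all_events``, ``all_event_id``, ``raw``, and the
``'stimulus'`` event name are assumptions for illustration.
>>> metadata, events, event_id = make_metadata(
...     events=all_events, event_id=all_event_id, tmin=-0.2, tmax=0.5,
...     sfreq=raw.info['sfreq'], row_events='stimulus')  # doctest: +SKIP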
"""
from .utils.mixin import _hid_match
pd = _check_pandas_installed()
_validate_type(event_id, types=(dict,), item_name='event_id')
_validate_type(row_events, types=(None, str, list, tuple),
item_name='row_events')
_validate_type(keep_first, types=(None, str, list, tuple),
item_name='keep_first')
_validate_type(keep_last, types=(None, str, list, tuple),
item_name='keep_last')
if not event_id:
raise ValueError('event_id dictionary must contain at least one entry')
def _ensure_list(x):
if x is None:
return []
elif isinstance(x, str):
return [x]
else:
return list(x)
row_events = _ensure_list(row_events)
keep_first = _ensure_list(keep_first)
keep_last = _ensure_list(keep_last)
keep_first_and_last = set(keep_first) & set(keep_last)
if keep_first_and_last:
raise ValueError(f'The event names in keep_first and keep_last must '
f'be mutually exclusive. Specified in both: '
f'{", ".join(sorted(keep_first_and_last))}')
del keep_first_and_last
for param_name, values in dict(keep_first=keep_first,
keep_last=keep_last).items():
for first_last_event_name in values:
try:
_hid_match(event_id, [first_last_event_name])
except KeyError:
raise ValueError(
f'Event "{first_last_event_name}", specified in '
f'{param_name}, cannot be found in event_id dictionary')
event_name_diff = sorted(set(row_events) - set(event_id.keys()))
if event_name_diff:
raise ValueError(
f'Present in row_events, but missing from event_id: '
f'{", ".join(event_name_diff)}')
del event_name_diff
# First and last sample of each epoch, relative to the time-locked event
# This follows the approach taken in mne.Epochs
start_sample = int(round(tmin * sfreq))
stop_sample = int(round(tmax * sfreq)) + 1
# Make indexing easier
# We create the DataFrame before subsetting the events so we end up with
# indices corresponding to the original event indices. Not used for now,
# but might come in handy sometime later
events_df = pd.DataFrame(events, columns=('sample', 'prev_id', 'id'))
id_to_name_map = {v: k for k, v in event_id.items()}
# Only keep events that are of interest
events = events[np.in1d(events[:, 2], list(event_id.values()))]
events_df = events_df.loc[events_df['id'].isin(event_id.values()), :]
# Prepare & condition the metadata DataFrame
# Avoid column name duplications if the exact same event name appears in
# event_id.keys() and keep_first / keep_last simultaneously
keep_first_cols = [col for col in keep_first if col not in event_id]
keep_last_cols = [col for col in keep_last if col not in event_id]
first_cols = [f'first_{col}' for col in keep_first_cols]
last_cols = [f'last_{col}' for col in keep_last_cols]
columns = ['event_name',
*event_id.keys(),
*keep_first_cols,
*keep_last_cols,
*first_cols,
*last_cols]
data = np.empty((len(events_df), len(columns)))
metadata = pd.DataFrame(data=data, columns=columns, index=events_df.index)
# Event names
metadata.iloc[:, 0] = ''
# Event times
start_idx = 1
stop_idx = (start_idx + len(event_id.keys()) +
len(keep_first_cols + keep_last_cols))
metadata.iloc[:, start_idx:stop_idx] = np.nan
# keep_first and keep_last names
start_idx = stop_idx
metadata.iloc[:, start_idx:] = None
# We're all set, let's iterate over all events and fill in the
# respective cells in the metadata. We will subset this to include only
# `row_events` later
for row_event in events_df.itertuples(name='RowEvent'):
row_idx = row_event.Index
metadata.loc[row_idx, 'event_name'] = \
id_to_name_map[row_event.id]
# Determine which events fall into the current epoch
window_start_sample = row_event.sample + start_sample
window_stop_sample = row_event.sample + stop_sample
events_in_window = events_df.loc[
(events_df['sample'] >= window_start_sample) &
(events_df['sample'] <= window_stop_sample), :]
assert not events_in_window.empty
# Store the metadata
for event in events_in_window.itertuples(name='Event'):
event_sample = event.sample - row_event.sample
event_time = event_sample / sfreq
event_time = 0 if np.isclose(event_time, 0) else event_time
event_name = id_to_name_map[event.id]
if not np.isnan(metadata.loc[row_idx, event_name]):
# Event already exists in current time window!
assert metadata.loc[row_idx, event_name] <= event_time
if event_name not in keep_last:
continue
metadata.loc[row_idx, event_name] = event_time
# Handle keep_first and keep_last event aggregation
for event_group_name in keep_first + keep_last:
if event_name not in _hid_match(event_id, [event_group_name]):
continue
if event_group_name in keep_first:
first_last_col = f'first_{event_group_name}'
else:
first_last_col = f'last_{event_group_name}'
old_time = metadata.loc[row_idx, event_group_name]
if not np.isnan(old_time):
if ((event_group_name in keep_first and
old_time <= event_time) or
(event_group_name in keep_last and
old_time >= event_time)):
continue
if event_group_name not in event_id:
# This is an HED. Strip redundant information from the
# event name
name = (event_name
.replace(event_group_name, '')
.replace('//', '/')
.strip('/'))
metadata.loc[row_idx, first_last_col] = name
del name
metadata.loc[row_idx, event_group_name] = event_time
# Only keep rows of interest
if row_events:
event_id_timelocked = {name: val for name, val in event_id.items()
if name in row_events}
events = events[np.in1d(events[:, 2],
list(event_id_timelocked.values()))]
metadata = metadata.loc[
metadata['event_name'].isin(event_id_timelocked)]
assert len(events) == len(metadata)
event_id = event_id_timelocked
return metadata, events, event_id
@fill_doc
class Epochs(BaseEpochs):
"""Epochs extracted from a Raw instance.
Parameters
----------
%(epochs_raw)s
%(epochs_events_event_id)s
%(epochs_tmin_tmax)s
%(baseline_epochs)s
Defaults to ``(None, 0)``, i.e. beginning of the data until
time point zero.
%(picks_all)s
preload : bool
%(epochs_preload)s
%(reject_epochs)s
%(flat)s
%(proj_epochs)s
%(decim)s
%(epochs_reject_tmin_tmax)s
%(epochs_detrend)s
%(epochs_on_missing)s
%(reject_by_annotation_epochs)s
%(epochs_metadata)s
%(epochs_event_repeated)s
%(verbose)s
Attributes
----------
info : instance of Info
Measurement info.
event_id : dict
Names of conditions corresponding to event_ids.
ch_names : list of string
List of channel names.
selection : array
List of indices of selected events (not dropped or ignored etc.). For
example, if the original event array had 4 events and the second event
has been dropped, this attribute would be np.array([0, 2, 3]).
preload : bool
Indicates whether epochs are in memory.
drop_log : tuple of tuple
A tuple of the same length as the event array used to initialize the
Epochs object. If the i-th original event is still part of the
selection, drop_log[i] will be an empty tuple; otherwise it will be
a tuple of the reasons the event is no longer in the selection, e.g.:
- 'IGNORED'
If it isn't part of the current subset defined by the user
- 'NO_DATA' or 'TOO_SHORT'
If the epoch didn't contain enough data
- names of one or more channels
If those channels exceeded the amplitude (``reject``) or flatness
(``flat``) rejection thresholds
- 'EQUALIZED_COUNTS'
See :meth:`~mne.Epochs.equalize_event_counts`
- 'USER'
For user-defined reasons (see :meth:`~mne.Epochs.drop`).
filename : str
The filename of the object.
times : ndarray
Time vector in seconds. Goes from ``tmin`` to ``tmax``. Time interval
between consecutive time samples is equal to the inverse of the
sampling frequency.
%(verbose)s
See Also
--------
mne.epochs.combine_event_ids
mne.Epochs.equalize_event_counts
Notes
-----
When accessing data, Epochs are detrended, baseline-corrected, and
decimated, then projectors are (optionally) applied.
For indexing and slicing using ``epochs[...]``, see
:meth:`mne.Epochs.__getitem__`.
All methods for iteration over objects (using :meth:`mne.Epochs.__iter__`,
:meth:`mne.Epochs.iter_evoked` or :meth:`mne.Epochs.next`) use the same
internal state.
If ``event_repeated`` is set to ``'merge'``, the coinciding events
(duplicates) will be merged into a single event_id and assigned a new
id_number as::
event_id['{event_id_1}/{event_id_2}/...'] = new_id_number
For example with the event_id ``{'aud': 1, 'vis': 2}`` and the events
``[[0, 0, 1], [0, 0, 2]]``, the "merge" behavior will update both event_id
and events to be: ``{'aud/vis': 3}`` and ``[[0, 0, 3]]`` respectively.
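Examples
--------
A sketch; ``raw`` and the event code/name are assumptions.
>>> events = mne.find_events(raw)  # doctest: +SKIP
>>> epochs = mne.Epochs(raw, events, event_id={'auditory': 1},
...                     tmin=-0.2, tmax=0.5, baseline=(None, 0),
...                     preload=True)  # doctest: +SKIP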
"""
@verbose
def __init__(self, raw, events, event_id=None, tmin=-0.2, tmax=0.5,
baseline=(None, 0), picks=None, preload=False, reject=None,
flat=None, proj=True, decim=1, reject_tmin=None,
reject_tmax=None, detrend=None, on_missing='raise',
reject_by_annotation=True, metadata=None,
event_repeated='error', verbose=None): # noqa: D102
if not isinstance(raw, BaseRaw):
raise ValueError('The first argument to `Epochs` must be an '
'instance of mne.io.BaseRaw')
info = deepcopy(raw.info)
# proj is on when applied in Raw
proj = proj or raw.proj
self.reject_by_annotation = reject_by_annotation
# call BaseEpochs constructor
super(Epochs, self).__init__(
info, None, events, event_id, tmin, tmax, metadata=metadata,
baseline=baseline, raw=raw, picks=picks, reject=reject,
flat=flat, decim=decim, reject_tmin=reject_tmin,
reject_tmax=reject_tmax, detrend=detrend,
proj=proj, on_missing=on_missing, preload_at_end=preload,
event_repeated=event_repeated, verbose=verbose)
@verbose
def _get_epoch_from_raw(self, idx, verbose=None):
"""Load one epoch from disk.
Returns
-------
data : array | str | None
If string, it's details on the rejection reason.
If array, it's the data in the desired range (good segment).
If None, it means no data is available.
"""
if self._raw is None:
# This should never happen, as raw=None only if preload=True
raise ValueError('An error has occurred, no valid raw file found. '
'Please report this to the mne-python '
'developers.')
sfreq = self._raw.info['sfreq']
event_samp = self.events[idx, 0]
# Read a data segment from "start" to "stop" in samples
first_samp = self._raw.first_samp
start = int(round(event_samp + self._raw_times[0] * sfreq))
start -= first_samp
stop = start + len(self._raw_times)
# reject_tmin, and reject_tmax need to be converted to samples to
# check the reject_by_annotation boundaries: reject_start, reject_stop
reject_tmin = self.reject_tmin
if reject_tmin is None:
reject_tmin = self._raw_times[0]
reject_start = int(round(event_samp + reject_tmin * sfreq))
reject_start -= first_samp
reject_tmax = self.reject_tmax
if reject_tmax is None:
reject_tmax = self._raw_times[-1]
diff = int(round((self._raw_times[-1] - reject_tmax) * sfreq))
reject_stop = stop - diff
logger.debug(' Getting epoch for %d-%d' % (start, stop))
data = self._raw._check_bad_segment(start, stop, self.picks,
reject_start, reject_stop,
self.reject_by_annotation)
return data
@fill_doc
class EpochsArray(BaseEpochs):
"""Epochs object from numpy array.
Parameters
----------
data : array, shape (n_epochs, n_channels, n_times)
The channels' time series for each epoch. See notes for proper units of
measure.
info : instance of Info
Info dictionary. Consider using ``create_info`` to populate
this structure.
events : None | array of int, shape (n_events, 3)
The events typically returned by the read_events function.
If some events don't match the events of interest as specified
by event_id, they will be marked as 'IGNORED' in the drop log.
If None (default), all event values are set to 1 and event time-samples
are set to range(n_epochs).
tmin : float
Start time before event. If nothing provided, defaults to 0.
event_id : int | list of int | dict | None
The id of the event to consider. If dict,
the keys can later be used to access associated events. Example:
dict(auditory=1, visual=3). If int, a dict will be created with
the id as string. If a list, all events with the IDs specified
in the list are used. If None, all events will be used
and a dict is created with string integer names corresponding
to the event id integers.
%(reject_epochs)s
%(flat)s
reject_tmin : scalar | None
Start of the time window used to reject epochs (with the default None,
the window will start with tmin).
reject_tmax : scalar | None
End of the time window used to reject epochs (with the default None,
the window will end with tmax).
%(baseline_epochs)s
Defaults to ``None``, i.e. no baseline correction.
proj : bool | 'delayed'
Apply SSP projection vectors. See :class:`mne.Epochs` for details.
on_missing : str
See :class:`mne.Epochs` docstring for details.
metadata : instance of pandas.DataFrame | None
See :class:`mne.Epochs` docstring for details.
.. versionadded:: 0.16
selection : ndarray | None
The selection compared to the original set of epochs.
Can be None to use ``np.arange(len(events))``.
.. versionadded:: 0.16
%(verbose)s
See Also
--------
create_info
EvokedArray
io.RawArray
Notes
-----
Proper units of measure:
* V: eeg, eog, seeg, dbs, emg, ecg, bio, ecog
* T: mag
* T/m: grad
* M: hbo, hbr
* Am: dipole
* AU: misc
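Examples
--------
A sketch; the channel names, sampling rate, and random data are
illustrative assumptions.
>>> import numpy as np
>>> import mne
>>> data = np.random.randn(10, 2, 100) * 1e-6  # 10 epochs, 2 EEG channels
>>> info = mne.create_info(['EEG1', 'EEG2'], sfreq=100., ch_types='eeg')  # doctest: +SKIP
>>> epochs = mne.EpochsArray(data, info, tmin=-0.1)  # doctest: +SKIP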
"""
@verbose
def __init__(self, data, info, events=None, tmin=0, event_id=None,
reject=None, flat=None, reject_tmin=None,
reject_tmax=None, baseline=None, proj=True,
on_missing='raise', metadata=None, selection=None,
verbose=None): # noqa: D102
dtype = np.complex128 if np.any(np.iscomplex(data)) else np.float64
data = np.asanyarray(data, dtype=dtype)
if data.ndim != 3:
raise ValueError('Data must be a 3D array of shape (n_epochs, '
'n_channels, n_samples)')
if len(info['ch_names']) != data.shape[1]:
raise ValueError('Info and data must have same number of '
'channels.')
if events is None:
n_epochs = len(data)
events = _gen_events(n_epochs)
info = info.copy() # do not modify original info
tmax = (data.shape[2] - 1) / info['sfreq'] + tmin
super(EpochsArray, self).__init__(
info, data, events, event_id, tmin, tmax, baseline, reject=reject,
flat=flat, reject_tmin=reject_tmin, reject_tmax=reject_tmax,
decim=1, metadata=metadata, selection=selection, proj=proj,
on_missing=on_missing)
if self.baseline is not None:
self._do_baseline = True
if len(events) != np.in1d(self.events[:, 2],
list(self.event_id.values())).sum():
raise ValueError('The events must only contain event numbers from '
'event_id')
detrend_picks = self._detrend_picks
for e in self._data:
# This is safe without assignment b/c there is no decim
self._detrend_offset_decim(e, detrend_picks)
self.drop_bad()
def combine_event_ids(epochs, old_event_ids, new_event_id, copy=True):
"""Collapse event_ids from an epochs instance into a new event_id.
Parameters
----------
epochs : instance of Epochs
The epochs to operate on.
old_event_ids : str, or list
Conditions to collapse together.
new_event_id : dict, or int
A one-element dict (or a single integer) for the new
condition. Note that for safety, this cannot be any
existing id (in epochs.event_id.values()).
copy : bool
Whether to return a new instance or modify in place.
Returns
-------
epochs : instance of Epochs
The modified epochs.
Notes
-----
For example (if epochs.event_id was ``{'Left': 1, 'Right': 2}``)::
combine_event_ids(epochs, ['Left', 'Right'], {'Directional': 12})
would create a 'Directional' entry in epochs.event_id replacing
'Left' and 'Right' (combining their trials).
"""
epochs = epochs.copy() if copy else epochs
old_event_ids = np.asanyarray(old_event_ids)
if isinstance(new_event_id, int):
new_event_id = {str(new_event_id): new_event_id}
else:
if not isinstance(new_event_id, dict):
raise ValueError('new_event_id must be a dict or int')
if not len(list(new_event_id.keys())) == 1:
raise ValueError('new_event_id dict must have one entry')
new_event_num = list(new_event_id.values())[0]
new_event_num = operator.index(new_event_num)
if new_event_num in epochs.event_id.values():
raise ValueError('new_event_id value must not already exist')
# could use .pop() here, but if a later one doesn't exist, we're
# in trouble, so run them all here and pop() later
old_event_nums = np.array([epochs.event_id[key] for key in old_event_ids])
# find the ones to replace
inds = np.any(epochs.events[:, 2][:, np.newaxis] ==
old_event_nums[np.newaxis, :], axis=1)
# replace the event numbers in the events list
epochs.events[inds, 2] = new_event_num
# delete old entries
for key in old_event_ids:
epochs.event_id.pop(key)
# add the new entry
epochs.event_id.update(new_event_id)
return epochs
def equalize_epoch_counts(epochs_list, method='mintime'):
"""Equalize the number of trials in multiple Epoch instances.
Parameters
----------
epochs_list : list of Epochs instances
The Epochs instances to equalize trial counts for.
method : str
If 'truncate', events will be truncated from the end of each event
list. If 'mintime', timing differences between each event list will be
minimized.
Notes
-----
This tries to make the remaining epochs occur as close as possible in
time. This method works based on the idea that if there happened to be some
time-varying (like on the scale of minutes) noise characteristics during
a recording, they could be compensated for (to some extent) in the
equalization process. This method thus seeks to reduce any of those effects
by minimizing the differences in the times of the events in the two sets of
epochs. For example, if one had event times [1, 2, 3, 4, 120, 121] and the
other one had [3.5, 4.5, 120.5, 121.5], it would remove events at times
[1, 2] in the first epochs and not [120, 121].
Examples
--------
>>> equalize_epoch_counts([epochs1, epochs2]) # doctest: +SKIP
"""
if not all(isinstance(e, BaseEpochs) for e in epochs_list):
raise ValueError('All inputs must be Epochs instances')
# make sure bad epochs are dropped
for e in epochs_list:
if not e._bad_dropped:
e.drop_bad()
event_times = [e.events[:, 0] for e in epochs_list]
indices = _get_drop_indices(event_times, method)
for e, inds in zip(epochs_list, indices):
e.drop(inds, reason='EQUALIZED_COUNT')
def _get_drop_indices(event_times, method):
"""Get indices to drop from multiple event timing lists."""
small_idx = np.argmin([e.shape[0] for e in event_times])
small_e_times = event_times[small_idx]
_check_option('method', method, ['mintime', 'truncate'])
indices = list()
for e in event_times:
if method == 'mintime':
mask = _minimize_time_diff(small_e_times, e)
else:
mask = np.ones(e.shape[0], dtype=bool)
mask[small_e_times.shape[0]:] = False
indices.append(np.where(np.logical_not(mask))[0])
return indices
def _minimize_time_diff(t_shorter, t_longer):
"""Find a boolean mask to minimize timing differences."""
from scipy.interpolate import interp1d
keep = np.ones((len(t_longer)), dtype=bool)
if len(t_shorter) == 0:
keep.fill(False)
return keep
scores = np.ones((len(t_longer)))
x1 = np.arange(len(t_shorter))
# The first set of keep masks to test
kwargs = dict(copy=False, bounds_error=False)
# this is a speed tweak, only exists for certain versions of scipy
if 'assume_sorted' in _get_args(interp1d.__init__):
kwargs['assume_sorted'] = True
shorter_interp = interp1d(x1, t_shorter, fill_value=t_shorter[-1],
**kwargs)
for ii in range(len(t_longer) - len(t_shorter)):
scores.fill(np.inf)
# set up the keep masks to test, eliminating any rows that are already
# gone
keep_mask = ~np.eye(len(t_longer), dtype=bool)[keep]
keep_mask[:, ~keep] = False
# Check every possible removal to see if it minimizes
x2 = np.arange(len(t_longer) - ii - 1)
t_keeps = np.array([t_longer[km] for km in keep_mask])
longer_interp = interp1d(x2, t_keeps, axis=1,
fill_value=t_keeps[:, -1],
**kwargs)
d1 = longer_interp(x1) - t_shorter
d2 = shorter_interp(x2) - t_keeps
scores[keep] = np.abs(d1, d1).sum(axis=1) + np.abs(d2, d2).sum(axis=1)
keep[np.argmin(scores)] = False
return keep
@verbose
def _is_good(e, ch_names, channel_type_idx, reject, flat, full_report=False,
ignore_chs=[], verbose=None):
"""Test if data segment e is good according to reject and flat.
If full_report=True, it will give True/False as well as a list of all
offending channels.
"""
bad_tuple = tuple()
has_printed = False
checkable = np.ones(len(ch_names), dtype=bool)
checkable[np.array([c in ignore_chs
for c in ch_names], dtype=bool)] = False
for refl, f, t in zip([reject, flat], [np.greater, np.less], ['', 'flat']):
if refl is not None:
for key, thresh in refl.items():
idx = channel_type_idx[key]
name = key.upper()
if len(idx) > 0:
e_idx = e[idx]
deltas = np.max(e_idx, axis=1) - np.min(e_idx, axis=1)
checkable_idx = checkable[idx]
idx_deltas = np.where(np.logical_and(f(deltas, thresh),
checkable_idx))[0]
if len(idx_deltas) > 0:
bad_names = [ch_names[idx[i]] for i in idx_deltas]
if (not has_printed):
logger.info(' Rejecting %s epoch based on %s : '
'%s' % (t, name, bad_names))
has_printed = True
if not full_report:
return False
else:
bad_tuple += tuple(bad_names)
if not full_report:
return True
else:
if bad_tuple == ():
return True, None
else:
return False, bad_tuple
def _read_one_epoch_file(f, tree, preload):
"""Read a single FIF file."""
with f as fid:
# Read the measurement info
info, meas = read_meas_info(fid, tree, clean_bads=True)
events, mappings = _read_events_fif(fid, tree)
# Metadata
metadata = None
metadata_tree = dir_tree_find(tree, FIFF.FIFFB_MNE_METADATA)
if len(metadata_tree) > 0:
for dd in metadata_tree[0]['directory']:
kind = dd.kind
pos = dd.pos
if kind == FIFF.FIFF_DESCRIPTION:
metadata = read_tag(fid, pos).data
metadata = _prepare_read_metadata(metadata)
break
# Locate the data of interest
processed = dir_tree_find(meas, FIFF.FIFFB_PROCESSED_DATA)
del meas
if len(processed) == 0:
raise ValueError('Could not find processed data')
epochs_node = dir_tree_find(tree, FIFF.FIFFB_MNE_EPOCHS)
if len(epochs_node) == 0:
# before version 0.11 we errantly saved with this tag instead of
# an MNE tag
epochs_node = dir_tree_find(tree, FIFF.FIFFB_EPOCHS)
if len(epochs_node) == 0:
epochs_node = dir_tree_find(tree, 122) # 122 used before v0.11
if len(epochs_node) == 0:
raise ValueError('Could not find epochs data')
my_epochs = epochs_node[0]
# Now find the data in the block
data = None
data_tag = None
bmin, bmax = None, None
baseline = None
selection = None
drop_log = None
reject_params = {}
for k in range(my_epochs['nent']):
kind = my_epochs['directory'][k].kind
pos = my_epochs['directory'][k].pos
if kind == FIFF.FIFF_FIRST_SAMPLE:
tag = read_tag(fid, pos)
first = int(tag.data)
elif kind == FIFF.FIFF_LAST_SAMPLE:
tag = read_tag(fid, pos)
last = int(tag.data)
elif kind == FIFF.FIFF_EPOCH:
# delay reading until later
fid.seek(pos, 0)
data_tag = read_tag_info(fid)
data_tag.pos = pos
data_tag.type = data_tag.type ^ (1 << 30)
elif kind in [FIFF.FIFF_MNE_BASELINE_MIN, 304]:
# Constant 304 was used before v0.11
tag = read_tag(fid, pos)
bmin = float(tag.data)
elif kind in [FIFF.FIFF_MNE_BASELINE_MAX, 305]:
# Constant 305 was used before v0.11
tag = read_tag(fid, pos)
bmax = float(tag.data)
elif kind == FIFF.FIFF_MNE_EPOCHS_SELECTION:
tag = read_tag(fid, pos)
selection = np.array(tag.data)
elif kind == FIFF.FIFF_MNE_EPOCHS_DROP_LOG:
tag = read_tag(fid, pos)
drop_log = tag.data
drop_log = json.loads(drop_log)
drop_log = tuple(tuple(x) for x in drop_log)
elif kind == FIFF.FIFF_MNE_EPOCHS_REJECT_FLAT:
tag = read_tag(fid, pos)
reject_params = json.loads(tag.data)
if bmin is not None or bmax is not None:
baseline = (bmin, bmax)
n_samp = last - first + 1
logger.info(' Found the data of interest:')
logger.info(' t = %10.2f ... %10.2f ms'
% (1000 * first / info['sfreq'],
1000 * last / info['sfreq']))
if info['comps'] is not None:
logger.info(' %d CTF compensation matrices available'
% len(info['comps']))
# Inspect the data
if data_tag is None:
raise ValueError('Epochs data not found')
epoch_shape = (len(info['ch_names']), n_samp)
size_expected = len(events) * np.prod(epoch_shape)
# on read double-precision is always used
if data_tag.type == FIFF.FIFFT_FLOAT:
datatype = np.float64
fmt = '>f4'
elif data_tag.type == FIFF.FIFFT_DOUBLE:
datatype = np.float64
fmt = '>f8'
elif data_tag.type == FIFF.FIFFT_COMPLEX_FLOAT:
datatype = np.complex128
fmt = '>c8'
elif data_tag.type == FIFF.FIFFT_COMPLEX_DOUBLE:
datatype = np.complex128
fmt = '>c16'
fmt_itemsize = np.dtype(fmt).itemsize
assert fmt_itemsize in (4, 8, 16)
size_actual = data_tag.size // fmt_itemsize - 16 // fmt_itemsize
if not size_actual == size_expected:
raise ValueError('Incorrect number of samples (%d instead of %d)'
% (size_actual, size_expected))
# Calibration factors
cals = np.array([[info['chs'][k]['cal'] *
info['chs'][k].get('scale', 1.0)]
for k in range(info['nchan'])], np.float64)
# Read the data
if preload:
data = read_tag(fid, data_tag.pos).data.astype(datatype)
data *= cals
# Put it all together
tmin = first / info['sfreq']
tmax = last / info['sfreq']
event_id = ({str(e): e for e in np.unique(events[:, 2])}
if mappings is None else mappings)
# In case epochs didn't have a FIFF.FIFF_MNE_EPOCHS_SELECTION tag
# (version < 0.8):
if selection is None:
selection = np.arange(len(events))
if drop_log is None:
drop_log = ((),) * len(events)
return (info, data, data_tag, events, event_id, metadata, tmin, tmax,
baseline, selection, drop_log, epoch_shape, cals, reject_params,
fmt)
@verbose
def read_epochs(fname, proj=True, preload=True, verbose=None):
"""Read epochs from a fif file.
Parameters
----------
fname : str | file-like
The epochs filename to load. Filename should end with -epo.fif or
-epo.fif.gz. If a file-like object is provided, preloading must be
used.
%(proj_epochs)s
preload : bool
If True, read all epochs from disk immediately. If False, epochs will
be read on demand.
%(verbose)s
Returns
-------
epochs : instance of Epochs
The epochs.
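Examples
--------
A sketch; the filename is a hypothetical placeholder.
>>> epochs = read_epochs('sample-epo.fif', preload=False)  # doctest: +SKIP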
"""
return EpochsFIF(fname, proj, preload, verbose)
class _RawContainer(object):
"""Helper for a raw data container."""
def __init__(self, fid, data_tag, event_samps, epoch_shape,
cals, fmt): # noqa: D102
self.fid = fid
self.data_tag = data_tag
self.event_samps = event_samps
self.epoch_shape = epoch_shape
self.cals = cals
self.proj = False
self.fmt = fmt
def __del__(self): # noqa: D105
self.fid.close()
@fill_doc
class EpochsFIF(BaseEpochs):
"""Epochs read from disk.
Parameters
----------
fname : str | file-like
The name of the file, which should end with -epo.fif or -epo.fif.gz. If
a file-like object is provided, preloading must be used.
%(proj_epochs)s
preload : bool
If True, read all epochs from disk immediately. If False, epochs will
be read on demand.
%(verbose)s
See Also
--------
mne.Epochs
mne.epochs.combine_event_ids
mne.Epochs.equalize_event_counts
"""
@verbose
def __init__(self, fname, proj=True, preload=True,
verbose=None): # noqa: D102
if isinstance(fname, str):
check_fname(fname, 'epochs', ('-epo.fif', '-epo.fif.gz',
'_epo.fif', '_epo.fif.gz'))
elif not preload:
raise ValueError('preload must be used with file-like objects')
fnames = [fname]
ep_list = list()
raw = list()
for fname in fnames:
fname_rep = _get_fname_rep(fname)
logger.info('Reading %s ...' % fname_rep)
fid, tree, _ = fiff_open(fname, preload=preload)
next_fname = _get_next_fname(fid, fname, tree)
(info, data, data_tag, events, event_id, metadata, tmin, tmax,
baseline, selection, drop_log, epoch_shape, cals,
reject_params, fmt) = \
_read_one_epoch_file(fid, tree, preload)
if (events[:, 0] < 0).any():
events = events.copy()
warn('Incorrect events detected on disk, setting event '
'numbers to consecutive increasing integers')
events[:, 0] = np.arange(1, len(events) + 1)
# here we ignore missing events, since users should already be
# aware of missing events if they have saved data that way
# we also retain original baseline without re-applying baseline
# correction (data is being baseline-corrected when written to
# disk)
epoch = BaseEpochs(
info, data, events, event_id, tmin, tmax,
baseline=None,
metadata=metadata, on_missing='ignore',
selection=selection, drop_log=drop_log,
proj=False, verbose=False)
epoch.baseline = baseline
epoch._do_baseline = False # might be superfluous but won't hurt
ep_list.append(epoch)
if not preload:
# store everything we need to index back to the original data
raw.append(_RawContainer(fiff_open(fname)[0], data_tag,
events[:, 0].copy(), epoch_shape,
cals, fmt))
if next_fname is not None:
fnames.append(next_fname)
(info, data, events, event_id, tmin, tmax, metadata, baseline,
selection, drop_log, _) = \
_concatenate_epochs(ep_list, with_data=preload, add_offset=False)
# we need this uniqueness for non-preloaded data to work properly
if len(np.unique(events[:, 0])) != len(events):
raise RuntimeError('Event time samples were not unique')
# correct the drop log
assert len(drop_log) % len(fnames) == 0
step = len(drop_log) // len(fnames)
offsets = np.arange(step, len(drop_log) + 1, step)
drop_log = list(drop_log)
for i1, i2 in zip(offsets[:-1], offsets[1:]):
other_log = drop_log[i1:i2]
for k, (a, b) in enumerate(zip(drop_log, other_log)):
if a == ('IGNORED',) and b != ('IGNORED',):
drop_log[k] = b
drop_log = tuple(drop_log[:step])
# call BaseEpochs constructor
# again, ensure we're retaining the baseline period originally loaded
# from disk without trying to re-apply baseline correction
super(EpochsFIF, self).__init__(
info, data, events, event_id, tmin, tmax, baseline=None, raw=raw,
proj=proj, preload_at_end=False, on_missing='ignore',
selection=selection, drop_log=drop_log, filename=fname_rep,
metadata=metadata, verbose=verbose, **reject_params)
self.baseline = baseline
self._do_baseline = False
# use the private property instead of drop_bad so that epochs
# are not all read from disk for preload=False
self._bad_dropped = True
@verbose
def _get_epoch_from_raw(self, idx, verbose=None):
"""Load one epoch from disk."""
# Find the right file and offset to use
event_samp = self.events[idx, 0]
for raw in self._raw:
idx = np.where(raw.event_samps == event_samp)[0]
if len(idx) == 1:
fmt = raw.fmt
idx = idx[0]
size = np.prod(raw.epoch_shape) * np.dtype(fmt).itemsize
offset = idx * size + 16 # 16 = Tag header
break
else:
# read the correct subset of the data
raise RuntimeError('Correct epoch could not be found, please '
'contact mne-python developers')
# the following is equivalent to this, but faster:
#
# >>> data = read_tag(raw.fid, raw.data_tag.pos).data.astype(float)
# >>> data *= raw.cals[np.newaxis, :, :]
# >>> data = data[idx]
#
# Eventually this could be refactored in io/tag.py if other functions
# could make use of it
raw.fid.seek(raw.data_tag.pos + offset, 0)
if fmt == '>c8':
read_fmt = '>f4'
elif fmt == '>c16':
read_fmt = '>f8'
else:
read_fmt = fmt
data = np.frombuffer(raw.fid.read(size), read_fmt)
if read_fmt != fmt:
data = data.view(fmt)
data = data.astype(np.complex128)
else:
data = data.astype(np.float64)
data.shape = raw.epoch_shape
data *= raw.cals
return data
@fill_doc
def bootstrap(epochs, random_state=None):
"""Compute epochs selected by bootstrapping.
Parameters
----------
epochs : Epochs instance
        The epochs data to be bootstrapped.
%(random_state)s
Returns
-------
epochs : Epochs instance
The bootstrap samples
"""
if not epochs.preload:
raise RuntimeError('Modifying data of epochs is only supported '
'when preloading is used. Use preload=True '
'in the constructor.')
rng = check_random_state(random_state)
epochs_bootstrap = epochs.copy()
n_events = len(epochs_bootstrap.events)
idx = rng_uniform(rng)(0, n_events, n_events)
epochs_bootstrap = epochs_bootstrap[idx]
return epochs_bootstrap
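# Illustrative sketch (not part of the original module): resampling epochs with
# replacement. The `epochs` argument is assumed to be an existing, preloaded
# Epochs instance; bootstrap raises a RuntimeError otherwise.
def _example_bootstrap(epochs, seed=42):  # pragma: no cover
    epochs_bs = bootstrap(epochs, random_state=seed)
    # epochs_bs contains len(epochs.events) epochs drawn with replacement.
    return epochs_bs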
def _check_merge_epochs(epochs_list):
"""Aux function."""
if len({tuple(epochs.event_id.items()) for epochs in epochs_list}) != 1:
raise NotImplementedError("Epochs with unequal values for event_id")
if len({epochs.tmin for epochs in epochs_list}) != 1:
raise NotImplementedError("Epochs with unequal values for tmin")
if len({epochs.tmax for epochs in epochs_list}) != 1:
raise NotImplementedError("Epochs with unequal values for tmax")
if len({epochs.baseline for epochs in epochs_list}) != 1:
raise NotImplementedError("Epochs with unequal values for baseline")
@verbose
def add_channels_epochs(epochs_list, verbose=None):
"""Concatenate channels, info and data from two Epochs objects.
Parameters
----------
epochs_list : list of Epochs
Epochs object to concatenate.
%(verbose)s Defaults to True if any of the input epochs have verbose=True.
Returns
-------
epochs : instance of Epochs
Concatenated epochs.
"""
if not all(e.preload for e in epochs_list):
raise ValueError('All epochs must be preloaded.')
info = _merge_info([epochs.info for epochs in epochs_list])
data = [epochs.get_data() for epochs in epochs_list]
_check_merge_epochs(epochs_list)
for d in data:
if len(d) != len(data[0]):
raise ValueError('all epochs must be of the same length')
data = np.concatenate(data, axis=1)
if len(info['chs']) != data.shape[1]:
err = "Data shape does not match channel number in measurement info"
raise RuntimeError(err)
events = epochs_list[0].events.copy()
all_same = all(np.array_equal(events, epochs.events)
for epochs in epochs_list[1:])
if not all_same:
raise ValueError('Events must be the same.')
proj = any(e.proj for e in epochs_list)
if verbose is None:
verbose = any(e.verbose for e in epochs_list)
epochs = epochs_list[0].copy()
epochs.info = info
epochs.picks = None
epochs.verbose = verbose
epochs.events = events
epochs.preload = True
epochs._bad_dropped = True
epochs._data = data
epochs._projector, epochs.info = setup_proj(epochs.info, False,
activate=proj)
return epochs
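# Illustrative sketch (not part of the original module): merging the channels
# of two preloaded Epochs objects that describe the same trials (same events,
# tmin, tmax and baseline) but disjoint channel sets, e.g. MEG-only and
# EEG-only epochs created elsewhere (both hypothetical here).
def _example_add_channels(epochs_meg, epochs_eeg):  # pragma: no cover
    combined = add_channels_epochs([epochs_meg, epochs_eeg])
    # The result has the channels of both inputs, events of the first.
    return combined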
def _compare_epochs_infos(info1, info2, name):
"""Compare infos."""
if not isinstance(name, str): # passed epochs index
name = f'epochs[{name:d}]'
info1._check_consistency()
info2._check_consistency()
if info1['nchan'] != info2['nchan']:
raise ValueError(f'{name}.info[\'nchan\'] must match')
if set(info1['bads']) != set(info2['bads']):
raise ValueError(f'{name}.info[\'bads\'] must match')
if info1['sfreq'] != info2['sfreq']:
raise ValueError(f'{name}.info[\'sfreq\'] must match')
if set(info1['ch_names']) != set(info2['ch_names']):
raise ValueError(f'{name}.info[\'ch_names\'] must match')
if len(info2['projs']) != len(info1['projs']):
raise ValueError(f'SSP projectors in {name} must be the same')
if any(not _proj_equal(p1, p2) for p1, p2 in
zip(info2['projs'], info1['projs'])):
raise ValueError(f'SSP projectors in {name} must be the same')
if (info1['dev_head_t'] is None) != (info2['dev_head_t'] is None) or \
(info1['dev_head_t'] is not None and not
np.allclose(info1['dev_head_t']['trans'],
info2['dev_head_t']['trans'], rtol=1e-6)):
raise ValueError(f'{name}.info[\'dev_head_t\'] must match. The '
'instances probably come from different runs, and '
'are therefore associated with different head '
'positions. Manually change info[\'dev_head_t\'] to '
'avoid this message but beware that this means the '
'MEG sensors will not be properly spatially aligned. '
'See mne.preprocessing.maxwell_filter to realign the '
'runs to a common head position.')
def _update_offset(offset, events, shift):
if offset == 0:
return offset
offset = 0 if offset is None else offset
offset = np.int64(offset) + np.max(events[:, 0]) + shift
if offset > INT32_MAX:
warn(f'Event number greater than {INT32_MAX} created, events[:, 0] '
'will be assigned consecutive increasing integer values')
offset = 0
return offset
def _concatenate_epochs(epochs_list, with_data=True, add_offset=True):
"""Auxiliary function for concatenating epochs."""
if not isinstance(epochs_list, (list, tuple)):
raise TypeError('epochs_list must be a list or tuple, got %s'
% (type(epochs_list),))
for ei, epochs in enumerate(epochs_list):
if not isinstance(epochs, BaseEpochs):
raise TypeError('epochs_list[%d] must be an instance of Epochs, '
'got %s' % (ei, type(epochs)))
out = epochs_list[0]
offsets = [0]
if with_data:
out.drop_bad()
offsets.append(len(out))
events = [out.events]
metadata = [out.metadata]
baseline, tmin, tmax = out.baseline, out.tmin, out.tmax
info = deepcopy(out.info)
verbose = out.verbose
drop_log = out.drop_log
event_id = deepcopy(out.event_id)
selection = out.selection
    # offset is the last epoch + tmax + 10 seconds
shift = int((10 + tmax) * out.info['sfreq'])
events_offset = _update_offset(None, out.events, shift)
for ii, epochs in enumerate(epochs_list[1:], 1):
_compare_epochs_infos(epochs.info, info, ii)
if not np.allclose(epochs.times, epochs_list[0].times):
raise ValueError('Epochs must have same times')
if epochs.baseline != baseline:
raise ValueError('Baseline must be same for all epochs')
# compare event_id
common_keys = list(set(event_id).intersection(set(epochs.event_id)))
for key in common_keys:
if not event_id[key] == epochs.event_id[key]:
msg = ('event_id values must be the same for identical keys '
'for all concatenated epochs. Key "{}" maps to {} in '
'some epochs and to {} in others.')
raise ValueError(msg.format(key, event_id[key],
epochs.event_id[key]))
if with_data:
epochs.drop_bad()
offsets.append(len(epochs))
evs = epochs.events.copy()
# add offset
if add_offset:
evs[:, 0] += events_offset
# Update offset for the next iteration.
events_offset = _update_offset(events_offset, epochs.events, shift)
events.append(evs)
selection = np.concatenate((selection, epochs.selection))
drop_log = drop_log + epochs.drop_log
event_id.update(epochs.event_id)
metadata.append(epochs.metadata)
events = np.concatenate(events, axis=0)
# check to see if we exceeded our maximum event offset
if events_offset == 0:
events[:, 0] = np.arange(1, len(events) + 1)
# Create metadata object (or make it None)
n_have = sum(this_meta is not None for this_meta in metadata)
if n_have == 0:
metadata = None
elif n_have != len(metadata):
raise ValueError('%d of %d epochs instances have metadata, either '
'all or none must have metadata'
% (n_have, len(metadata)))
else:
pd = _check_pandas_installed(strict=False)
if pd is not False:
metadata = pd.concat(metadata)
else: # dict of dicts
metadata = sum(metadata, list())
assert len(offsets) == (len(epochs_list) if with_data else 0) + 1
data = None
if with_data:
offsets = np.cumsum(offsets)
for start, stop, epochs in zip(offsets[:-1], offsets[1:], epochs_list):
this_data = epochs.get_data()
if data is None:
data = np.empty(
(offsets[-1], len(out.ch_names), len(out.times)),
dtype=this_data.dtype)
data[start:stop] = this_data
return (info, data, events, event_id, tmin, tmax, metadata, baseline,
selection, drop_log, verbose)
def _finish_concat(info, data, events, event_id, tmin, tmax, metadata,
baseline, selection, drop_log, verbose):
"""Finish concatenation for epochs not read from disk."""
selection = np.where([len(d) == 0 for d in drop_log])[0]
out = BaseEpochs(
info, data, events, event_id, tmin, tmax, baseline=baseline,
selection=selection, drop_log=drop_log, proj=False,
on_missing='ignore', metadata=metadata, verbose=verbose)
out.drop_bad()
return out
def concatenate_epochs(epochs_list, add_offset=True):
"""Concatenate a list of epochs into one epochs object.
Parameters
----------
epochs_list : list
List of Epochs instances to concatenate (in order).
add_offset : bool
If True, a fixed offset is added to the event times from different
Epochs sets, such that they are easy to distinguish after the
concatenation.
If False, the event times are unaltered during the concatenation.
Returns
-------
epochs : instance of Epochs
The result of the concatenation (first Epochs instance passed in).
Notes
-----
.. versionadded:: 0.9.0
"""
return _finish_concat(*_concatenate_epochs(epochs_list,
add_offset=add_offset))
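# Illustrative sketch (not part of the original module): stacking two Epochs
# objects recorded with compatible info (same channels, sfreq, times and
# baseline). Both inputs are assumed to exist already.
def _example_concatenate(epochs_run1, epochs_run2):  # pragma: no cover
    # With add_offset=True (the default) the event samples of the second run
    # are shifted so they cannot collide with those of the first run.
    all_epochs = concatenate_epochs([epochs_run1, epochs_run2])
    return all_epochs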
@verbose
def average_movements(epochs, head_pos=None, orig_sfreq=None, picks=None,
origin='auto', weight_all=True, int_order=8, ext_order=3,
destination=None, ignore_ref=False, return_mapping=False,
mag_scale=100., verbose=None):
"""Average data using Maxwell filtering, transforming using head positions.
Parameters
----------
epochs : instance of Epochs
The epochs to operate on.
%(maxwell_pos)s
orig_sfreq : float | None
The original sample frequency of the data (that matches the
event sample numbers in ``epochs.events``). Can be ``None``
if data have not been decimated or resampled.
%(picks_all_data)s
%(maxwell_origin)s
weight_all : bool
If True, all channels are weighted by the SSS basis weights.
If False, only MEG channels are weighted, other channels
receive uniform weight per epoch.
%(maxwell_int)s
%(maxwell_ext)s
%(maxwell_dest)s
%(maxwell_ref)s
return_mapping : bool
If True, return the mapping matrix.
%(maxwell_mag)s
.. versionadded:: 0.13
%(verbose)s
Returns
-------
evoked : instance of Evoked
The averaged epochs.
See Also
--------
mne.preprocessing.maxwell_filter
mne.chpi.read_head_pos
Notes
-----
The Maxwell filtering version of this algorithm is described in [1]_,
in section V.B "Virtual signals and movement correction", equations
40-44. For additional validation, see [2]_.
Regularization has not been added because in testing it appears to
decrease dipole localization accuracy relative to using all components.
Fine calibration and cross-talk cancellation, however, could be added
to this algorithm based on user demand.
.. versionadded:: 0.11
References
----------
.. [1] Taulu S. and Kajola M. "Presentation of electromagnetic
multichannel data: The signal space separation method,"
Journal of Applied Physics, vol. 97, pp. 124905 1-10, 2005.
.. [2] Wehner DT, Hämäläinen MS, Mody M, Ahlfors SP. "Head movements
of children in MEG: Quantification, effects on source
           estimation, and compensation." NeuroImage 40:541–550, 2008.
""" # noqa: E501
from .preprocessing.maxwell import (_trans_sss_basis, _reset_meg_bads,
_check_usable, _col_norm_pinv,
_get_n_moments, _get_mf_picks_fix_mags,
_prep_mf_coils, _check_destination,
_remove_meg_projs, _get_coil_scale)
if head_pos is None:
raise TypeError('head_pos must be provided and cannot be None')
from .chpi import head_pos_to_trans_rot_t
if not isinstance(epochs, BaseEpochs):
raise TypeError('epochs must be an instance of Epochs, not %s'
% (type(epochs),))
orig_sfreq = epochs.info['sfreq'] if orig_sfreq is None else orig_sfreq
orig_sfreq = float(orig_sfreq)
if isinstance(head_pos, np.ndarray):
head_pos = head_pos_to_trans_rot_t(head_pos)
trn, rot, t = head_pos
del head_pos
_check_usable(epochs)
origin = _check_origin(origin, epochs.info, 'head')
recon_trans = _check_destination(destination, epochs.info, True)
logger.info('Aligning and averaging up to %s epochs'
% (len(epochs.events)))
if not np.array_equal(epochs.events[:, 0], np.unique(epochs.events[:, 0])):
raise RuntimeError('Epochs must have monotonically increasing events')
info_to = epochs.info.copy()
meg_picks, mag_picks, grad_picks, good_mask, _ = \
_get_mf_picks_fix_mags(info_to, int_order, ext_order, ignore_ref)
coil_scale, mag_scale = _get_coil_scale(
meg_picks, mag_picks, grad_picks, mag_scale, info_to)
n_channels, n_times = len(epochs.ch_names), len(epochs.times)
other_picks = np.setdiff1d(np.arange(n_channels), meg_picks)
data = np.zeros((n_channels, n_times))
count = 0
# keep only MEG w/bad channels marked in "info_from"
info_from = pick_info(info_to, meg_picks[good_mask], copy=True)
all_coils_recon = _prep_mf_coils(info_to, ignore_ref=ignore_ref)
all_coils = _prep_mf_coils(info_from, ignore_ref=ignore_ref)
# remove MEG bads in "to" info
_reset_meg_bads(info_to)
# set up variables
w_sum = 0.
n_in, n_out = _get_n_moments([int_order, ext_order])
S_decomp = 0. # this will end up being a weighted average
last_trans = None
decomp_coil_scale = coil_scale[good_mask]
exp = dict(int_order=int_order, ext_order=ext_order, head_frame=True,
origin=origin)
n_in = _get_n_moments(int_order)
for ei, epoch in enumerate(epochs):
event_time = epochs.events[epochs._current - 1, 0] / orig_sfreq
use_idx = np.where(t <= event_time)[0]
if len(use_idx) == 0:
trans = info_to['dev_head_t']['trans']
else:
use_idx = use_idx[-1]
trans = np.vstack([np.hstack([rot[use_idx], trn[[use_idx]].T]),
[[0., 0., 0., 1.]]])
loc_str = ', '.join('%0.1f' % tr for tr in (trans[:3, 3] * 1000))
if last_trans is None or not np.allclose(last_trans, trans):
logger.info(' Processing epoch %s (device location: %s mm)'
% (ei + 1, loc_str))
reuse = False
last_trans = trans
else:
logger.info(' Processing epoch %s (device location: same)'
% (ei + 1,))
reuse = True
epoch = epoch.copy() # because we operate inplace
if not reuse:
S = _trans_sss_basis(exp, all_coils, trans,
coil_scale=decomp_coil_scale)
# Get the weight from the un-regularized version (eq. 44)
weight = np.linalg.norm(S[:, :n_in])
# XXX Eventually we could do cross-talk and fine-cal here
S *= weight
S_decomp += S # eq. 41
epoch[slice(None) if weight_all else meg_picks] *= weight
data += epoch # eq. 42
w_sum += weight
count += 1
del info_from
mapping = None
if count == 0:
data.fill(np.nan)
else:
data[meg_picks] /= w_sum
data[other_picks] /= w_sum if weight_all else count
# Finalize weighted average decomp matrix
S_decomp /= w_sum
# Get recon matrix
# (We would need to include external here for regularization to work)
exp['ext_order'] = 0
S_recon = _trans_sss_basis(exp, all_coils_recon, recon_trans)
exp['ext_order'] = ext_order
# We could determine regularization on basis of destination basis
# matrix, restricted to good channels, as regularizing individual
# matrices within the loop above does not seem to work. But in
# testing this seemed to decrease localization quality in most cases,
# so we do not provide the option here.
S_recon /= coil_scale
# Invert
pS_ave = _col_norm_pinv(S_decomp)[0][:n_in]
pS_ave *= decomp_coil_scale.T
# Get mapping matrix
mapping = np.dot(S_recon, pS_ave)
# Apply mapping
data[meg_picks] = np.dot(mapping, data[meg_picks[good_mask]])
info_to['dev_head_t'] = recon_trans # set the reconstruction transform
evoked = epochs._evoked_from_epoch_data(data, info_to, picks,
n_events=count, kind='average',
comment=epochs._name)
_remove_meg_projs(evoked) # remove MEG projectors, they won't apply now
logger.info('Created Evoked dataset from %s epochs' % (count,))
return (evoked, mapping) if return_mapping else evoked
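# Illustrative sketch (not part of the original module): movement-compensated
# averaging. `epochs` is assumed to be an existing Epochs instance and `pos` a
# head-position array for the same recording, e.g. as returned by
# mne.chpi.read_head_pos (both hypothetical here).
def _example_average_movements(epochs, pos):  # pragma: no cover
    evoked = average_movements(epochs, head_pos=pos)
    return evoked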
@verbose
def make_fixed_length_epochs(raw, duration=1., preload=False,
reject_by_annotation=True, proj=True, overlap=0.,
verbose=None):
"""Divide continuous raw data into equal-sized consecutive epochs.
Parameters
----------
raw : instance of Raw
Raw data to divide into segments.
duration : float
Duration of each epoch in seconds. Defaults to 1.
%(preload)s
%(reject_by_annotation_epochs)s
.. versionadded:: 0.21.0
%(proj_epochs)s
.. versionadded:: 0.22.0
overlap : float
The overlap between epochs, in seconds. Must be
``0 <= overlap < duration``. Default is 0, i.e., no overlap.
.. versionadded:: 0.23.0
%(verbose)s
Returns
-------
epochs : instance of Epochs
Segmented data.
Notes
-----
.. versionadded:: 0.20
"""
events = make_fixed_length_events(raw, 1, duration=duration,
overlap=overlap)
delta = 1. / raw.info['sfreq']
return Epochs(raw, events, event_id=[1], tmin=0, tmax=duration - delta,
baseline=None, preload=preload,
reject_by_annotation=reject_by_annotation, proj=proj,
verbose=verbose)
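# Illustrative sketch (not part of the original module): cutting a continuous
# recording into 2-second segments with 1 second of overlap. The `raw`
# argument is assumed to be an existing mne.io.Raw instance.
def _example_fixed_length(raw):  # pragma: no cover
    epochs = make_fixed_length_epochs(raw, duration=2., overlap=1.,
                                      preload=True)
    return epochs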
| bsd-3-clause |
cmoutard/mne-python | mne/label.py | 4 | 73657 | # Authors: Alexandre Gramfort <[email protected]>
# Martin Luessi <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD (3-clause)
from collections import defaultdict
from colorsys import hsv_to_rgb, rgb_to_hsv
from os import path as op
import os
import copy as cp
import re
import numpy as np
from scipy import linalg, sparse
from .fixes import digitize, in1d
from .utils import get_subjects_dir, _check_subject, logger, verbose
from .source_estimate import (morph_data, SourceEstimate,
spatial_src_connectivity)
from .source_space import add_source_space_distances
from .surface import read_surface, fast_cross_3d, mesh_edges, mesh_dist
from .source_space import SourceSpaces
from .parallel import parallel_func, check_n_jobs
from .stats.cluster_level import _find_clusters
from .externals.six import b, string_types
from .externals.six.moves import zip, xrange
def _blend_colors(color_1, color_2):
"""Blend two colors in HSV space
Parameters
----------
color_1, color_2 : None | tuple
RGBA tuples with values between 0 and 1. None if no color is available.
If both colors are None, the output is None. If only one is None, the
output is the other color.
Returns
-------
color : None | tuple
RGBA tuple of the combined color. Saturation, value and alpha are
averaged, whereas the new hue is determined as angle half way between
the two input colors' hues.
"""
if color_1 is None and color_2 is None:
return None
elif color_1 is None:
return color_2
elif color_2 is None:
return color_1
r_1, g_1, b_1, a_1 = color_1
h_1, s_1, v_1 = rgb_to_hsv(r_1, g_1, b_1)
r_2, g_2, b_2, a_2 = color_2
h_2, s_2, v_2 = rgb_to_hsv(r_2, g_2, b_2)
hue_diff = abs(h_1 - h_2)
if hue_diff < 0.5:
h = min(h_1, h_2) + hue_diff / 2.
else:
h = max(h_1, h_2) + (1. - hue_diff) / 2.
h %= 1.
s = (s_1 + s_2) / 2.
v = (v_1 + v_2) / 2.
r, g, b = hsv_to_rgb(h, s, v)
a = (a_1 + a_2) / 2.
color = (r, g, b, a)
return color
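# Illustrative sketch (not part of the original module): blending two opaque
# RGBA colors. Red and blue blend to (approximately) magenta, because the new
# hue lies half way between the input hues while saturation, value and alpha
# are averaged.
def _example_blend_colors():  # pragma: no cover
    red = (1., 0., 0., 1.)
    blue = (0., 0., 1., 1.)
    blended = _blend_colors(red, blue)  # ~ (1., 0., 1., 1.)
    return blended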
def _split_colors(color, n):
"""Create n colors in HSV space that occupy a gradient in value
Parameters
----------
color : tuple
RGBA tuple with values between 0 and 1.
n : int >= 2
Number of colors on the gradient.
Returns
-------
colors : tuple of tuples, len = n
N RGBA tuples that occupy a gradient in value (low to high) but share
saturation and hue with the input color.
"""
r, g, b, a = color
h, s, v = rgb_to_hsv(r, g, b)
gradient_range = np.sqrt(n / 10.)
if v > 0.5:
v_max = min(0.95, v + gradient_range / 2)
v_min = max(0.05, v_max - gradient_range)
else:
v_min = max(0.05, v - gradient_range / 2)
v_max = min(0.95, v_min + gradient_range)
hsv_colors = ((h, s, v_) for v_ in np.linspace(v_min, v_max, n))
rgb_colors = (hsv_to_rgb(h_, s_, v_) for h_, s_, v_ in hsv_colors)
rgba_colors = ((r_, g_, b_, a,) for r_, g_, b_ in rgb_colors)
return tuple(rgba_colors)
def _n_colors(n, bytes_=False, cmap='hsv'):
"""Produce a list of n unique RGBA color tuples based on a colormap
Parameters
----------
n : int
Number of colors.
    bytes_ : bool
        Return colors as integer values between 0 and 255 (instead of floats
between 0 and 1).
cmap : str
Which colormap to use.
Returns
-------
colors : array, shape (n, 4)
RGBA color values.
"""
n_max = 2 ** 10
if n > n_max:
raise NotImplementedError("Can't produce more than %i unique "
"colors" % n_max)
from matplotlib.cm import get_cmap
cm = get_cmap(cmap, n_max)
pos = np.linspace(0, 1, n, False)
colors = cm(pos, bytes=bytes_)
if bytes_:
# make sure colors are unique
for ii, c in enumerate(colors):
if np.any(np.all(colors[:ii] == c, 1)):
raise RuntimeError('Could not get %d unique colors from %s '
'colormap. Try using a different colormap.'
% (n, cmap))
return colors
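# Illustrative sketch (not part of the original module): requesting a small
# set of distinct label colors (requires matplotlib). With the default
# bytes_=False the values are floats in [0, 1]; with bytes_=True they are
# integers in [0, 255].
def _example_n_colors():  # pragma: no cover
    colors = _n_colors(5)                   # RGBA array of shape (5, 4)
    colors_bytes = _n_colors(5, bytes_=True)
    return colors, colors_bytes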
class Label(object):
"""A FreeSurfer/MNE label with vertices restricted to one hemisphere
Labels can be combined with the ``+`` operator:
* Duplicate vertices are removed.
* If duplicate vertices have conflicting position values, an error
is raised.
* Values of duplicate vertices are summed.
Parameters
----------
vertices : array (length N)
vertex indices (0 based).
pos : array (N by 3) | None
locations in meters. If None, then zeros are used.
values : array (length N) | None
values at the vertices. If None, then ones are used.
hemi : 'lh' | 'rh'
Hemisphere to which the label applies.
comment : str
Kept as information but not used by the object itself.
name : str
Kept as information but not used by the object itself.
filename : str
Kept as information but not used by the object itself.
subject : str | None
Name of the subject the label is from.
color : None | matplotlib color
Default label color and alpha (e.g., ``(1., 0., 0., 1.)`` for red).
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Attributes
----------
color : None | tuple
Default label color, represented as RGBA tuple with values between 0
and 1.
comment : str
Comment from the first line of the label file.
hemi : 'lh' | 'rh'
Hemisphere.
name : None | str
A name for the label. It is OK to change that attribute manually.
pos : array, shape = (n_pos, 3)
Locations in meters.
subject : str | None
Subject name. It is best practice to set this to the proper
value on initialization, but it can also be set manually.
values : array, len = n_pos
Values at the vertices.
verbose : bool, str, int, or None
See above.
vertices : array, len = n_pos
Vertex indices (0 based)
"""
@verbose
def __init__(self, vertices, pos=None, values=None, hemi=None, comment="",
name=None, filename=None, subject=None, color=None,
verbose=None):
# check parameters
if not isinstance(hemi, string_types):
raise ValueError('hemi must be a string, not %s' % type(hemi))
vertices = np.asarray(vertices)
if np.any(np.diff(vertices.astype(int)) <= 0):
raise ValueError('Vertices must be ordered in increasing order.')
if color is not None:
from matplotlib.colors import colorConverter
color = colorConverter.to_rgba(color)
if values is None:
values = np.ones(len(vertices))
else:
values = np.asarray(values)
if pos is None:
pos = np.zeros((len(vertices), 3))
else:
pos = np.asarray(pos)
if not (len(vertices) == len(values) == len(pos)):
raise ValueError("vertices, values and pos need to have same "
"length (number of vertices)")
# name
if name is None and filename is not None:
name = op.basename(filename[:-6])
self.vertices = vertices
self.pos = pos
self.values = values
self.hemi = hemi
self.comment = comment
self.verbose = verbose
self.subject = _check_subject(None, subject, False)
self.color = color
self.name = name
self.filename = filename
def __setstate__(self, state):
self.vertices = state['vertices']
self.pos = state['pos']
self.values = state['values']
self.hemi = state['hemi']
self.comment = state['comment']
self.verbose = state['verbose']
self.subject = state.get('subject', None)
self.color = state.get('color', None)
self.name = state['name']
self.filename = state['filename']
def __getstate__(self):
out = dict(vertices=self.vertices,
pos=self.pos,
values=self.values,
hemi=self.hemi,
comment=self.comment,
verbose=self.verbose,
subject=self.subject,
color=self.color,
name=self.name,
filename=self.filename)
return out
def __repr__(self):
name = 'unknown, ' if self.subject is None else self.subject + ', '
name += repr(self.name) if self.name is not None else "unnamed"
n_vert = len(self)
return "<Label | %s, %s : %i vertices>" % (name, self.hemi, n_vert)
def __len__(self):
return len(self.vertices)
def __add__(self, other):
if isinstance(other, BiHemiLabel):
return other + self
elif isinstance(other, Label):
if self.subject != other.subject:
raise ValueError('Label subject parameters must match, got '
'"%s" and "%s". Consider setting the '
'subject parameter on initialization, or '
'setting label.subject manually before '
'combining labels.' % (self.subject,
other.subject))
if self.hemi != other.hemi:
name = '%s + %s' % (self.name, other.name)
if self.hemi == 'lh':
lh, rh = self.copy(), other.copy()
else:
lh, rh = other.copy(), self.copy()
color = _blend_colors(self.color, other.color)
return BiHemiLabel(lh, rh, name, color)
else:
raise TypeError("Need: Label or BiHemiLabel. Got: %r" % other)
# check for overlap
duplicates = np.intersect1d(self.vertices, other.vertices)
n_dup = len(duplicates)
if n_dup:
self_dup = [np.where(self.vertices == d)[0][0]
for d in duplicates]
other_dup = [np.where(other.vertices == d)[0][0]
for d in duplicates]
if not np.all(self.pos[self_dup] == other.pos[other_dup]):
err = ("Labels %r and %r: vertices overlap but differ in "
"position values" % (self.name, other.name))
raise ValueError(err)
isnew = np.array([v not in duplicates for v in other.vertices])
vertices = np.hstack((self.vertices, other.vertices[isnew]))
pos = np.vstack((self.pos, other.pos[isnew]))
# find position of other's vertices in new array
tgt_idx = [np.where(vertices == v)[0][0] for v in other.vertices]
n_self = len(self.values)
n_other = len(other.values)
new_len = n_self + n_other - n_dup
values = np.zeros(new_len, dtype=self.values.dtype)
values[:n_self] += self.values
values[tgt_idx] += other.values
else:
vertices = np.hstack((self.vertices, other.vertices))
pos = np.vstack((self.pos, other.pos))
values = np.hstack((self.values, other.values))
indcs = np.argsort(vertices)
vertices, pos, values = vertices[indcs], pos[indcs, :], values[indcs]
comment = "%s + %s" % (self.comment, other.comment)
name0 = self.name if self.name else 'unnamed'
name1 = other.name if other.name else 'unnamed'
name = "%s + %s" % (name0, name1)
color = _blend_colors(self.color, other.color)
verbose = self.verbose or other.verbose
label = Label(vertices, pos, values, self.hemi, comment, name, None,
self.subject, color, verbose)
return label
def __sub__(self, other):
if isinstance(other, BiHemiLabel):
if self.hemi == 'lh':
return self - other.lh
else:
return self - other.rh
elif isinstance(other, Label):
if self.subject != other.subject:
raise ValueError('Label subject parameters must match, got '
'"%s" and "%s". Consider setting the '
'subject parameter on initialization, or '
'setting label.subject manually before '
'combining labels.' % (self.subject,
other.subject))
else:
raise TypeError("Need: Label or BiHemiLabel. Got: %r" % other)
if self.hemi == other.hemi:
keep = in1d(self.vertices, other.vertices, True, invert=True)
else:
keep = np.arange(len(self.vertices))
name = "%s - %s" % (self.name or 'unnamed', other.name or 'unnamed')
return Label(self.vertices[keep], self.pos[keep], self.values[keep],
self.hemi, self.comment, name, None, self.subject,
self.color, self.verbose)
def save(self, filename):
"""Write to disk as FreeSurfer \*.label file
Parameters
----------
filename : string
Path to label file to produce.
Notes
-----
Note that due to file specification limitations, the Label's subject
and color attributes are not saved to disk.
"""
write_label(filename, self)
def copy(self):
"""Copy the label instance.
Returns
-------
label : instance of Label
The copied label.
"""
return cp.deepcopy(self)
def fill(self, src, name=None):
"""Fill the surface between sources for a label defined in source space
Parameters
----------
src : SourceSpaces
Source space in which the label was defined. If a source space is
provided, the label is expanded to fill in surface vertices that
lie between the vertices included in the source space. For the
added vertices, ``pos`` is filled in with positions from the
source space, and ``values`` is filled in from the closest source
space vertex.
name : None | str
Name for the new Label (default is self.name).
Returns
-------
label : Label
The label covering the same vertices in source space but also
including intermediate surface vertices.
"""
# find source space patch info
if self.hemi == 'lh':
hemi_src = src[0]
elif self.hemi == 'rh':
hemi_src = src[1]
if not np.all(in1d(self.vertices, hemi_src['vertno'])):
msg = "Source space does not contain all of the label's vertices"
raise ValueError(msg)
nearest = hemi_src['nearest']
if nearest is None:
logger.warn("Computing patch info for source space, this can take "
"a while. In order to avoid this in the future, run "
"mne.add_source_space_distances() on the source space "
"and save it.")
add_source_space_distances(src)
nearest = hemi_src['nearest']
# find new vertices
include = in1d(nearest, self.vertices, False)
vertices = np.nonzero(include)[0]
# values
nearest_in_label = digitize(nearest[vertices], self.vertices, True)
values = self.values[nearest_in_label]
# pos
pos = hemi_src['rr'][vertices]
if name is None:
name = self.name
label = Label(vertices, pos, values, self.hemi, self.comment, name,
None, self.subject, self.color)
return label
@verbose
def smooth(self, subject=None, smooth=2, grade=None,
subjects_dir=None, n_jobs=1, copy=True, verbose=None):
"""Smooth the label
Useful for filling in labels made in a
decimated source space for display.
Parameters
----------
subject : str | None
The name of the subject used. If None, the value will be
taken from self.subject.
smooth : int
Number of iterations for the smoothing of the surface data.
Cannot be None here since not all vertices are used. For a
grade of 5 (e.g., fsaverage), a smoothing of 2 will fill a
label.
grade : int, list (of two arrays), array, or None
Resolution of the icosahedral mesh (typically 5). If None, all
vertices will be used (potentially filling the surface). If a list,
values will be morphed to the set of vertices specified in grade[0]
and grade[1], assuming that these are vertices for the left and
right hemispheres. Note that specifying the vertices (e.g.,
grade=[np.arange(10242), np.arange(10242)] for fsaverage on a
standard grade 5 source space) can be substantially faster than
computing vertex locations. If one array is used, it is assumed
that all vertices belong to the hemisphere of the label. To create
a label filling the surface, use None.
subjects_dir : string, or None
Path to SUBJECTS_DIR if it is not set in the environment.
n_jobs : int
Number of jobs to run in parallel
copy : bool
If False, smoothing is done in-place.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
Returns
-------
label : instance of Label
The smoothed label.
Notes
-----
This function will set label.pos to be all zeros. If the positions
on the new surface are required, consider using mne.read_surface
with label.vertices.
"""
subject = _check_subject(self.subject, subject)
return self.morph(subject, subject, smooth, grade, subjects_dir,
n_jobs, copy)
@verbose
def morph(self, subject_from=None, subject_to=None, smooth=5, grade=None,
subjects_dir=None, n_jobs=1, copy=True, verbose=None):
"""Morph the label
Useful for transforming a label from one subject to another.
Parameters
----------
subject_from : str | None
The name of the subject of the current label. If None, the
initial subject will be taken from self.subject.
subject_to : str
The name of the subject to morph the label to. This will
be put in label.subject of the output label file.
smooth : int
Number of iterations for the smoothing of the surface data.
Cannot be None here since not all vertices are used.
grade : int, list (of two arrays), array, or None
Resolution of the icosahedral mesh (typically 5). If None, all
vertices will be used (potentially filling the surface). If a list,
values will be morphed to the set of vertices specified in grade[0]
and grade[1], assuming that these are vertices for the left and
right hemispheres. Note that specifying the vertices (e.g.,
``grade=[np.arange(10242), np.arange(10242)]`` for fsaverage on a
standard grade 5 source space) can be substantially faster than
computing vertex locations. If one array is used, it is assumed
that all vertices belong to the hemisphere of the label. To create
a label filling the surface, use None.
subjects_dir : string, or None
Path to SUBJECTS_DIR if it is not set in the environment.
n_jobs : int
Number of jobs to run in parallel.
copy : bool
If False, the morphing is done in-place.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
label : instance of Label
The morphed label.
Notes
-----
This function will set label.pos to be all zeros. If the positions
on the new surface are required, consider using `mne.read_surface`
with `label.vertices`.
"""
subject_from = _check_subject(self.subject, subject_from)
if not isinstance(subject_to, string_types):
raise TypeError('"subject_to" must be entered as a string')
if not isinstance(smooth, int):
raise TypeError('smooth must be an integer')
if np.all(self.values == 0):
raise ValueError('Morphing label with all zero values will result '
'in the label having no vertices. Consider using '
'something like label.values.fill(1.0).')
        if isinstance(grade, np.ndarray):
if self.hemi == 'lh':
grade = [grade, np.array([], int)]
else:
grade = [np.array([], int), grade]
if self.hemi == 'lh':
vertices = [self.vertices, np.array([], int)]
else:
vertices = [np.array([], int), self.vertices]
data = self.values[:, np.newaxis]
stc = SourceEstimate(data, vertices, tmin=1, tstep=1,
subject=subject_from)
stc = morph_data(subject_from, subject_to, stc, grade=grade,
smooth=smooth, subjects_dir=subjects_dir,
warn=False, n_jobs=n_jobs)
inds = np.nonzero(stc.data)[0]
if copy is True:
label = self.copy()
else:
label = self
label.values = stc.data[inds, :].ravel()
label.pos = np.zeros((len(inds), 3))
if label.hemi == 'lh':
label.vertices = stc.vertices[0][inds]
else:
label.vertices = stc.vertices[1][inds]
label.subject = subject_to
return label
def split(self, parts=2, subject=None, subjects_dir=None,
freesurfer=False):
"""Split the Label into two or more parts
Parameters
----------
parts : int >= 2 | tuple of str
A sequence of strings specifying label names for the new labels
(from posterior to anterior), or the number of new labels to create
(default is 2). If a number is specified, names of the new labels
will be the input label's name with div1, div2 etc. appended.
subject : None | str
Subject which this label belongs to (needed to locate surface file;
should only be specified if it is not specified in the label).
subjects_dir : None | str
Path to SUBJECTS_DIR if it is not set in the environment.
freesurfer : bool
By default (``False``) ``split_label`` uses an algorithm that is
slightly optimized for performance and numerical precision. Set
``freesurfer`` to ``True`` in order to replicate label splits from
FreeSurfer's ``mris_divide_parcellation``.
Returns
-------
labels : list of Label (len = n_parts)
The labels, starting from the lowest to the highest end of the
projection axis.
Notes
-----
Works by finding the label's principal eigen-axis on the spherical
surface, projecting all label vertex coordinates onto this axis and
dividing them at regular spatial intervals.
"""
return split_label(self, parts, subject, subjects_dir, freesurfer)
def get_vertices_used(self, vertices=None):
"""Get the source space's vertices inside the label
Parameters
----------
vertices : ndarray of int, shape (n_vertices,) | None
The set of vertices to compare the label to. If None, equals to
``np.arange(10242)``. Defaults to None.
Returns
-------
        label_verts : ndarray of int, shape (n_label_vertices,)
            The vertices of the label that are used by the data.
"""
if vertices is None:
vertices = np.arange(10242)
label_verts = vertices[in1d(vertices, self.vertices)]
return label_verts
def get_tris(self, tris, vertices=None):
"""Get the source space's triangles inside the label
Parameters
----------
tris : ndarray of int, shape (n_tris, 3)
The set of triangles corresponding to the vertices in a
source space.
vertices : ndarray of int, shape (n_vertices,) | None
The set of vertices to compare the label to. If None, equals to
``np.arange(10242)``. Defaults to None.
Returns
-------
label_tris : ndarray of int, shape (n_tris, 3)
The subset of tris used by the label
"""
vertices_ = self.get_vertices_used(vertices)
selection = np.all(in1d(tris, vertices_).reshape(tris.shape),
axis=1)
label_tris = tris[selection]
if len(np.unique(label_tris)) < len(vertices_):
logger.info('Surprising label structure. Trying to repair '
'triangles.')
dropped_vertices = np.setdiff1d(vertices_, label_tris)
n_dropped = len(dropped_vertices)
assert n_dropped == (len(vertices_) - len(np.unique(label_tris)))
# put missing vertices as extra zero-length triangles
add_tris = (dropped_vertices +
np.zeros((len(dropped_vertices), 3), dtype=int).T)
label_tris = np.r_[label_tris, add_tris.T]
assert len(np.unique(label_tris)) == len(vertices_)
return label_tris
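# Illustrative sketch (not part of the original module): building Label
# objects from plain vertex arrays and combining them with ``+``. The vertex
# indices are hypothetical; positions and values fall back to zeros and ones.
def _example_labels():  # pragma: no cover
    lh_a = Label(np.array([0, 2, 5]), hemi='lh', name='a-lh')
    lh_b = Label(np.array([2, 7]), hemi='lh', name='b-lh')
    rh_c = Label(np.array([1, 3]), hemi='rh', name='c-rh')
    merged = lh_a + lh_b    # Label with vertices [0, 2, 5, 7]
    bihemi = lh_a + rh_c    # BiHemiLabel spanning both hemispheres
    return merged, bihemi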
class BiHemiLabel(object):
"""A freesurfer/MNE label with vertices in both hemispheres
Parameters
----------
lh : Label
Label for the left hemisphere.
rh : Label
Label for the right hemisphere.
name : None | str
name for the label
color : None | matplotlib color
Label color and alpha (e.g., ``(1., 0., 0., 1.)`` for red).
Note that due to file specification limitations, the color isn't saved
to or loaded from files written to disk.
Attributes
----------
lh : Label
Label for the left hemisphere.
rh : Label
Label for the right hemisphere.
name : None | str
A name for the label. It is OK to change that attribute manually.
subject : str | None
Subject the label is from.
"""
def __init__(self, lh, rh, name=None, color=None):
if lh.subject != rh.subject:
raise ValueError('lh.subject (%s) and rh.subject (%s) must '
'agree' % (lh.subject, rh.subject))
self.lh = lh
self.rh = rh
self.name = name
self.subject = lh.subject
self.color = color
self.hemi = 'both'
def __repr__(self):
temp = "<BiHemiLabel | %s, lh : %i vertices, rh : %i vertices>"
name = 'unknown, ' if self.subject is None else self.subject + ', '
name += repr(self.name) if self.name is not None else "unnamed"
return temp % (name, len(self.lh), len(self.rh))
def __len__(self):
return len(self.lh) + len(self.rh)
def __add__(self, other):
if isinstance(other, Label):
if other.hemi == 'lh':
lh = self.lh + other
rh = self.rh
else:
lh = self.lh
rh = self.rh + other
elif isinstance(other, BiHemiLabel):
lh = self.lh + other.lh
rh = self.rh + other.rh
else:
raise TypeError("Need: Label or BiHemiLabel. Got: %r" % other)
name = '%s + %s' % (self.name, other.name)
color = _blend_colors(self.color, other.color)
return BiHemiLabel(lh, rh, name, color)
def __sub__(self, other):
if isinstance(other, Label):
if other.hemi == 'lh':
lh = self.lh - other
rh = self.rh
else:
rh = self.rh - other
lh = self.lh
elif isinstance(other, BiHemiLabel):
lh = self.lh - other.lh
rh = self.rh - other.rh
else:
raise TypeError("Need: Label or BiHemiLabel. Got: %r" % other)
if len(lh.vertices) == 0:
return rh
elif len(rh.vertices) == 0:
return lh
else:
name = '%s - %s' % (self.name, other.name)
return BiHemiLabel(lh, rh, name, self.color)
def read_label(filename, subject=None, color=None):
"""Read FreeSurfer Label file
Parameters
----------
filename : string
Path to label file.
subject : str | None
Name of the subject the data are defined for.
It is good practice to set this attribute to avoid combining
incompatible labels and SourceEstimates (e.g., ones from other
subjects). Note that due to file specification limitations, the
subject name isn't saved to or loaded from files written to disk.
color : None | matplotlib color
Default label color and alpha (e.g., ``(1., 0., 0., 1.)`` for red).
Note that due to file specification limitations, the color isn't saved
to or loaded from files written to disk.
Returns
-------
label : Label
Instance of Label object with attributes:
- ``comment``: comment from the first line of the label file
- ``vertices``: vertex indices (0 based, column 1)
- ``pos``: locations in meters (columns 2 - 4 divided by 1000)
- ``values``: values at the vertices (column 5)
See Also
--------
read_labels_from_annot
"""
if subject is not None and not isinstance(subject, string_types):
raise TypeError('subject must be a string')
# find hemi
basename = op.basename(filename)
if basename.endswith('lh.label') or basename.startswith('lh.'):
hemi = 'lh'
elif basename.endswith('rh.label') or basename.startswith('rh.'):
hemi = 'rh'
else:
raise ValueError('Cannot find which hemisphere it is. File should end'
' with lh.label or rh.label')
# find name
if basename.startswith(('lh.', 'rh.')):
basename_ = basename[3:]
if basename.endswith('.label'):
basename_ = basename[:-6]
else:
basename_ = basename[:-9]
name = "%s-%s" % (basename_, hemi)
# read the file
with open(filename, 'r') as fid:
comment = fid.readline().replace('\n', '')[1:]
nv = int(fid.readline())
data = np.empty((5, nv))
for i, line in enumerate(fid):
data[:, i] = line.split()
# let's make sure everything is ordered correctly
vertices = np.array(data[0], dtype=np.int32)
pos = 1e-3 * data[1:4].T
values = data[4]
order = np.argsort(vertices)
vertices = vertices[order]
pos = pos[order]
values = values[order]
label = Label(vertices, pos, values, hemi, comment, name, filename,
subject, color)
return label
@verbose
def write_label(filename, label, verbose=None):
"""Write a FreeSurfer label
Parameters
----------
filename : string
Path to label file to produce.
label : Label
The label object to save.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Notes
-----
Note that due to file specification limitations, the Label's subject and
color attributes are not saved to disk.
See Also
--------
write_labels_to_annot
"""
hemi = label.hemi
path_head, name = op.split(filename)
if name.endswith('.label'):
name = name[:-6]
if not (name.startswith(hemi) or name.endswith(hemi)):
name += '-' + hemi
filename = op.join(path_head, name) + '.label'
logger.info('Saving label to : %s' % filename)
with open(filename, 'wb') as fid:
n_vertices = len(label.vertices)
data = np.zeros((n_vertices, 5), dtype=np.float)
data[:, 0] = label.vertices
data[:, 1:4] = 1e3 * label.pos
data[:, 4] = label.values
fid.write(b("#%s\n" % label.comment))
fid.write(b("%d\n" % n_vertices))
for d in data:
fid.write(b("%d %f %f %f %f\n" % tuple(d)))
return label
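# Illustrative sketch (not part of the original module): a small round trip
# through write_label and read_label using a temporary directory. The vertex
# indices and label name are hypothetical.
def _example_label_io():  # pragma: no cover
    import tempfile
    label = Label(np.array([0, 2, 5]), hemi='lh', name='toy-lh',
                  comment='toy')
    tempdir = tempfile.mkdtemp()
    fname = op.join(tempdir, 'toy')  # write_label appends '-lh' and '.label'
    write_label(fname, label)
    label_read = read_label(op.join(tempdir, 'toy-lh.label'))
    return label_read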
def split_label(label, parts=2, subject=None, subjects_dir=None,
freesurfer=False):
"""Split a Label into two or more parts
Parameters
----------
label : Label | str
Label which is to be split (Label object or path to a label file).
parts : int >= 2 | tuple of str
A sequence of strings specifying label names for the new labels (from
posterior to anterior), or the number of new labels to create (default
is 2). If a number is specified, names of the new labels will be the
input label's name with div1, div2 etc. appended.
subject : None | str
Subject which this label belongs to (needed to locate surface file;
should only be specified if it is not specified in the label).
subjects_dir : None | str
Path to SUBJECTS_DIR if it is not set in the environment.
freesurfer : bool
By default (``False``) ``split_label`` uses an algorithm that is
slightly optimized for performance and numerical precision. Set
``freesurfer`` to ``True`` in order to replicate label splits from
FreeSurfer's ``mris_divide_parcellation``.
Returns
-------
labels : list of Label (len = n_parts)
The labels, starting from the lowest to the highest end of the
projection axis.
Notes
-----
Works by finding the label's principal eigen-axis on the spherical surface,
projecting all label vertex coordinates onto this axis and dividing them at
regular spatial intervals.
"""
# find the label
if isinstance(label, BiHemiLabel):
raise TypeError("Can only split labels restricted to one hemisphere.")
elif isinstance(label, string_types):
label = read_label(label)
# find the parts
if np.isscalar(parts):
n_parts = int(parts)
if label.name.endswith(('lh', 'rh')):
basename = label.name[:-3]
name_ext = label.name[-3:]
else:
basename = label.name
name_ext = ''
name_pattern = "%s_div%%i%s" % (basename, name_ext)
names = tuple(name_pattern % i for i in range(1, n_parts + 1))
else:
names = parts
n_parts = len(names)
if n_parts < 2:
raise ValueError("Can't split label into %i parts" % n_parts)
# find the subject
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
if label.subject is None and subject is None:
raise ValueError("The subject needs to be specified.")
elif subject is None:
subject = label.subject
elif label.subject is None:
pass
elif subject != label.subject:
raise ValueError("The label specifies a different subject (%r) from "
"the subject parameter (%r)."
                         % (label.subject, subject))
# find the spherical surface
surf_fname = '.'.join((label.hemi, 'sphere'))
surf_path = os.path.join(subjects_dir, subject, "surf", surf_fname)
surface_points, surface_tris = read_surface(surf_path)
# find the label coordinates on the surface
points = surface_points[label.vertices]
center = np.mean(points, axis=0)
centered_points = points - center
# find the label's normal
if freesurfer:
# find the Freesurfer vertex closest to the center
distance = np.sqrt(np.sum(centered_points ** 2, axis=1))
i_closest = np.argmin(distance)
closest_vertex = label.vertices[i_closest]
# find the normal according to freesurfer convention
idx = np.any(surface_tris == closest_vertex, axis=1)
tris_for_normal = surface_tris[idx]
r1 = surface_points[tris_for_normal[:, 0], :]
r2 = surface_points[tris_for_normal[:, 1], :]
r3 = surface_points[tris_for_normal[:, 2], :]
tri_normals = fast_cross_3d((r2 - r1), (r3 - r1))
normal = np.mean(tri_normals, axis=0)
normal /= linalg.norm(normal)
else:
# Normal of the center
normal = center / linalg.norm(center)
# project all vertex coordinates on the tangential plane for this point
q, _ = linalg.qr(normal[:, np.newaxis])
tangent_u = q[:, 1:]
m_obs = np.dot(centered_points, tangent_u)
# find principal eigendirection
m_cov = np.dot(m_obs.T, m_obs)
w, vr = linalg.eig(m_cov)
i = np.argmax(w)
eigendir = vr[:, i]
# project back into 3d space
axis = np.dot(tangent_u, eigendir)
# orient them from posterior to anterior
if axis[1] < 0:
axis *= -1
# project the label on the axis
proj = np.dot(points, axis)
# assign mark (new label index)
proj -= proj.min()
proj /= (proj.max() / n_parts)
mark = proj // 1
mark[mark == n_parts] = n_parts - 1
# colors
if label.color is None:
colors = (None,) * n_parts
else:
colors = _split_colors(label.color, n_parts)
# construct new labels
labels = []
for i, name, color in zip(range(n_parts), names, colors):
idx = (mark == i)
vert = label.vertices[idx]
pos = label.pos[idx]
values = label.values[idx]
hemi = label.hemi
comment = label.comment
lbl = Label(vert, pos, values, hemi, comment, name, None, subject,
color)
labels.append(lbl)
return labels
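# Illustrative sketch (not part of the original module): splitting an
# anatomical label into three sub-labels along its posterior-anterior axis.
# The subject name and label file path are hypothetical and require a
# FreeSurfer subjects directory with the matching sphere surface.
def _example_split_label(subjects_dir):  # pragma: no cover
    fname = op.join(subjects_dir, 'fsaverage', 'label', 'lh.BA1.label')
    label = read_label(fname, subject='fsaverage')
    parts = split_label(label, parts=3, subjects_dir=subjects_dir)
    # parts[0] is the most posterior division, parts[-1] the most anterior.
    return parts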
def label_sign_flip(label, src):
"""Compute sign for label averaging
Parameters
----------
label : Label
A label.
src : list of dict
The source space over which the label is defined.
Returns
-------
flip : array
Sign flip vector (contains 1 or -1)
"""
if len(src) != 2:
        raise ValueError('Only source spaces with 2 hemispheres are accepted')
lh_vertno = src[0]['vertno']
rh_vertno = src[1]['vertno']
# get source orientations
if label.hemi == 'lh':
vertno_sel = np.intersect1d(lh_vertno, label.vertices)
if len(vertno_sel) == 0:
return np.array([], int)
ori = src[0]['nn'][vertno_sel]
elif label.hemi == 'rh':
vertno_sel = np.intersect1d(rh_vertno, label.vertices)
if len(vertno_sel) == 0:
return np.array([], int)
ori = src[1]['nn'][vertno_sel]
else:
raise Exception("Unknown hemisphere type")
_, _, Vh = linalg.svd(ori, full_matrices=False)
# Comparing to the direction of the first right singular vector
flip = np.sign(np.dot(ori, Vh[:, 0] if len(vertno_sel) > 3 else Vh[0]))
return flip
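# Illustrative sketch (not part of the original module): sign flips computed
# against a toy two-hemisphere source-space-like structure. Only the fields
# that label_sign_flip actually reads ('vertno' and 'nn') are provided; a real
# call would use a SourceSpaces object instead.
def _example_sign_flip():  # pragma: no cover
    normals = np.tile([0., 0., 1.], (10, 1))   # identical source normals
    src_like = [dict(vertno=np.arange(10), nn=normals),
                dict(vertno=np.arange(10), nn=normals)]
    label = Label(np.array([1, 3, 5]), hemi='lh', name='toy-lh')
    flip = label_sign_flip(label, src_like)    # entries are all +1 or all -1
    return flip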
def stc_to_label(stc, src=None, smooth=True, connected=False,
subjects_dir=None):
"""Compute a label from the non-zero sources in an stc object.
Parameters
----------
stc : SourceEstimate
The source estimates.
src : SourceSpaces | str | None
The source space over which the source estimates are defined.
        If it's a string it should be the subject name (e.g. fsaverage).
Can be None if stc.subject is not None.
smooth : bool
Fill in vertices on the cortical surface that are not in the source
space based on the closest source space vertex (requires
src to be a SourceSpace).
connected : bool
If True a list of connected labels will be returned in each
        hemisphere. The labels are ordered in decreasing order depending
        on the maximum value in the stc.
subjects_dir : str | None
Path to SUBJECTS_DIR if it is not set in the environment.
Returns
-------
labels : list of Labels | list of list of Labels
The generated labels. If connected is False, it returns
a list of Labels (one per hemisphere). If no Label is available
in a hemisphere, None is returned. If connected is True,
it returns for each hemisphere a list of connected labels
        ordered in decreasing order depending on the maximum value in the stc.
        If no Label is available in a hemisphere, an empty list is returned.
"""
if not isinstance(smooth, bool):
raise ValueError('smooth should be True or False. Got %s.' % smooth)
src = stc.subject if src is None else src
if src is None:
raise ValueError('src cannot be None if stc.subject is None')
if isinstance(src, string_types):
subject = src
else:
subject = stc.subject
if not isinstance(stc, SourceEstimate):
raise ValueError('SourceEstimate should be surface source estimates')
if isinstance(src, string_types):
if connected:
raise ValueError('The option to return only connected labels is '
'only available if source spaces are provided.')
if smooth:
msg = ("stc_to_label with smooth=True requires src to be an "
"instance of SourceSpace")
raise ValueError(msg)
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
surf_path_from = op.join(subjects_dir, src, 'surf')
rr_lh, tris_lh = read_surface(op.join(surf_path_from, 'lh.white'))
rr_rh, tris_rh = read_surface(op.join(surf_path_from, 'rh.white'))
rr = [rr_lh, rr_rh]
tris = [tris_lh, tris_rh]
else:
if not isinstance(src, SourceSpaces):
raise TypeError('src must be a string or a set of source spaces')
if len(src) != 2:
raise ValueError('source space should contain the 2 hemispheres')
rr = [1e3 * src[0]['rr'], 1e3 * src[1]['rr']]
tris = [src[0]['tris'], src[1]['tris']]
src_conn = spatial_src_connectivity(src).tocsr()
labels = []
cnt = 0
cnt_full = 0
for hemi_idx, (hemi, this_vertno, this_tris, this_rr) in enumerate(
zip(['lh', 'rh'], stc.vertices, tris, rr)):
this_data = stc.data[cnt:cnt + len(this_vertno)]
e = mesh_edges(this_tris)
e.data[e.data == 2] = 1
n_vertices = e.shape[0]
e = e + sparse.eye(n_vertices, n_vertices)
if connected: # we know src *must* be a SourceSpaces now
vertno = np.where(src[hemi_idx]['inuse'])[0]
if not len(np.setdiff1d(this_vertno, vertno)) == 0:
raise RuntimeError('stc contains vertices not present '
'in source space, did you morph?')
tmp = np.zeros((len(vertno), this_data.shape[1]))
this_vertno_idx = np.searchsorted(vertno, this_vertno)
tmp[this_vertno_idx] = this_data
this_data = tmp
offset = cnt_full + len(this_data)
this_src_conn = src_conn[cnt_full:offset, cnt_full:offset].tocoo()
this_data_abs_max = np.abs(this_data).max(axis=1)
clusters, _ = _find_clusters(this_data_abs_max, 0.,
connectivity=this_src_conn)
cnt_full += len(this_data)
# Then order clusters in descending order based on maximum value
clusters_max = np.argsort([np.max(this_data_abs_max[c])
for c in clusters])[::-1]
clusters = [clusters[k] for k in clusters_max]
clusters = [vertno[c] for c in clusters]
else:
clusters = [this_vertno[np.any(this_data, axis=1)]]
cnt += len(this_vertno)
clusters = [c for c in clusters if len(c) > 0]
if len(clusters) == 0:
if not connected:
this_labels = None
else:
this_labels = []
else:
this_labels = []
colors = _n_colors(len(clusters))
for c, color in zip(clusters, colors):
idx_use = c
label = Label(idx_use, this_rr[idx_use], None, hemi,
'Label from stc', subject=subject,
color=color)
if smooth:
label = label.fill(src)
this_labels.append(label)
if not connected:
this_labels = this_labels[0]
labels.append(this_labels)
return labels
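# Illustrative sketch (not part of the original module): turning the non-zero
# vertices of a (typically thresholded) SourceEstimate into one label per
# hemisphere. The `stc` and `src` objects are assumed to exist, e.g. from an
# inverse solution and its source space.
def _example_stc_to_label(stc, src):  # pragma: no cover
    label_lh, label_rh = stc_to_label(stc, src=src, smooth=True,
                                      connected=False)
    return label_lh, label_rh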
def _verts_within_dist(graph, sources, max_dist):
"""Find all vertices wihin a maximum geodesic distance from source
Parameters
----------
graph : scipy.sparse.csr_matrix
Sparse matrix with distances between adjacent vertices.
sources : list of int
Source vertices.
max_dist : float
Maximum geodesic distance.
Returns
-------
verts : array
Vertices within max_dist.
dist : array
Distances from source vertex.
"""
dist_map = {}
verts_added_last = []
for source in sources:
dist_map[source] = 0
verts_added_last.append(source)
# add neighbors until no more neighbors within max_dist can be found
while len(verts_added_last) > 0:
verts_added = []
for i in verts_added_last:
v_dist = dist_map[i]
row = graph[i, :]
neighbor_vert = row.indices
neighbor_dist = row.data
for j, d in zip(neighbor_vert, neighbor_dist):
n_dist = v_dist + d
if j in dist_map:
if n_dist < dist_map[j]:
dist_map[j] = n_dist
else:
if n_dist <= max_dist:
dist_map[j] = n_dist
# we found a new vertex within max_dist
verts_added.append(j)
verts_added_last = verts_added
verts = np.sort(np.array(list(dist_map.keys()), dtype=np.int))
dist = np.array([dist_map[v] for v in verts])
return verts, dist
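# Illustrative sketch (not part of the original module): neighbourhood growth
# on a tiny path graph 0-1-2-3 with unit edge lengths. Starting at vertex 0
# with max_dist=2.0, vertices 0, 1 and 2 are reachable (distances 0, 1 and 2)
# while vertex 3 (distance 3) is not.
def _example_verts_within_dist():  # pragma: no cover
    rows = np.array([0, 1, 1, 2, 2, 3])
    cols = np.array([1, 0, 2, 1, 3, 2])
    vals = np.ones(6)
    graph = sparse.csr_matrix((vals, (rows, cols)), shape=(4, 4))
    verts, dist = _verts_within_dist(graph, [0], max_dist=2.0)
    return verts, dist  # -> (array([0, 1, 2]), array([0., 1., 2.]))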
def _grow_labels(seeds, extents, hemis, names, dist, vert, subject):
"""Helper for parallelization of grow_labels
"""
labels = []
for seed, extent, hemi, name in zip(seeds, extents, hemis, names):
label_verts, label_dist = _verts_within_dist(dist[hemi], seed, extent)
# create a label
if len(seed) == 1:
seed_repr = str(seed)
else:
seed_repr = ','.join(map(str, seed))
comment = 'Circular label: seed=%s, extent=%0.1fmm' % (seed_repr,
extent)
label = Label(vertices=label_verts,
pos=vert[hemi][label_verts],
values=label_dist,
hemi=hemi,
comment=comment,
name=str(name),
subject=subject)
labels.append(label)
return labels
def grow_labels(subject, seeds, extents, hemis, subjects_dir=None, n_jobs=1,
overlap=True, names=None, surface='white'):
"""Generate circular labels in source space with region growing
This function generates a number of labels in source space by growing
regions starting from the vertices defined in "seeds". For each seed, a
label is generated containing all vertices within a maximum geodesic
distance on the white matter surface from the seed.
Note: "extents" and "hemis" can either be arrays with the same length as
seeds, which allows using a different extent and hemisphere for each
label, or integers, in which case the same extent and hemisphere is
used for each label.
Parameters
----------
subject : string
Name of the subject as in SUBJECTS_DIR.
seeds : int | list
Seed, or list of seeds. Each seed can be either a vertex number or
a list of vertex numbers.
extents : array | float
Extents (radius in mm) of the labels.
hemis : array | int
Hemispheres to use for the labels (0: left, 1: right).
subjects_dir : string
Path to SUBJECTS_DIR if not set in the environment.
n_jobs : int
Number of jobs to run in parallel. Likely only useful if tens
or hundreds of labels are being expanded simultaneously. Does not
apply with ``overlap=False``.
overlap : bool
Produce overlapping labels. If True (default), the resulting labels
can be overlapping. If False, each label will be grown one step at a
time, and occupied territory will not be invaded.
names : None | list of str
Assign names to the new labels (list needs to have the same length as
seeds).
surface : string
The surface used to grow the labels, defaults to the white surface.
Returns
-------
labels : list of Label
        The generated labels. Each label's ``comment`` attribute contains
        information on the seed vertex and extent; the ``values`` attribute
        contains the distances from the seed in millimeters.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
n_jobs = check_n_jobs(n_jobs)
# make sure the inputs are arrays
if np.isscalar(seeds):
seeds = [seeds]
seeds = np.atleast_1d([np.atleast_1d(seed) for seed in seeds])
extents = np.atleast_1d(extents)
hemis = np.atleast_1d(hemis)
n_seeds = len(seeds)
if len(extents) != 1 and len(extents) != n_seeds:
raise ValueError('The extents parameter has to be of length 1 or '
'len(seeds)')
if len(hemis) != 1 and len(hemis) != n_seeds:
raise ValueError('The hemis parameter has to be of length 1 or '
'len(seeds)')
# make the arrays the same length as seeds
if len(extents) == 1:
extents = np.tile(extents, n_seeds)
if len(hemis) == 1:
hemis = np.tile(hemis, n_seeds)
hemis = np.array(['lh' if h == 0 else 'rh' for h in hemis])
# names
if names is None:
names = ["Label_%i-%s" % items for items in enumerate(hemis)]
else:
if np.isscalar(names):
names = [names]
if len(names) != n_seeds:
raise ValueError('The names parameter has to be None or have '
'length len(seeds)')
for i, hemi in enumerate(hemis):
if not names[i].endswith(hemi):
names[i] = '-'.join((names[i], hemi))
names = np.array(names)
# load the surfaces and create the distance graphs
tris, vert, dist = {}, {}, {}
for hemi in set(hemis):
surf_fname = op.join(subjects_dir, subject, 'surf', hemi + '.' +
surface)
vert[hemi], tris[hemi] = read_surface(surf_fname)
dist[hemi] = mesh_dist(tris[hemi], vert[hemi])
if overlap:
# create the patches
parallel, my_grow_labels, _ = parallel_func(_grow_labels, n_jobs)
seeds = np.array_split(seeds, n_jobs)
extents = np.array_split(extents, n_jobs)
hemis = np.array_split(hemis, n_jobs)
names = np.array_split(names, n_jobs)
labels = sum(parallel(my_grow_labels(s, e, h, n, dist, vert, subject)
for s, e, h, n
in zip(seeds, extents, hemis, names)), [])
else:
# special procedure for non-overlapping labels
labels = _grow_nonoverlapping_labels(subject, seeds, extents, hemis,
vert, dist, names)
# add a unique color to each label
colors = _n_colors(len(labels))
for label, color in zip(labels, colors):
label.color = color
return labels
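# Illustrative usage sketch for `grow_labels` (the subject name, seed
# vertices and SUBJECTS_DIR path are placeholders, not values taken from
# this repository):
#
#     labels = grow_labels('sample', seeds=[1440, 2050], extents=5.,
#                          hemis=[0, 1], subjects_dir='/data/subjects')
#     # with the default `names=None` this yields labels named
#     # 'Label_0-lh' and 'Label_1-rh', one 5 mm patch per seed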
def _grow_nonoverlapping_labels(subject, seeds_, extents_, hemis, vertices_,
graphs, names_):
"""Grow labels while ensuring that they don't overlap
"""
labels = []
for hemi in set(hemis):
hemi_index = (hemis == hemi)
seeds = seeds_[hemi_index]
extents = extents_[hemi_index]
names = names_[hemi_index]
graph = graphs[hemi] # distance graph
n_vertices = len(vertices_[hemi])
n_labels = len(seeds)
# prepare parcellation
parc = np.empty(n_vertices, dtype='int32')
parc[:] = -1
# initialize active sources
sources = {} # vert -> (label, dist_from_seed)
edge = [] # queue of vertices to process
for label, seed in enumerate(seeds):
if np.any(parc[seed] >= 0):
raise ValueError("Overlapping seeds")
parc[seed] = label
for s in np.atleast_1d(seed):
sources[s] = (label, 0.)
edge.append(s)
# grow from sources
while edge:
vert_from = edge.pop(0)
label, old_dist = sources[vert_from]
# add neighbors within allowable distance
row = graph[vert_from, :]
for vert_to, dist in zip(row.indices, row.data):
new_dist = old_dist + dist
# abort if outside of extent
if new_dist > extents[label]:
continue
vert_to_label = parc[vert_to]
if vert_to_label >= 0:
_, vert_to_dist = sources[vert_to]
# abort if the vertex is occupied by a closer seed
if new_dist > vert_to_dist:
continue
elif vert_to in edge:
edge.remove(vert_to)
# assign label value
parc[vert_to] = label
sources[vert_to] = (label, new_dist)
edge.append(vert_to)
# convert parc to labels
for i in xrange(n_labels):
vertices = np.nonzero(parc == i)[0]
name = str(names[i])
label_ = Label(vertices, hemi=hemi, name=name, subject=subject)
labels.append(label_)
return labels
def _read_annot(fname):
"""Read a Freesurfer annotation from a .annot file.
    Note: Copied from PySurfer
Parameters
----------
fname : str
Path to annotation file
Returns
-------
annot : numpy array, shape=(n_verts)
Annotation id at each vertex
ctab : numpy array, shape=(n_entries, 5)
RGBA + label id colortable array
names : list of str
List of region names as stored in the annot file
"""
if not op.isfile(fname):
dir_name = op.split(fname)[0]
if not op.isdir(dir_name):
            raise IOError('Directory for annotation does not exist: %s'
                          % fname)
cands = os.listdir(dir_name)
cands = [c for c in cands if '.annot' in c]
if len(cands) == 0:
raise IOError('No such file %s, no candidate parcellations '
'found in directory' % fname)
else:
raise IOError('No such file %s, candidate parcellations in '
'that directory: %s' % (fname, ', '.join(cands)))
with open(fname, "rb") as fid:
n_verts = np.fromfile(fid, '>i4', 1)[0]
data = np.fromfile(fid, '>i4', n_verts * 2).reshape(n_verts, 2)
annot = data[data[:, 0], 1]
ctab_exists = np.fromfile(fid, '>i4', 1)[0]
if not ctab_exists:
raise Exception('Color table not found in annotation file')
n_entries = np.fromfile(fid, '>i4', 1)[0]
if n_entries > 0:
length = np.fromfile(fid, '>i4', 1)[0]
orig_tab = np.fromfile(fid, '>c', length)
orig_tab = orig_tab[:-1]
names = list()
ctab = np.zeros((n_entries, 5), np.int)
for i in range(n_entries):
name_length = np.fromfile(fid, '>i4', 1)[0]
name = np.fromfile(fid, "|S%d" % name_length, 1)[0]
names.append(name)
ctab[i, :4] = np.fromfile(fid, '>i4', 4)
ctab[i, 4] = (ctab[i, 0] + ctab[i, 1] * (2 ** 8) +
ctab[i, 2] * (2 ** 16) +
ctab[i, 3] * (2 ** 24))
else:
ctab_version = -n_entries
if ctab_version != 2:
raise Exception('Color table version not supported')
n_entries = np.fromfile(fid, '>i4', 1)[0]
ctab = np.zeros((n_entries, 5), np.int)
length = np.fromfile(fid, '>i4', 1)[0]
np.fromfile(fid, "|S%d" % length, 1) # Orig table path
entries_to_read = np.fromfile(fid, '>i4', 1)[0]
names = list()
for i in range(entries_to_read):
np.fromfile(fid, '>i4', 1) # Structure
name_length = np.fromfile(fid, '>i4', 1)[0]
name = np.fromfile(fid, "|S%d" % name_length, 1)[0]
names.append(name)
ctab[i, :4] = np.fromfile(fid, '>i4', 4)
ctab[i, 4] = (ctab[i, 0] + ctab[i, 1] * (2 ** 8) +
ctab[i, 2] * (2 ** 16))
# convert to more common alpha value
ctab[:, 3] = 255 - ctab[:, 3]
return annot, ctab, names
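# The last colortable column built above is the annotation id that is stored
# per vertex: id = R + G * 2**8 + B * 2**16 (the version-1 branch also adds
# the transparency byte * 2**24). As a worked example with a hypothetical
# RGB triple:
#
#     r, g, b = 220, 20, 20
#     annot_id = r + g * 2 ** 8 + b * 2 ** 16   # 220 + 5120 + 1310720
#     # annot_id == 1316060; vertices carrying this value in `annot` belong
#     # to the region whose colortable row has color (220, 20, 20)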
def _get_annot_fname(annot_fname, subject, hemi, parc, subjects_dir):
"""Helper function to get the .annot filenames and hemispheres"""
if annot_fname is not None:
        # we use the .annot file specified by the user
hemis = [op.basename(annot_fname)[:2]]
if hemis[0] not in ['lh', 'rh']:
raise ValueError('Could not determine hemisphere from filename, '
'filename has to start with "lh" or "rh".')
annot_fname = [annot_fname]
else:
# construct .annot file names for requested subject, parc, hemi
if hemi not in ['lh', 'rh', 'both']:
raise ValueError('hemi has to be "lh", "rh", or "both"')
if hemi == 'both':
hemis = ['lh', 'rh']
else:
hemis = [hemi]
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
dst = op.join(subjects_dir, subject, 'label', '%%s.%s.annot' % parc)
annot_fname = [dst % hemi_ for hemi_ in hemis]
return annot_fname, hemis
@verbose
def read_labels_from_annot(subject, parc='aparc', hemi='both',
surf_name='white', annot_fname=None, regexp=None,
subjects_dir=None, verbose=None):
"""Read labels from a FreeSurfer annotation file
Note: Only cortical labels will be returned.
Parameters
----------
subject : str
        The subject for which to read the parcellation.
parc : str
The parcellation to use, e.g., 'aparc' or 'aparc.a2009s'.
hemi : str
The hemisphere to read the parcellation for, can be 'lh', 'rh',
or 'both'.
surf_name : str
Surface used to obtain vertex locations, e.g., 'white', 'pial'
annot_fname : str or None
Filename of the .annot file. If not None, only this file is read
and 'parc' and 'hemi' are ignored.
regexp : str
Regular expression or substring to select particular labels from the
parcellation. E.g. 'superior' will return all labels in which this
substring is contained.
subjects_dir : string, or None
Path to SUBJECTS_DIR if it is not set in the environment.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
labels : list of Label
The labels, sorted by label name (ascending).
"""
logger.info('Reading labels from parcellation..')
subjects_dir = get_subjects_dir(subjects_dir)
# get the .annot filenames and hemispheres
annot_fname, hemis = _get_annot_fname(annot_fname, subject, hemi, parc,
subjects_dir)
if regexp is not None:
# allow for convenient substring match
r_ = (re.compile('.*%s.*' % regexp if regexp.replace('_', '').isalnum()
else regexp))
# now we are ready to create the labels
n_read = 0
labels = list()
for fname, hemi in zip(annot_fname, hemis):
# read annotation
annot, ctab, label_names = _read_annot(fname)
label_rgbas = ctab[:, :4]
label_ids = ctab[:, -1]
# load the vertex positions from surface
fname_surf = op.join(subjects_dir, subject, 'surf',
'%s.%s' % (hemi, surf_name))
vert_pos, _ = read_surface(fname_surf)
vert_pos /= 1e3 # the positions in labels are in meters
for label_id, label_name, label_rgba in\
zip(label_ids, label_names, label_rgbas):
vertices = np.where(annot == label_id)[0]
if len(vertices) == 0:
# label is not part of cortical surface
continue
name = label_name.decode() + '-' + hemi
if (regexp is not None) and not r_.match(name):
continue
pos = vert_pos[vertices, :]
values = np.zeros(len(vertices))
label_rgba = tuple(label_rgba / 255.)
label = Label(vertices, pos, values, hemi, name=name,
subject=subject, color=label_rgba)
labels.append(label)
n_read = len(labels) - n_read
logger.info(' read %d labels from %s' % (n_read, fname))
# sort the labels by label name
labels = sorted(labels, key=lambda l: l.name)
if len(labels) == 0:
msg = 'No labels found.'
if regexp is not None:
msg += ' Maybe the regular expression %r did not match?' % regexp
raise RuntimeError(msg)
logger.info('[done]')
return labels
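# Illustrative usage sketch for `read_labels_from_annot` (subject name and
# SUBJECTS_DIR are placeholders):
#
#     labels = read_labels_from_annot('sample', parc='aparc', hemi='lh',
#                                     subjects_dir='/data/subjects')
#     # restrict to labels whose name contains 'superior'
#     sup = read_labels_from_annot('sample', parc='aparc', hemi='lh',
#                                  regexp='superior',
#                                  subjects_dir='/data/subjects')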
def _write_annot(fname, annot, ctab, names):
"""Write a Freesurfer annotation to a .annot file.
Parameters
----------
fname : str
Path to annotation file
annot : numpy array, shape=(n_verts)
Annotation id at each vertex. Note: IDs must be computed from
RGBA colors, otherwise the mapping will be invalid.
ctab : numpy array, shape=(n_entries, 4)
RGBA colortable array.
names : list of str
List of region names to be stored in the annot file
"""
with open(fname, 'wb') as fid:
n_verts = len(annot)
np.array(n_verts, dtype='>i4').tofile(fid)
data = np.zeros((n_verts, 2), dtype='>i4')
data[:, 0] = np.arange(n_verts)
data[:, 1] = annot
data.ravel().tofile(fid)
# indicate that color table exists
np.array(1, dtype='>i4').tofile(fid)
# color table version 2
np.array(-2, dtype='>i4').tofile(fid)
# write color table
n_entries = len(ctab)
np.array(n_entries, dtype='>i4').tofile(fid)
# write dummy color table name
table_name = 'MNE-Python Colortable'
np.array(len(table_name), dtype='>i4').tofile(fid)
np.fromstring(table_name, dtype=np.uint8).tofile(fid)
# number of entries to write
np.array(n_entries, dtype='>i4').tofile(fid)
# write entries
for ii, (name, color) in enumerate(zip(names, ctab)):
np.array(ii, dtype='>i4').tofile(fid)
np.array(len(name), dtype='>i4').tofile(fid)
np.fromstring(name, dtype=np.uint8).tofile(fid)
np.array(color[:4], dtype='>i4').tofile(fid)
@verbose
def write_labels_to_annot(labels, subject=None, parc=None, overwrite=False,
subjects_dir=None, annot_fname=None,
colormap='hsv', hemi='both', verbose=None):
"""Create a FreeSurfer annotation from a list of labels
Parameters
----------
labels : list with instances of mne.Label
The labels to create a parcellation from.
subject : str | None
        The subject for which to write the parcellation.
parc : str | None
The parcellation name to use.
overwrite : bool
Overwrite files if they already exist.
subjects_dir : string, or None
Path to SUBJECTS_DIR if it is not set in the environment.
annot_fname : str | None
Filename of the .annot file. If not None, only this file is written
and 'parc' and 'subject' are ignored.
colormap : str
Colormap to use to generate label colors for labels that do not
have a color specified.
hemi : 'both' | 'lh' | 'rh'
The hemisphere(s) for which to write \*.annot files (only applies if
annot_fname is not specified; default is 'both').
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Notes
-----
Vertices that are not covered by any of the labels are assigned to a label
named "unknown".
"""
logger.info('Writing labels to parcellation..')
subjects_dir = get_subjects_dir(subjects_dir)
# get the .annot filenames and hemispheres
annot_fname, hemis = _get_annot_fname(annot_fname, subject, hemi, parc,
subjects_dir)
if not overwrite:
for fname in annot_fname:
if op.exists(fname):
raise ValueError('File %s exists. Use "overwrite=True" to '
'overwrite it' % fname)
# prepare container for data to save:
to_save = []
# keep track of issues found in the labels
duplicate_colors = []
invalid_colors = []
overlap = []
no_color = (-1, -1, -1, -1)
no_color_rgb = (-1, -1, -1)
for hemi, fname in zip(hemis, annot_fname):
hemi_labels = [label for label in labels if label.hemi == hemi]
n_hemi_labels = len(hemi_labels)
if n_hemi_labels == 0:
ctab = np.empty((0, 4), dtype=np.int32)
ctab_rgb = ctab[:, :3]
else:
hemi_labels.sort(key=lambda label: label.name)
# convert colors to 0-255 RGBA tuples
hemi_colors = [no_color if label.color is None else
tuple(int(round(255 * i)) for i in label.color)
for label in hemi_labels]
ctab = np.array(hemi_colors, dtype=np.int32)
ctab_rgb = ctab[:, :3]
# make color dict (for annot ID, only R, G and B count)
labels_by_color = defaultdict(list)
for label, color in zip(hemi_labels, ctab_rgb):
labels_by_color[tuple(color)].append(label.name)
# check label colors
for color, names in labels_by_color.items():
if color == no_color_rgb:
continue
if color == (0, 0, 0):
                    # we cannot have an all-zero color, otherwise e.g.
                    # tksurfer refuses to read the parcellation
                    msg = ('At least one label contains a color with "r=0, '
                           'g=0, b=0" values. Some FreeSurfer tools may fail '
                           'to read the parcellation')
logger.warning(msg)
if any(i > 255 for i in color):
msg = ("%s: %s (%s)" % (color, ', '.join(names), hemi))
invalid_colors.append(msg)
if len(names) > 1:
msg = "%s: %s (%s)" % (color, ', '.join(names), hemi)
duplicate_colors.append(msg)
# replace None values (labels with unspecified color)
if labels_by_color[no_color_rgb]:
default_colors = _n_colors(n_hemi_labels, bytes_=True,
cmap=colormap)
# keep track of colors known to be in hemi_colors :
safe_color_i = 0
for i in xrange(n_hemi_labels):
if ctab[i, 0] == -1:
color = default_colors[i]
# make sure to add no duplicate color
while np.any(np.all(color[:3] == ctab_rgb, 1)):
color = default_colors[safe_color_i]
safe_color_i += 1
# assign the color
ctab[i] = color
# find number of vertices in surface
if subject is not None and subjects_dir is not None:
fpath = os.path.join(subjects_dir, subject, 'surf',
'%s.white' % hemi)
points, _ = read_surface(fpath)
n_vertices = len(points)
else:
if len(hemi_labels) > 0:
max_vert = max(np.max(label.vertices) for label in hemi_labels)
n_vertices = max_vert + 1
else:
n_vertices = 1
msg = (' Number of vertices in the surface could not be '
'verified because the surface file could not be found; '
'specify subject and subjects_dir parameters.')
logger.warning(msg)
# Create annot and color table array to write
annot = np.empty(n_vertices, dtype=np.int)
annot[:] = -1
# create the annotation ids from the colors
annot_id_coding = np.array((1, 2 ** 8, 2 ** 16))
annot_ids = list(np.sum(ctab_rgb * annot_id_coding, axis=1))
for label, annot_id in zip(hemi_labels, annot_ids):
# make sure the label is not overwriting another label
if np.any(annot[label.vertices] != -1):
other_ids = set(annot[label.vertices])
other_ids.discard(-1)
other_indices = (annot_ids.index(i) for i in other_ids)
other_names = (hemi_labels[i].name for i in other_indices)
other_repr = ', '.join(other_names)
msg = "%s: %s overlaps %s" % (hemi, label.name, other_repr)
overlap.append(msg)
annot[label.vertices] = annot_id
hemi_names = [label.name for label in hemi_labels]
if None in hemi_names:
msg = ("Found %i labels with no name. Writing annotation file"
"requires all labels named" % (hemi_names.count(None)))
# raise the error immediately rather than crash with an
# uninformative error later (e.g. cannot join NoneType)
raise ValueError(msg)
# Assign unlabeled vertices to an "unknown" label
unlabeled = (annot == -1)
if np.any(unlabeled):
msg = ("Assigning %i unlabeled vertices to "
"'unknown-%s'" % (unlabeled.sum(), hemi))
logger.info(msg)
# find an unused color (try shades of gray first)
for i in range(1, 257):
if not np.any(np.all((i, i, i) == ctab_rgb, 1)):
break
if i < 256:
color = (i, i, i, 0)
else:
err = ("Need one free shade of gray for 'unknown' label. "
"Please modify your label colors, or assign the "
"unlabeled vertices to another label.")
raise ValueError(err)
# find the id
annot_id = np.sum(annot_id_coding * color[:3])
# update data to write
annot[unlabeled] = annot_id
ctab = np.vstack((ctab, color))
hemi_names.append("unknown")
# convert to FreeSurfer alpha values
ctab[:, 3] = 255 - ctab[:, 3]
# remove hemi ending in names
hemi_names = [name[:-3] if name.endswith(hemi) else name
for name in hemi_names]
to_save.append((fname, annot, ctab, hemi_names))
issues = []
if duplicate_colors:
msg = ("Some labels have the same color values (all labels in one "
"hemisphere must have a unique color):")
duplicate_colors.insert(0, msg)
issues.append(os.linesep.join(duplicate_colors))
if invalid_colors:
msg = ("Some labels have invalid color values (all colors should be "
"RGBA tuples with values between 0 and 1)")
invalid_colors.insert(0, msg)
issues.append(os.linesep.join(invalid_colors))
if overlap:
msg = ("Some labels occupy vertices that are also occupied by one or "
"more other labels. Each vertex can only be occupied by a "
"single label in *.annot files.")
overlap.insert(0, msg)
issues.append(os.linesep.join(overlap))
if issues:
raise ValueError('\n\n'.join(issues))
# write it
for fname, annot, ctab, hemi_names in to_save:
logger.info(' writing %d labels to %s' % (len(hemi_names), fname))
_write_annot(fname, annot, ctab, hemi_names)
logger.info('[done]')
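# Illustrative round-trip sketch combining the reader and writer above
# (subject, parcellation name and path are placeholders):
#
#     labels = read_labels_from_annot('sample', parc='aparc',
#                                     subjects_dir='/data/subjects')
#     write_labels_to_annot(labels, subject='sample', parc='mycopy',
#                           subjects_dir='/data/subjects', overwrite=True)
#     # every vertex not covered by `labels` ends up in an 'unknown' label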
| bsd-3-clause |
stargaser/astropy | astropy/visualization/wcsaxes/transforms.py | 4 | 5662 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# Note: This file includes code derived from pywcsgrid2
#
# This file contains Matplotlib transformation objects (e.g. from pixel to world
# coordinates, but also world-to-world).
import abc
import numpy as np
from matplotlib.path import Path
from matplotlib.transforms import Transform
from astropy import units as u
from astropy.coordinates import (SkyCoord, frame_transform_graph,
UnitSphericalRepresentation,
BaseCoordinateFrame)
__all__ = ['CurvedTransform', 'CoordinateTransform',
'World2PixelTransform', 'Pixel2WorldTransform']
class CurvedTransform(Transform, metaclass=abc.ABCMeta):
"""
Abstract base class for non-affine curved transforms
"""
input_dims = 2
output_dims = 2
is_separable = False
def transform_path(self, path):
"""
Transform a Matplotlib Path
Parameters
----------
path : :class:`~matplotlib.path.Path`
The path to transform
Returns
-------
path : :class:`~matplotlib.path.Path`
The resulting path
"""
return Path(self.transform(path.vertices), path.codes)
transform_path_non_affine = transform_path
def transform(self, input):
raise NotImplementedError("")
def inverted(self):
raise NotImplementedError("")
class CoordinateTransform(CurvedTransform):
has_inverse = True
def __init__(self, input_system, output_system):
super().__init__()
self._input_system_name = input_system
self._output_system_name = output_system
if isinstance(self._input_system_name, str):
self.input_system = frame_transform_graph.lookup_name(self._input_system_name)
if self.input_system is None:
raise ValueError(f"Frame {self._input_system_name} not found")
elif isinstance(self._input_system_name, BaseCoordinateFrame):
self.input_system = self._input_system_name
else:
raise TypeError("input_system should be a WCS instance, string, or a coordinate frame instance")
if isinstance(self._output_system_name, str):
self.output_system = frame_transform_graph.lookup_name(self._output_system_name)
if self.output_system is None:
raise ValueError(f"Frame {self._output_system_name} not found")
elif isinstance(self._output_system_name, BaseCoordinateFrame):
self.output_system = self._output_system_name
else:
raise TypeError("output_system should be a WCS instance, string, or a coordinate frame instance")
if self.output_system == self.input_system:
self.same_frames = True
else:
self.same_frames = False
@property
def same_frames(self):
return self._same_frames
@same_frames.setter
def same_frames(self, same_frames):
self._same_frames = same_frames
def transform(self, input_coords):
"""
Transform one set of coordinates to another
"""
if self.same_frames:
return input_coords
input_coords = input_coords*u.deg
x_in, y_in = input_coords[:, 0], input_coords[:, 1]
c_in = SkyCoord(UnitSphericalRepresentation(x_in, y_in),
frame=self.input_system)
# We often need to transform arrays that contain NaN values, and filtering
# out the NaN values would have a performance hit, so instead we just pass
# on all values and just ignore Numpy warnings
with np.errstate(all='ignore'):
c_out = c_in.transform_to(self.output_system)
lon = c_out.spherical.lon.deg
lat = c_out.spherical.lat.deg
return np.concatenate((lon[:, np.newaxis], lat[:, np.newaxis]), axis=1)
transform_non_affine = transform
def inverted(self):
"""
Return the inverse of the transform
"""
return CoordinateTransform(self._output_system_name, self._input_system_name)
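# A minimal sketch of using CoordinateTransform on its own, assuming the
# registered frame names 'fk5' and 'galactic'. Inputs are Nx2 arrays of
# (lon, lat) in degrees; the example point is roughly the Galactic center:
#
#     tr = CoordinateTransform('fk5', 'galactic')
#     out = tr.transform(np.array([[266.405, -28.936]]))
#     # out is approximately [[0., 0.]] in Galactic (l, b)
#     back = tr.inverted().transform(out)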
class World2PixelTransform(CurvedTransform, metaclass=abc.ABCMeta):
"""
Base transformation from world to pixel coordinates
"""
has_inverse = True
frame_in = None
@property
@abc.abstractmethod
def input_dims(self):
"""
The number of input world dimensions
"""
@abc.abstractmethod
def transform(self, world):
"""
Transform world to pixel coordinates. You should pass in a NxM array
where N is the number of points to transform, and M is the number of
dimensions. This then returns the (x, y) pixel coordinates
as a Nx2 array.
"""
@abc.abstractmethod
def inverted(self):
"""
Return the inverse of the transform
"""
class Pixel2WorldTransform(CurvedTransform, metaclass=abc.ABCMeta):
"""
Base transformation from pixel to world coordinates
"""
has_inverse = True
frame_out = None
@property
@abc.abstractmethod
def output_dims(self):
"""
The number of output world dimensions
"""
@abc.abstractmethod
def transform(self, pixel):
"""
Transform pixel to world coordinates. You should pass in a Nx2 array
of (x, y) pixel coordinates to transform to world coordinates. This
will then return an NxM array where M is the number of dimensions.
"""
@abc.abstractmethod
def inverted(self):
"""
Return the inverse of the transform
"""
| bsd-3-clause |
xyguo/scikit-learn | sklearn/externals/joblib/parallel.py | 31 | 35665 | """
Helpers for embarrassingly parallel code.
"""
# Author: Gael Varoquaux < gael dot varoquaux at normalesup dot org >
# Copyright: 2010, Gael Varoquaux
# License: BSD 3 clause
from __future__ import division
import os
import sys
import gc
import warnings
from math import sqrt
import functools
import time
import threading
import itertools
from numbers import Integral
try:
import cPickle as pickle
except:
import pickle
from ._multiprocessing_helpers import mp
if mp is not None:
from .pool import MemmapingPool
from multiprocessing.pool import ThreadPool
from .format_stack import format_exc, format_outer_frames
from .logger import Logger, short_format_time
from .my_exceptions import TransportableException, _mk_exception
from .disk import memstr_to_kbytes
from ._compat import _basestring
VALID_BACKENDS = ['multiprocessing', 'threading']
# Environment variables to protect against bad situations when nesting
JOBLIB_SPAWNED_PROCESS = "__JOBLIB_SPAWNED_PARALLEL__"
# In seconds, should be big enough to hide multiprocessing dispatching
# overhead.
# This settings was found by running benchmarks/bench_auto_batching.py
# with various parameters on various platforms.
MIN_IDEAL_BATCH_DURATION = .2
# Should not be too high to avoid stragglers: long jobs running alone
# on a single worker while other workers have no work to process any more.
MAX_IDEAL_BATCH_DURATION = 2
# Under Linux or OS X the default start method of multiprocessing
# can cause third party libraries to crash. Under Python 3.4+ it is possible
# to set an environment variable to switch the default start method from
# 'fork' to 'forkserver' or 'spawn' to avoid this issue albeit at the cost
# of causing semantic changes and some additional pool instantiation overhead.
if hasattr(mp, 'get_context'):
method = os.environ.get('JOBLIB_START_METHOD', '').strip() or None
DEFAULT_MP_CONTEXT = mp.get_context(method=method)
else:
DEFAULT_MP_CONTEXT = None
class BatchedCalls(object):
"""Wrap a sequence of (func, args, kwargs) tuples as a single callable"""
def __init__(self, iterator_slice):
self.items = list(iterator_slice)
self._size = len(self.items)
def __call__(self):
return [func(*args, **kwargs) for func, args, kwargs in self.items]
def __len__(self):
return self._size
###############################################################################
# CPU count that works also when multiprocessing has been disabled via
# the JOBLIB_MULTIPROCESSING environment variable
def cpu_count():
""" Return the number of CPUs.
"""
if mp is None:
return 1
return mp.cpu_count()
###############################################################################
# For verbosity
def _verbosity_filter(index, verbose):
""" Returns False for indices increasingly apart, the distance
depending on the value of verbose.
We use a lag increasing as the square of index
"""
if not verbose:
return True
elif verbose > 10:
return False
if index == 0:
return False
verbose = .5 * (11 - verbose) ** 2
scale = sqrt(index / verbose)
next_scale = sqrt((index + 1) / verbose)
return (int(next_scale) == int(scale))
###############################################################################
class WorkerInterrupt(Exception):
""" An exception that is not KeyboardInterrupt to allow subprocesses
to be interrupted.
"""
pass
###############################################################################
class SafeFunction(object):
""" Wraps a function to make it exception with full traceback in
their representation.
Useful for parallel computing with multiprocessing, for which
exceptions cannot be captured.
"""
def __init__(self, func):
self.func = func
def __call__(self, *args, **kwargs):
try:
return self.func(*args, **kwargs)
except KeyboardInterrupt:
# We capture the KeyboardInterrupt and reraise it as
# something different, as multiprocessing does not
# interrupt processing for a KeyboardInterrupt
raise WorkerInterrupt()
except:
e_type, e_value, e_tb = sys.exc_info()
text = format_exc(e_type, e_value, e_tb, context=10,
tb_offset=1)
raise TransportableException(text, e_type)
###############################################################################
def delayed(function, check_pickle=True):
"""Decorator used to capture the arguments of a function.
Pass `check_pickle=False` when:
- performing a possibly repeated check is too costly and has been done
already once outside of the call to delayed.
    - when used in conjunction with `Parallel(backend='threading')`.
"""
# Try to pickle the input function, to catch the problems early when
# using with multiprocessing:
if check_pickle:
pickle.dumps(function)
def delayed_function(*args, **kwargs):
return function, args, kwargs
try:
delayed_function = functools.wraps(function)(delayed_function)
except AttributeError:
" functools.wraps fails on some callable objects "
return delayed_function
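# A short sketch of what `delayed` produces: it simply packages the call for
# later execution by Parallel, e.g.
#
#     from math import sqrt
#     func, args, kwargs = delayed(sqrt)(16)
#     # func is sqrt, args == (16,), kwargs == {}
#     func(*args, **kwargs)  # -> 4.0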
###############################################################################
class ImmediateComputeBatch(object):
"""Sequential computation of a batch of tasks.
This replicates the async computation API but actually does not delay
the computations when joblib.Parallel runs in sequential mode.
"""
def __init__(self, batch):
# Don't delay the application, to avoid keeping the input
# arguments in memory
self.results = batch()
def get(self):
return self.results
###############################################################################
class BatchCompletionCallBack(object):
"""Callback used by joblib.Parallel's multiprocessing backend.
This callable is executed by the parent process whenever a worker process
has returned the results of a batch of tasks.
It is used for progress reporting, to update estimate of the batch
processing duration and to schedule the next batch of tasks to be
processed.
"""
def __init__(self, dispatch_timestamp, batch_size, parallel):
self.dispatch_timestamp = dispatch_timestamp
self.batch_size = batch_size
self.parallel = parallel
def __call__(self, out):
self.parallel.n_completed_tasks += self.batch_size
this_batch_duration = time.time() - self.dispatch_timestamp
if (self.parallel.batch_size == 'auto'
and self.batch_size == self.parallel._effective_batch_size):
# Update the smoothed streaming estimate of the duration of a batch
# from dispatch to completion
old_duration = self.parallel._smoothed_batch_duration
if old_duration == 0:
# First record of duration for this batch size after the last
# reset.
new_duration = this_batch_duration
else:
# Update the exponentially weighted average of the duration of
# batch for the current effective size.
new_duration = 0.8 * old_duration + 0.2 * this_batch_duration
self.parallel._smoothed_batch_duration = new_duration
self.parallel.print_progress()
if self.parallel._original_iterator is not None:
self.parallel.dispatch_next()
###############################################################################
class Parallel(Logger):
''' Helper class for readable parallel mapping.
Parameters
-----------
n_jobs: int, default: 1
The maximum number of concurrently running jobs, such as the number
of Python worker processes when backend="multiprocessing"
or the size of the thread-pool when backend="threading".
If -1 all CPUs are used. If 1 is given, no parallel computing code
is used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all
CPUs but one are used.
backend: str or None, default: 'multiprocessing'
Specify the parallelization backend implementation.
Supported backends are:
- "multiprocessing" used by default, can induce some
communication and memory overhead when exchanging input and
          output data with the worker Python processes.
- "threading" is a very low-overhead backend but it suffers
from the Python Global Interpreter Lock if the called function
relies a lot on Python objects. "threading" is mostly useful
when the execution bottleneck is a compiled extension that
explicitly releases the GIL (for instance a Cython loop wrapped
in a "with nogil" block or an expensive call to a library such
as NumPy).
verbose: int, optional
The verbosity level: if non zero, progress messages are
printed. Above 50, the output is sent to stdout.
The frequency of the messages increases with the verbosity level.
        If it is more than 10, all iterations are reported.
pre_dispatch: {'all', integer, or expression, as in '3*n_jobs'}
The number of batches (of tasks) to be pre-dispatched.
        Default is '2*n_jobs'. When batch_size="auto" this is a reasonable
        default and the multiprocessing workers should never starve.
batch_size: int or 'auto', default: 'auto'
The number of atomic tasks to dispatch at once to each
worker. When individual evaluations are very fast, multiprocessing
can be slower than sequential computation because of the overhead.
Batching fast computations together can mitigate this.
The ``'auto'`` strategy keeps track of the time it takes for a batch
to complete, and dynamically adjusts the batch size to keep the time
on the order of half a second, using a heuristic. The initial batch
size is 1.
``batch_size="auto"`` with ``backend="threading"`` will dispatch
batches of a single task at a time as the threading backend has
very little overhead and using larger batch size has not proved to
bring any gain in that case.
temp_folder: str, optional
Folder to be used by the pool for memmaping large arrays
for sharing memory with worker processes. If None, this will try in
order:
- a folder pointed by the JOBLIB_TEMP_FOLDER environment variable,
- /dev/shm if the folder exists and is writable: this is a RAMdisk
filesystem available by default on modern Linux distributions,
- the default system temporary folder that can be overridden
with TMP, TMPDIR or TEMP environment variables, typically /tmp
under Unix operating systems.
Only active when backend="multiprocessing".
    max_nbytes: int, str, or None, optional, 1M by default
Threshold on the size of arrays passed to the workers that
triggers automated memory mapping in temp_folder. Can be an int
in Bytes, or a human-readable string, e.g., '1M' for 1 megabyte.
Use None to disable memmaping of large arrays.
Only active when backend="multiprocessing".
Notes
-----
This object uses the multiprocessing module to compute in
parallel the application of a function to many different
    arguments. The main features it brings in addition to
using the raw multiprocessing API are (see examples for details):
* More readable code, in particular since it avoids
constructing list of arguments.
* Easier debugging:
- informative tracebacks even when the error happens on
the client side
            - using 'n_jobs=1' lets you turn off parallel computing
for debugging without changing the codepath
- early capture of pickling errors
* An optional progress meter.
        * Interruption of multiprocess jobs with 'Ctrl-C'
* Flexible pickling control for the communication to and from
the worker processes.
* Ability to use shared memory efficiently with worker
processes for large numpy-based datastructures.
Examples
--------
A simple example:
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
Reshaping the output when the function has several return
values:
>>> from math import modf
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=1)(delayed(modf)(i/2.) for i in range(10))
>>> res, i = zip(*r)
>>> res
(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5)
>>> i
(0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0)
The progress meter: the higher the value of `verbose`, the more
messages::
>>> from time import sleep
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=2, verbose=5)(delayed(sleep)(.1) for _ in range(10)) #doctest: +SKIP
[Parallel(n_jobs=2)]: Done 1 out of 10 | elapsed: 0.1s remaining: 0.9s
[Parallel(n_jobs=2)]: Done 3 out of 10 | elapsed: 0.2s remaining: 0.5s
[Parallel(n_jobs=2)]: Done 6 out of 10 | elapsed: 0.3s remaining: 0.2s
[Parallel(n_jobs=2)]: Done 9 out of 10 | elapsed: 0.5s remaining: 0.1s
[Parallel(n_jobs=2)]: Done 10 out of 10 | elapsed: 0.5s finished
Traceback example, note how the line of the error is indicated
as well as the values of the parameter passed to the function that
triggered the exception, even though the traceback happens in the
child process::
>>> from heapq import nlargest
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=2)(delayed(nlargest)(2, n) for n in (range(4), 'abcde', 3)) #doctest: +SKIP
#...
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
TypeError Mon Nov 12 11:37:46 2012
PID: 12934 Python 2.7.3: /usr/bin/python
...........................................................................
/usr/lib/python2.7/heapq.pyc in nlargest(n=2, iterable=3, key=None)
419 if n >= size:
420 return sorted(iterable, key=key, reverse=True)[:n]
421
422 # When key is none, use simpler decoration
423 if key is None:
--> 424 it = izip(iterable, count(0,-1)) # decorate
425 result = _nlargest(n, it)
426 return map(itemgetter(0), result) # undecorate
427
428 # General case, slowest method
TypeError: izip argument #1 must support iteration
___________________________________________________________________________
Using pre_dispatch in a producer/consumer situation, where the
data is generated on the fly. Note how the producer is first
called a 3 times before the parallel loop is initiated, and then
called to generate new data on the fly. In this case the total
number of iterations cannot be reported in the progress messages::
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> def producer():
... for i in range(6):
... print('Produced %s' % i)
... yield i
>>> out = Parallel(n_jobs=2, verbose=100, pre_dispatch='1.5*n_jobs')(
... delayed(sqrt)(i) for i in producer()) #doctest: +SKIP
Produced 0
Produced 1
Produced 2
[Parallel(n_jobs=2)]: Done 1 jobs | elapsed: 0.0s
Produced 3
[Parallel(n_jobs=2)]: Done 2 jobs | elapsed: 0.0s
Produced 4
[Parallel(n_jobs=2)]: Done 3 jobs | elapsed: 0.0s
Produced 5
[Parallel(n_jobs=2)]: Done 4 jobs | elapsed: 0.0s
[Parallel(n_jobs=2)]: Done 5 out of 6 | elapsed: 0.0s remaining: 0.0s
[Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s finished
'''
def __init__(self, n_jobs=1, backend='multiprocessing', verbose=0,
pre_dispatch='2 * n_jobs', batch_size='auto',
temp_folder=None, max_nbytes='1M', mmap_mode='r'):
self.verbose = verbose
self._mp_context = DEFAULT_MP_CONTEXT
if backend is None:
# `backend=None` was supported in 0.8.2 with this effect
backend = "multiprocessing"
elif hasattr(backend, 'Pool') and hasattr(backend, 'Lock'):
# Make it possible to pass a custom multiprocessing context as
# backend to change the start method to forkserver or spawn or
# preload modules on the forkserver helper process.
self._mp_context = backend
backend = "multiprocessing"
if backend not in VALID_BACKENDS:
raise ValueError("Invalid backend: %s, expected one of %r"
% (backend, VALID_BACKENDS))
self.backend = backend
self.n_jobs = n_jobs
if (batch_size == 'auto'
or isinstance(batch_size, Integral) and batch_size > 0):
self.batch_size = batch_size
else:
raise ValueError(
"batch_size must be 'auto' or a positive integer, got: %r"
% batch_size)
self.pre_dispatch = pre_dispatch
self._temp_folder = temp_folder
if isinstance(max_nbytes, _basestring):
self._max_nbytes = 1024 * memstr_to_kbytes(max_nbytes)
else:
self._max_nbytes = max_nbytes
self._mmap_mode = mmap_mode
# Not starting the pool in the __init__ is a design decision, to be
# able to close it ASAP, and not burden the user with closing it
# unless they choose to use the context manager API with a with block.
self._pool = None
self._output = None
self._jobs = list()
self._managed_pool = False
        # This lock is used to coordinate the main thread of this process
        # with the async callback thread of the pool.
self._lock = threading.Lock()
def __enter__(self):
self._managed_pool = True
self._initialize_pool()
return self
def __exit__(self, exc_type, exc_value, traceback):
self._terminate_pool()
self._managed_pool = False
def _effective_n_jobs(self):
n_jobs = self.n_jobs
if n_jobs == 0:
raise ValueError('n_jobs == 0 in Parallel has no meaning')
elif mp is None or n_jobs is None:
# multiprocessing is not available or disabled, fallback
# to sequential mode
return 1
elif n_jobs < 0:
n_jobs = max(mp.cpu_count() + 1 + n_jobs, 1)
return n_jobs
def _initialize_pool(self):
"""Build a process or thread pool and return the number of workers"""
n_jobs = self._effective_n_jobs()
# The list of exceptions that we will capture
self.exceptions = [TransportableException]
if n_jobs == 1:
# Sequential mode: do not use a pool instance to avoid any
# useless dispatching overhead
self._pool = None
elif self.backend == 'threading':
self._pool = ThreadPool(n_jobs)
elif self.backend == 'multiprocessing':
if mp.current_process().daemon:
# Daemonic processes cannot have children
self._pool = None
warnings.warn(
'Multiprocessing-backed parallel loops cannot be nested,'
' setting n_jobs=1',
stacklevel=3)
return 1
elif threading.current_thread().name != 'MainThread':
            # Prevent posix fork inside non-main posix threads
self._pool = None
warnings.warn(
'Multiprocessing backed parallel loops cannot be nested'
' below threads, setting n_jobs=1',
stacklevel=3)
return 1
else:
already_forked = int(os.environ.get(JOBLIB_SPAWNED_PROCESS, 0))
if already_forked:
raise ImportError('[joblib] Attempting to do parallel computing '
'without protecting your import on a system that does '
'not support forking. To use parallel-computing in a '
'script, you must protect your main loop using "if '
"__name__ == '__main__'"
'". Please see the joblib documentation on Parallel '
'for more information'
)
# Set an environment variable to avoid infinite loops
os.environ[JOBLIB_SPAWNED_PROCESS] = '1'
# Make sure to free as much memory as possible before forking
gc.collect()
poolargs = dict(
max_nbytes=self._max_nbytes,
mmap_mode=self._mmap_mode,
temp_folder=self._temp_folder,
verbose=max(0, self.verbose - 50),
)
if self._mp_context is not None:
# Use Python 3.4+ multiprocessing context isolation
poolargs['context'] = self._mp_context
self._pool = MemmapingPool(n_jobs, **poolargs)
# We are using multiprocessing, we also want to capture
# KeyboardInterrupts
self.exceptions.extend([KeyboardInterrupt, WorkerInterrupt])
else:
raise ValueError("Unsupported backend: %s" % self.backend)
return n_jobs
def _terminate_pool(self):
if self._pool is not None:
self._pool.close()
self._pool.terminate() # terminate does a join()
self._pool = None
if self.backend == 'multiprocessing':
os.environ.pop(JOBLIB_SPAWNED_PROCESS, 0)
def _dispatch(self, batch):
"""Queue the batch for computing, with or without multiprocessing
WARNING: this method is not thread-safe: it should be only called
indirectly via dispatch_one_batch.
"""
# If job.get() catches an exception, it closes the queue:
if self._aborting:
return
if self._pool is None:
job = ImmediateComputeBatch(batch)
self._jobs.append(job)
self.n_dispatched_batches += 1
self.n_dispatched_tasks += len(batch)
self.n_completed_tasks += len(batch)
if not _verbosity_filter(self.n_dispatched_batches, self.verbose):
self._print('Done %3i tasks | elapsed: %s',
(self.n_completed_tasks,
short_format_time(time.time() - self._start_time)
))
else:
dispatch_timestamp = time.time()
cb = BatchCompletionCallBack(dispatch_timestamp, len(batch), self)
job = self._pool.apply_async(SafeFunction(batch), callback=cb)
self._jobs.append(job)
self.n_dispatched_tasks += len(batch)
self.n_dispatched_batches += 1
def dispatch_next(self):
"""Dispatch more data for parallel processing
This method is meant to be called concurrently by the multiprocessing
callback. We rely on the thread-safety of dispatch_one_batch to protect
against concurrent consumption of the unprotected iterator.
"""
if not self.dispatch_one_batch(self._original_iterator):
self._iterating = False
self._original_iterator = None
def dispatch_one_batch(self, iterator):
"""Prefetch the tasks for the next batch and dispatch them.
The effective size of the batch is computed here.
If there are no more jobs to dispatch, return False, else return True.
The iterator consumption and dispatching is protected by the same
lock so calling this function should be thread safe.
"""
if self.batch_size == 'auto' and self.backend == 'threading':
# Batching is never beneficial with the threading backend
batch_size = 1
elif self.batch_size == 'auto':
old_batch_size = self._effective_batch_size
batch_duration = self._smoothed_batch_duration
if (batch_duration > 0 and
batch_duration < MIN_IDEAL_BATCH_DURATION):
# The current batch size is too small: the duration of the
                # processing of a batch of tasks is not large enough to hide
# the scheduling overhead.
ideal_batch_size = int(
old_batch_size * MIN_IDEAL_BATCH_DURATION / batch_duration)
                # Multiply by two to limit oscillations between min and max.
batch_size = max(2 * ideal_batch_size, 1)
self._effective_batch_size = batch_size
if self.verbose >= 10:
self._print("Batch computation too fast (%.4fs.) "
"Setting batch_size=%d.", (
batch_duration, batch_size))
elif (batch_duration > MAX_IDEAL_BATCH_DURATION and
old_batch_size >= 2):
# The current batch size is too big. If we schedule overly long
# running batches some CPUs might wait with nothing left to do
                # while a couple of CPUs are left processing a few long running
# batches. Better reduce the batch size a bit to limit the
# likelihood of scheduling such stragglers.
self._effective_batch_size = batch_size = old_batch_size // 2
if self.verbose >= 10:
self._print("Batch computation too slow (%.2fs.) "
"Setting batch_size=%d.", (
batch_duration, batch_size))
else:
# No batch size adjustment
batch_size = old_batch_size
if batch_size != old_batch_size:
# Reset estimation of the smoothed mean batch duration: this
# estimate is updated in the multiprocessing apply_async
# CallBack as long as the batch_size is constant. Therefore
# we need to reset the estimate whenever we re-tune the batch
# size.
self._smoothed_batch_duration = 0
else:
# Fixed batch size strategy
batch_size = self.batch_size
with self._lock:
tasks = BatchedCalls(itertools.islice(iterator, batch_size))
if not tasks:
# No more tasks available in the iterator: tell caller to stop.
return False
else:
self._dispatch(tasks)
return True
def _print(self, msg, msg_args):
"""Display the message on stout or stderr depending on verbosity"""
# XXX: Not using the logger framework: need to
# learn to use logger better.
if not self.verbose:
return
if self.verbose < 50:
writer = sys.stderr.write
else:
writer = sys.stdout.write
msg = msg % msg_args
writer('[%s]: %s\n' % (self, msg))
def print_progress(self):
"""Display the process of the parallel execution only a fraction
of time, controlled by self.verbose.
"""
if not self.verbose:
return
elapsed_time = time.time() - self._start_time
        # This is heuristic code to print a message only 'verbose' times
# The challenge is that we may not know the queue length
if self._original_iterator:
if _verbosity_filter(self.n_dispatched_batches, self.verbose):
return
self._print('Done %3i tasks | elapsed: %s',
(self.n_completed_tasks,
short_format_time(elapsed_time),
))
else:
index = self.n_dispatched_batches
# We are finished dispatching
total_tasks = self.n_dispatched_tasks
# We always display the first loop
if not index == 0:
# Display depending on the number of remaining items
# A message as soon as we finish dispatching, cursor is 0
cursor = (total_tasks - index + 1
- self._pre_dispatch_amount)
frequency = (total_tasks // self.verbose) + 1
is_last_item = (index + 1 == total_tasks)
if (is_last_item or cursor % frequency):
return
remaining_time = (elapsed_time / (index + 1) *
(self.n_dispatched_tasks - index - 1.))
self._print('Done %3i out of %3i | elapsed: %s remaining: %s',
(index + 1,
total_tasks,
short_format_time(elapsed_time),
short_format_time(remaining_time),
))
def retrieve(self):
self._output = list()
while self._iterating or len(self._jobs) > 0:
if len(self._jobs) == 0:
# Wait for an async callback to dispatch new jobs
time.sleep(0.01)
continue
# We need to be careful: the job list can be filling up as
            # we empty it and Python lists are not thread-safe by default hence
# the use of the lock
with self._lock:
job = self._jobs.pop(0)
try:
self._output.extend(job.get())
except tuple(self.exceptions) as exception:
# Stop dispatching any new job in the async callback thread
self._aborting = True
if isinstance(exception, TransportableException):
# Capture exception to add information on the local
# stack in addition to the distant stack
this_report = format_outer_frames(context=10,
stack_start=1)
report = """Multiprocessing exception:
%s
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
%s""" % (this_report, exception.message)
# Convert this to a JoblibException
exception_type = _mk_exception(exception.etype)[0]
exception = exception_type(report)
# Kill remaining running processes without waiting for
# the results as we will raise the exception we got back
# to the caller instead of returning any result.
self._terminate_pool()
if self._managed_pool:
# In case we had to terminate a managed pool, let
# us start a new one to ensure that subsequent calls
# to __call__ on the same Parallel instance will get
# a working pool as they expect.
self._initialize_pool()
raise exception
def __call__(self, iterable):
if self._jobs:
raise ValueError('This Parallel instance is already running')
# A flag used to abort the dispatching of jobs in case an
# exception is found
self._aborting = False
if not self._managed_pool:
n_jobs = self._initialize_pool()
else:
n_jobs = self._effective_n_jobs()
if self.batch_size == 'auto':
self._effective_batch_size = 1
iterator = iter(iterable)
pre_dispatch = self.pre_dispatch
if pre_dispatch == 'all' or n_jobs == 1:
# prevent further dispatch via multiprocessing callback thread
self._original_iterator = None
self._pre_dispatch_amount = 0
else:
self._original_iterator = iterator
if hasattr(pre_dispatch, 'endswith'):
pre_dispatch = eval(pre_dispatch)
self._pre_dispatch_amount = pre_dispatch = int(pre_dispatch)
# The main thread will consume the first pre_dispatch items and
# the remaining items will later be lazily dispatched by async
# callbacks upon task completions.
iterator = itertools.islice(iterator, pre_dispatch)
self._start_time = time.time()
self.n_dispatched_batches = 0
self.n_dispatched_tasks = 0
self.n_completed_tasks = 0
self._smoothed_batch_duration = 0.0
try:
# Only set self._iterating to True if at least a batch
# was dispatched. In particular this covers the edge
# case of Parallel used with an exhausted iterator.
while self.dispatch_one_batch(iterator):
self._iterating = True
else:
self._iterating = False
if pre_dispatch == "all" or n_jobs == 1:
                # The iterable was consumed all at once by the above while loop.
# No need to wait for async callbacks to trigger to
# consumption.
self._iterating = False
self.retrieve()
# Make sure that we get a last message telling us we are done
elapsed_time = time.time() - self._start_time
self._print('Done %3i out of %3i | elapsed: %s finished',
(len(self._output), len(self._output),
short_format_time(elapsed_time)))
finally:
if not self._managed_pool:
self._terminate_pool()
self._jobs = list()
output = self._output
self._output = None
return output
def __repr__(self):
return '%s(n_jobs=%s)' % (self.__class__.__name__, self.n_jobs)
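# A minimal sketch of the context-manager API set up in __enter__/__exit__
# above: reusing a managed pool for several calls avoids re-spawning workers.
#
#     from math import sqrt
#     with Parallel(n_jobs=2) as parallel:
#         squares = parallel(delayed(sqrt)(i ** 2) for i in range(5))
#         roots = parallel(delayed(sqrt)(i) for i in (4, 9, 16))
#     # squares == [0.0, 1.0, 2.0, 3.0, 4.0], roots == [2.0, 3.0, 4.0]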
| bsd-3-clause |
kmike/scikit-learn | examples/ensemble/plot_forest_importances.py | 4 | 1741 | """
=========================================
Feature importances with forests of trees
=========================================
This example shows the use of forests of trees to evaluate the importance of
features on an artificial classification task. The red bars are the feature
importances of the forest, along with their inter-trees variability.
As expected, the plot suggests that 3 features are informative, while the
remaining are not.
"""
print(__doc__)
import numpy as np
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
n_classes=2,
random_state=0,
shuffle=False)
# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=250,
random_state=0)
forest.fit(X, y)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(10):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
import pylab as pl
pl.figure()
pl.title("Feature importances")
pl.bar(range(10), importances[indices],
color="r", yerr=std[indices], align="center")
pl.xticks(range(10), indices)
pl.xlim([-1, 10])
pl.show()
| bsd-3-clause |
McDermott-Group/LabRAD | LabRAD/Measurements/General/waveform.py | 1 | 22660 | # Copyright (C) 2015 Samuel Owen, Ivan Pechenezhskiy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
This module could be used to create the waveforms that are used to
populate the DAC boards. See the __main__ section of this file for
examples.
"""
import collections
import itertools
import warnings
import numpy as np
import scipy.signal as ss
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cbook
warnings.filterwarnings("ignore", category=matplotlib.cbook.mplDeprecation)
from win32api import SetConsoleCtrlHandler
import labrad.units as units
def _flatten(iterable):
"""
De-nest a list of _WavePulses for convenience.
Input:
iterable: an iterable object.
Output:
list: de-nested list of _WavePulses.
"""
remainder = iter(iterable)
while True:
first = next(remainder)
if (isinstance(first, collections.Iterable) and
not isinstance(first, _WavePulse)):
remainder = itertools.chain(first, remainder)
else:
yield first
class _WavePulse():
"""
Base pulse class that contains shared methods.
"""
def _ns(self, time):
"""
Convert time to nanoseconds. Return an integer without any
units attached.
Input:
time: physical or numerical (in ns) time value.
Output:
time: numerical time value in ns.
"""
if isinstance(time, units.Value):
time = time['ns']
return int(np.round(time))
def _init_times(self, start=None, duration=None, end=None):
"""
Define the pulse start, end, and duration attributes.
Inputs:
start: start time of the pulse.
duration: duration of the pulse.
end: end time of the pulse.
Output:
None.
"""
if [start, duration, end].count(None) > 1:
raise ValueError("A pair of time parameters is required " +
"to define a pulse. These possible time " +
"parameters are 'start', 'duration', and 'end'.")
if start is not None:
self.start = self._ns(start)
if duration is not None:
self.duration = self._ns(duration)
if end is not None:
self.end = self._ns(end)
if start is None:
self.start = self.end - self.duration + 1
if duration is None:
self.duration = self.end - self.start + 1
if end is None:
self.end = self.start + self.duration - 1
if self.start > self.end + 1:
raise ValueError("The pulse ends before it starts: " +
"the pulse starts at " + str(self.start) + " ns " +
"and ends at " + str(self.end) + " ns.")
if self.end - self.start + 1 != self.duration:
raise ValueError("Inconsistent time parameters: the pulse" +
" starts at " + str(self.start) + " ns, its " +
"duration is " + str(self.duration) + " ns, while" +
" the pulse is expected to end at " +
str(self.end) + " ns.")
def _amplitude(self, amplitude):
"""
Process the amplitude (strip units from the amplitude value).
Input:
amplitude: amplitude of the pulse.
Output:
amplitude: amplitude of the pulse.
"""
if isinstance(amplitude, units.Value):
return amplitude[units.Unit(amplitude)]
else:
return float(amplitude)
def _harmonic(self, frequency, phase):
"""
Process the pulse frequency and phase.
Inputs:
frequency: frequency of the harmonic pulse.
phase: phase of the harmonic pulse.
Outputs:
frequency: frequency of the harmonic pulse.
phase: phase of the harmonic pulse.
"""
if isinstance(frequency, units.Value):
frequency = frequency['GHz']
else:
frequency = float(frequency)
if isinstance(phase, units.Value):
phase = phase['rad']
else:
phase = float(phase)
return frequency, phase
def _check_pulse(self):
"""
Check whether the pulse amplitudes are in -1.0 to 1.0 range.
Input:
None.
Output:
None.
"""
if any(abs(self.pulse) > 1):
raise ValueError('The pulse amplitude should not exceed 1.')
def after(self, time=0):
"""
Time point after the pulse.
Input:
time: time delay after this pulse in ns.
Output:
time: absolute time.
"""
return self.end + 1 + self._ns(time)
def before(self, time=0):
"""
Time point before the pulse.
Input:
time: time delay before this pulse in ns.
Output:
time: absolute time.
"""
return self.start - 1 - self._ns(time)
class DC(_WavePulse):
"""
DC pulse.
Inputs:
amplitude: amplitude of the dc pulse.
start: starting time of the dc pulse.
duration: length of the dc pulse.
end: ending time of the dc pulse.
"""
def __init__(self, amplitude=0, start=None, duration=None, end=None):
self._init_times(start, duration, end)
amplitude = self._amplitude(amplitude)
self.pulse = np.full(self.duration, amplitude)
self._check_pulse()
class Sine(_WavePulse):
"""
Sine pulse.
Inputs:
amplitude: amplitude of the sine pulse (default: 0).
frequency: frequency of the sine pulse (default: 0 Hz).
phase: phase of the sine pulse (default: 0 rad).
offset: constant dc offset of the sine pulse (default: 0).
start: starting time of the sine pulse.
duration: length of the sine pulse.
end: ending time of the sine pulse.
phase_ref: point in time that should have the specified
phase (default: start pulse time).
"""
def __init__(self, amplitude=0, frequency=0, phase=0, offset=0,
start=None, duration=None, end=None, phase_ref=None):
self._init_times(start, duration, end)
amplitude = self._amplitude(amplitude)
frequency, phase = self._harmonic(frequency, phase)
offset = self._amplitude(offset)
if phase_ref is None:
t0 = 0
else:
t0 = self.start - self._ns(phase_ref)
t = np.linspace(t0, t0 + self.duration - 1, self.duration)
self.pulse = (offset + amplitude *
np.sin(2 * np.pi * frequency * t + phase))
self._check_pulse()
class Cosine(_WavePulse):
"""
Cosine pulse.
Inputs:
amplitude: amplitude of the cosine pulse (default: 0).
frequency: frequency of the cosine pulse (default: 0 Hz).
phase: phase of the cosine pulse (default: 0 rad).
offset: constant dc offset of the cosine pulse (default: 0).
start: starting time of the cosine pulse.
duration: length of the cosine pulse.
end: ending time of the cosine pulse.
phase_ref: point in time that should have the specified
phase (default: start pulse time).
"""
def __init__(self, amplitude=0, frequency=0, phase=0, offset=0,
start=None, duration=None, end=None, phase_ref=None):
self._init_times(start, duration, end)
amplitude = self._amplitude(amplitude)
frequency, phase = self._harmonic(frequency, phase)
offset = self._amplitude(offset)
if phase_ref is None:
t0 = 0
else:
t0 = self.start - self._ns(phase_ref)
t = np.linspace(t0, t0 + self.duration - 1, self.duration)
self.pulse = (offset + amplitude *
np.cos(2 * np.pi * frequency * t + phase))
self._check_pulse()
class Gaussian(_WavePulse):
"""
Gaussian window pulse. The pulse is truncated at about 1 per 2^14
level since the DACs have 14-bit resolution.
Inputs:
amplitude: amplitude of the gaussian pulse.
start: starting time of the gaussian pulse.
duration: length of the gaussian pulse.
end: ending time of the gaussian pulse.
"""
def __init__(self, amplitude=0, start=None, duration=None, end=None):
self._init_times(start, duration, end)
amplitude = self._amplitude(amplitude)
sigma = (float(self.duration) - 1) / np.sqrt(112 * np.log(2))
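# With this sigma, the window value at the first and last samples is
# exp(-0.5 * ((duration - 1) / 2 / sigma)**2) = exp(-14 * ln(2)) = 2**-14,
# i.e. the 14-bit truncation level quoted in the class docstring.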
self.pulse = amplitude * ss.gaussian(self.duration, sigma)
self._check_pulse()
class FromArray(_WavePulse):
"""
Generate a pulse from a numpy array. The start or end times can be
arbitrary, and the duration is derived automatically from the length
of the array.
Inputs:
pulse_data: numpy array containing the pulse data in 1 ns
chunks.
start: starting time of the pulse.
end: ending time of the pulse.
"""
def __init__(self, pulse_data=[], start=None, end=None):
duration = len(pulse_data)
self._init_times(start, duration, end)
if isinstance(pulse_data, list):
pulse_data = np.array(pulse_data)
self.pulse = pulse_data
self._check_pulse()
class Waveform():
"""
Create a waveform from pulses.
The start of one pulse is expected to be one unit
(i.e. one nanosecond) after the end of the previous pulse
(i.e. pulse2.start - pulse1.end >= 1). Therefore, to make pulse B
start immediately after another pulse A initialize B.start to
(A.end + 1), or simply assign A.after() to B.start.
Inputs:
label: waveform label string.
args: arbitrarily long set of _WavePulses to create the waveform
from. To create a _WavePulse use one of the "public"
classes such as DC, Sine, Cosine, etc.
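Example (sketch, mirroring the examples in __main__ below):
    pulse1 = DC(amplitude=-1, start=0, duration=10)
    pulse2 = Sine(amplitude=0.5, frequency=0.25,
                  start=pulse1.after(), duration=8)
    wf = Waveform('A', pulse1, pulse2)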
"""
def __init__(self, label='None', *args):
if not isinstance(label, str):
raise ValueError('Invalid waveform label.')
self.label = label
args = list(_flatten(args))
pulses = [arg for arg in args if isinstance(arg, _WavePulse)]
if len(pulses) > 0:
# Sort based on the start times.
for i in range(len(pulses))[::-1]:
for j in range(i):
if pulses[j].start > pulses[j + 1].start:
tmp = pulses[j + 1]
pulses[j + 1] = pulses[j]
pulses[j] = tmp
# Ensure there are no overlaps.
for i in range(len(pulses) - 1):
if pulses[i].end > pulses[i + 1].start:
raise ValueError("There are overlaps between " +
"the waveform pulses.")
# Loop through and fill unused spots with zeros.
pulses_filled = []
for i in range(len(pulses) - 1):
pulses_filled.append(pulses[i].pulse)
gap = pulses[i + 1].start - pulses[i].end
if gap > 1:
pulses_filled.append(np.zeros(gap - 1))
pulses_filled.append(pulses[len(pulses) - 1].pulse)
self.pulses = np.hstack(pulses_filled)
else:
self.pulses = np.array([0])
self.start = pulses[0].start
self.end = pulses[-1].end
self.duration = self.end - self.start + 1
def ECLDuringPulses(*args, **kwargs):
"""
Return a list of _WavePulses that make the ECL outputs go high during
a set of specified _WavePulses.
Inputs:
args: set (or list) of _WavePulses during which an ECL pulse
should be generated.
pad_length: time before and after the pulses (default: 8 ns).
Output:
ECL: list of ECL _WavePulses.
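Example (sketch, with hypothetical pulses pulseA and pulseB that are
far enough apart for the padded ECL pulses not to overlap):
    ecl_pulses = ECLDuringPulses(pulseA, pulseB, pad_length=8)
    ecl_wf = Waveform('ECL', ecl_pulses)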
"""
if 'pad_length' in kwargs:
if isinstance(kwargs['pad_length'], units.Value):
pad_length = kwargs['pad_length']['ns']
else:
pad_length = kwargs['pad_length']
try:
pad_length = int(np.round(pad_length))
except:
raise Exception("Invalid ECL pad length value.")
else:
pad_length = 8
args = list(_flatten(args))
pulses = [arg for arg in args if isinstance(arg, _WavePulse)]
ECL = []
for pulse in pulses:
ECL.append(DC(amplitude = 1,
start = pulse.before(pad_length),
end = pulse.after(pad_length)))
return ECL
def Harmonic(amplitude=0, frequency=0, phase=0,
cosine_offset=0, sine_offset=0,
start=None, duration=None, end=None, phase_ref=None):
"""
Return cosine and sine pulses.
Inputs:
amplitude: amplitude of the pulses (default: 0).
frequency: frequency of the pulses (default: 0 Hz).
phase: phase of the pulses (default: 0 rad).
cosine_offset: constant dc offset of the cosine pulse
(default: 0).
sine_offset: constant dc offset of the sine pulse
(default: 0).
start: starting time of the pulses.
duration: length of the pulses.
end: ending time of the pulses.
phase_ref: point in time that should have the specified
phase (default: start pulse time).
Outputs:
cosine: Cosine pulse object (returned first).
sine: Sine pulse object (returned second).
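Example (sketch, as in __main__ below):
    I, Q = Harmonic(amplitude=0.25, frequency=0.05, start=0,
                    duration=150)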
"""
return (Cosine(amplitude, frequency, phase,
cosine_offset, start, duration, end, phase_ref),
Sine(amplitude, frequency, phase,
sine_offset, start, duration, end, phase_ref))
def wfs_dict(*args, **kwargs):
"""
Return a waveform dictionary with the waveform labels as the keys.
Align the waveforms using the waveform starting time. Ensure that
the waveforms are of an equal length. The waveforms are zero-padded
at the start and the end to ensure that they are not shorter than
the minimum allowed length.
Inputs:
*args: arbitrarily long set of Waveforms (instances of class
Waveform).
*kwargs:
min_length: minimum allowed length of the final waveform.
Short waveforms are padded with zeros at the end
to increase their length (default: 20).
start_zeros: number of zeros to add to the start of each
waveform (default: 4).
end_zeros: number of zeros to add to the end of each
waveform (default: 4). Actual number of zeros added may
be higher if the waveform length does not satisfy
the min_length requirement.
Outputs:
waveforms: dictionary with the processed waveforms.
offset: difference between the corresponding index values
of the waveform numpy ndarrays and the time values that
specify the start and end times for the waveforms:
offset = ndarray_index - assigned_time_value, i.e.
ndarray_index = assigned_time_value + offset.
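Example (sketch, mirroring the usage in __main__ below, where I and Q
are Harmonic pulses):
    wfs, offset = wfs_dict(Waveform('I', I), Waveform('Q', Q),
                           min_length=100)
    # wfs['I'][t + offset] is the sample assigned to time t.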
"""
defaults = {'min_length': 20, 'start_zeros': 4, 'end_zeros': 4}
for key in kwargs:
if isinstance(kwargs[key], units.Value):
kwargs[key] = kwargs[key]['ns']
try:
kwargs[key] = int(np.round(kwargs[key]))
except:
raise Exception("Invalid parameter '%s' value." %key)
defaults.update(kwargs)
min_len = defaults['min_length']
start, end = defaults['start_zeros'], defaults['end_zeros']
wfs = [arg for arg in args if isinstance(arg, Waveform)]
# Align the waveforms.
if wfs:
start_offset = min([wf.start for wf in wfs])
for wf in wfs:
wf.pulses = np.hstack([np.zeros(wf.start - start_offset),
wf.pulses])
else:
start_offset = 0
# Create an empty waveform 'None'.
wfs.append(Waveform('None', DC(start=start_offset, duration=1)))
# Ensure that the waveforms are long enough and of an equal length.
max_len = max([wf.pulses.size for wf in wfs]) + start + end
total_len = max(min_len, max_len)
for wf in wfs:
fin = max(total_len - start - wf.pulses.size, end)
wf.pulses = np.hstack([np.zeros(start), wf.pulses, np.zeros(fin)])
return {wf.label: wf.pulses for wf in wfs}, start - start_offset
def check_wfs(waveforms):
"""
Check that all waveforms have the same length.
Input:
waveforms: dictionary with the processed waveforms.
Output:
None.
"""
lengths = [waveforms[wf].size for wf in waveforms]
if lengths.count(lengths[0]) != len(lengths):
raise Exception('The waveforms have different lengths.')
def _close_figure(self, signal=None):
"""
Close the waveform figure.
Input:
None.
Output:
None.
"""
plt.close(2)
def plot_wfs(waveforms, wf_labels, wf_colors=['r', 'g', 'm', 'b', 'k', 'c']):
"""
Plot waveforms.
Input:
waveforms: dictionary with the processed waveforms.
wf_labels: waveform labels to plot.
wf_colors: colors for waveform colorcoding.
Output:
None.
"""
if not isinstance(wf_colors, list):
wf_colors = list(wf_colors)
if not isinstance(wf_labels, list):
wf_labels = list(wf_labels)
time = waveforms[wf_labels[0]].size
time = np.linspace(0, time - 1, time)
plt.figure(2)
plt.ioff()
plt.clf()
for idx, wf in enumerate(wf_labels):
plt.plot(time, waveforms[wf], wf_colors[idx % 6],
label=wf_labels[idx])
plt.xlim(time[0], time[-1])
plt.legend()
plt.xlabel('Time [ns]')
plt.ylabel('Waveforms')
plt.draw()
plt.pause(0.05)
if __name__ == "__main__":
"""
Tests and examples. Add your test/example!
"""
# Explicitly close the waveform figure when the terminal is closed.
SetConsoleCtrlHandler(_close_figure, True)
# Cosine pulse with amplitude of 1 and frequency of 0.25 GHz
# starting at t = 2 ns and ending at t = 8 ns.
pulseA1 = Cosine(amplitude=1, frequency=0.25, start=2, end=8)
# Sine pulse with amplitude of 0.5 and frequency of 0.25 GHz
# starting at the start of pulseA1 and ending at the end of pulseA1.
pulseB1 = Sine(amplitude=0.5, frequency=0.25,
start=pulseA1.start, end=pulseA1.end)
# DC pulse with amplitude of -1 starting after the end of pulseA1.
# The pulse duration is 10 ns.
pulseB2 = DC(amplitude=-1, start=pulseA1.after(), duration=10)
# Combine the two pulses into one waveform. The waveform class
# automatically puts the wave pulses in the correct order.
waveformB = Waveform('B', pulseB1, pulseB2)
# Specifying the start, duration and end times at the same time will
# work only if these parameters are consistent, i.e. if the equation
# self.duration = self.end - self.start + 1 is satisfied.
pulseA2 = DC(start=pulseB2.start, duration=10, end=pulseB2.end)
try:
# Inconsistent specifications.
pulseA2 = DC(start=pulseB2.after(-1), duration=12, end=pulseB2.end)
except ValueError:
print('The inconsistent time error has been correctly caught.')
try:
# Amplitude should not exceed 1.
pulseA2 = Sine(amplitude=1, frequency=.25, offset=.1,
start=pulseB2.after(-1), duration=12)
except ValueError:
print('The amplitude error has been correctly caught.')
# Sine pulse with amplitude of 1 and frequency of 0.1 GHz
# starting 2 ns after pulseB1 and ending at the same time as
# pulseB2.
pulseA2 = Sine(amplitude=1, phase=np.pi/2, frequency=0.1,
start=pulseB1.after(2), end=pulseB2.end)
# Combine the two pulses into one waveform. The waveform class
# automatically puts the wave pulses in the correct order.
waveformA = Waveform('A', pulseA1, pulseA2)
# Create a waveform dictionary with the waveform labels as the keys.
# The waveforms will be aligned based on their start times. They
# will be zero-padded to ensure equal length that is longer than
# a minimum length, which is 20 in this example.
wfs, time_offset = wfs_dict(waveformA, waveformB, min_length=20)
print(wfs)
check_wfs(wfs)
print('Time offset = %d ns.' %time_offset)
# Gaussian pulse with amplitude of 1 starting at t = 0 ns and
# ending at t = 14 ns (duration is equal to 15 ns).
pulseC = Gaussian(amplitude=1, start=0, duration=15, end=14)
waveformC = Waveform('C', pulseC)
wfs, time_offset = wfs_dict(waveformA, waveformB, waveformC,
min_length=100)
print(wfs)
check_wfs(wfs)
print('Time offset = %d ns.' %time_offset)
# Create an in-phase and quadrature components of a harmonic pulse.
I, Q = Harmonic(amplitude=0.25, frequency=0.05, start=0,
duration=150)
wfs, time_offset = wfs_dict(Waveform('I', I), Waveform('Q', Q))
print(wfs)
check_wfs(wfs)
print('Time offset = %d ns.' %time_offset)
# Plot the waveforms for inspection.
plot_wfs(wfs, ['I', 'Q'], ['r', 'b'])
# Some animation.
for x in range(100):
# Create an in-phase and quadrature components of a harmonic
# pulse.
I, Q = Harmonic(amplitude=0.25, frequency=0.03, phase= x / 20,
start=0, duration=150)
wfs, time_offset = wfs_dict(Waveform('I', I), Waveform('Q', Q))
# Plot the waveforms for inspection.
plot_wfs(wfs, ['I', 'Q'], ['r', 'b']) | gpl-2.0 |
tjhei/burnman_old | example_composition.py | 1 | 5643 | # BurnMan - a lower mantle toolkit
# Copyright (C) 2012, 2013, Heister, T., Unterborn, C., Rose, I. and Cottaar, S.
# Released under GPL v2 or later.
"""
This example shows how to create different minerals, how to compute seismic
velocities, and how to compare them to a seismic reference model.
requires:
- geotherms
- seismic models
- compute seismic velocities
teaches:
- creating minerals
- seismic comparison
"""
import os, sys, numpy as np, matplotlib.pyplot as plt
#hack to allow scripts to be placed in subdirectories next to burnman:
if not os.path.exists('burnman') and os.path.exists('../burnman'):
sys.path.insert(1,os.path.abspath('..'))
import burnman
from burnman import minerals
if __name__ == "__main__":
#INPUT for method
""" choose 'slb' (finite-strain 2nd order sheer modulus, stixrude and lithgow-bertelloni, 2005)
or 'mgd' (mie-gruneisen-debeye, matas et al. 2007)
or 'bm' (birch-murnaghan, if you choose to ignore temperature (your choice in geotherm will not matter in this case))
or 'slb3 (finite-strain 3rd order shear modulus, stixrude and lithgow-bertelloni, 2005)"""
method = 'slb'
# To compute seismic velocities and other properties, we need to supply
# burnman with a list of minerals (phases) and their molar abundances. Minerals
# are classes found in burnman.minerals and are derived from
# burnman.minerals.material.
# Here are a few ways to define phases and molar_abundances:
#Example 1: two simple fixed minerals
if True:
phases = [minerals.Murakami_fe_perovskite(), minerals.Murakami_fe_periclase_LS()]
amount_perovskite = 0.95
molar_abundances = [amount_perovskite, 1.0-amount_perovskite]
#Example 2: specify fixed iron content
if False:
phases = [minerals.mg_fe_perovskite(0.8), minerals.ferropericlase(0.8)]
amount_perovskite = 0.95
molar_abundances = [amount_perovskite, 1.0-amount_perovskite]
#Example 3: input weight percentages
#See comments in burnman/composition.py for references to partition coefficient calculation
if False:
weight_percents = {'Mg':0.213, 'Fe': 0.08, 'Si':0.27, 'Ca':0., 'Al':0.}
phase_fractions,relative_molar_percent = burnman.calculate_phase_percents(weight_percents)
iron_content = lambda p,t: burnman.calculate_partition_coefficient(p,t,relative_molar_percent)
phases = [minerals.mg_fe_perovskite_pt_dependent(iron_content,0), \
minerals.ferropericlase_pt_dependent(iron_content,1)]
molar_abundances = [phase_fractions['pv'],phase_fractions['fp']]
#Example 4: three materials
if False:
phases = [minerals.Murakami_fe_perovskite(), minerals.ferropericlase(0.5), minerals.stishovite()]
molar_abundances = [0.7, 0.2, 0.1]
#seismic model for comparison:
seismic_model = burnman.seismic.prem() # pick from .prem() .slow() .fast() (see burnman/seismic.py)
number_of_points = 20 #set on how many depth slices the computations should be done
# we will do our computation and comparison at the following depth values:
depths = np.linspace(700, 2800, number_of_points)
#alternatively, we could use the values where prem is defined:
#depths = seismic_model.internal_depth_list()
seis_p, seis_rho, seis_vp, seis_vs, seis_vphi = seismic_model.evaluate_all_at(depths)
geotherm = burnman.geotherm.brown_shankland
temperature = [geotherm(p) for p in seis_p]
for ph in phases:
ph.set_method(method)
print "Calculations are done for:"
for i in range(len(phases)):
print molar_abundances[i], " of phase", phases[i].to_string()
mat_rho, mat_vp, mat_vs, mat_vphi, mat_K, mat_mu = burnman.calculate_velocities(seis_p, temperature, phases, molar_abundances)
[rho_err,vphi_err,vs_err]=burnman.compare_with_seismic_model(mat_vs,mat_vphi,mat_rho,seis_vs,seis_vphi,seis_rho)
# PLOTTING
# plot vs
plt.subplot(2,2,1)
plt.plot(seis_p/1.e9,mat_vs,color='b',linestyle='-',marker='o',markerfacecolor='b',markersize=4,label='computation')
plt.plot(seis_p/1.e9,seis_vs,color='k',linestyle='-',marker='o',markerfacecolor='k',markersize=4,label='reference')
plt.title("Vs (km/s)")
plt.xlim(min(seis_p)/1.e9,max(seis_p)/1.e9)
plt.ylim(5.1,7.6)
plt.legend(loc='lower right')
plt.text(40,7.3,"misfit= %3.3f" % vs_err)
# plot Vphi
plt.subplot(2,2,2)
plt.plot(seis_p/1.e9,mat_vphi,color='b',linestyle='-',marker='o',markerfacecolor='b',markersize=4)
plt.plot(seis_p/1.e9,seis_vphi,color='k',linestyle='-',marker='o',markerfacecolor='k',markersize=4)
plt.title("Vphi (km/s)")
plt.xlim(min(seis_p)/1.e9,max(seis_p)/1.e9)
plt.ylim(7,12)
plt.text(40,11.5,"misfit= %3.3f" % vphi_err)
# plot density
plt.subplot(2,2,3)
plt.plot(seis_p/1.e9,mat_rho,color='b',linestyle='-',marker='o',markerfacecolor='b',markersize=4)
plt.plot(seis_p/1.e9,seis_rho,color='k',linestyle='-',marker='o',markerfacecolor='k',markersize=4)
plt.title("density (kg/m^3)")
plt.xlim(min(seis_p)/1.e9,max(seis_p)/1.e9)
plt.text(40,4.3,"misfit= %3.3f" % rho_err)
plt.xlabel("Pressure (GPa)")
# plot geotherm
plt.subplot(2,2,4)
plt.plot(seis_p/1e9,temperature,color='r',linestyle='-',marker='o',markerfacecolor='r',markersize=4)
plt.title("Geotherm (K)")
plt.xlim(min(seis_p)/1.e9,max(seis_p)/1.e9)
plt.xlabel("Pressure (GPa)")
plt.savefig("example_composition.png")
plt.show()
| gpl-2.0 |
aaalgo/cls | train.py | 1 | 8746 | #!/usr/bin/env python3
import os
import sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import time
from tqdm import tqdm
import numpy as np
import cv2
import simplejson as json
from sklearn.metrics import accuracy_score, roc_auc_score
import tensorflow as tf
import tensorflow.contrib.layers as layers
import tensorflow.contrib.slim as slim
from tensorflow.python.ops import control_flow_ops
import picpac
import cls_nets as nets
augments = None
#from . config import *
#if os.path.exists('config.py'):
def print_red (txt):
print('\033[91m' + txt + '\033[0m')
def print_green (txt):
print('\033[92m' + txt + '\033[0m')
print(augments)
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('db', None, 'training db')
flags.DEFINE_string('val_db', None, 'validation db')
flags.DEFINE_integer('classes', 2, 'number of classes')
flags.DEFINE_string('mixin', None, 'mix-in training db')
flags.DEFINE_integer('channels', 3, '')
flags.DEFINE_boolean('cache', True, '')
flags.DEFINE_string('augments', None, 'augment config file')
flags.DEFINE_integer('size', 224, '')
flags.DEFINE_integer('batch', 128, 'Batch size. ')
flags.DEFINE_integer('shift', 0, '')
flags.DEFINE_string('net', 'resnet_50', 'architecture')
flags.DEFINE_string('model', None, 'model directory')
flags.DEFINE_string('resume', None, 'resume training from this model')
flags.DEFINE_integer('max_to_keep', 100, '')
# optimizer settings
flags.DEFINE_float('lr', 0.02, 'Initial learning rate.')
flags.DEFINE_float('decay_rate', 0.95, '')
flags.DEFINE_float('decay_steps', 500, '')
#
flags.DEFINE_integer('epoch_steps', None, '')
flags.DEFINE_integer('max_epochs', 200, '')
flags.DEFINE_integer('ckpt_epochs', 10, '')
flags.DEFINE_integer('val_epochs', 10, '')
def cls_loss (logits, labels):
# cross-entropy
xe = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels)
xe = tf.reduce_mean(xe, name='xe')
# accuracy
acc = tf.cast(tf.nn.in_top_k(logits, labels, 1), tf.float32)
acc = tf.reduce_mean(acc, name='acc')
# regularization
reg = tf.reduce_sum(tf.losses.get_regularization_losses())
reg = tf.identity(reg, name='re')
# loss
loss = tf.identity(xe + reg, name='lo')
return loss, [acc, xe, reg, loss]
def create_picpac_stream (db_path, is_training):
assert os.path.exists(db_path)
augments = []
shift = 0
if is_training:
shift = FLAGS.shift
if FLAGS.augments:
with open(FLAGS.augments, 'r') as f:
augments = json.loads(f.read())
print("Using augments:")
print(json.dumps(augments))
else:
augments = [
{"type": "augment.flip", "horizontal": True, "vertical": False},
]
config = {"db": db_path,
"loop": is_training,
"shuffle": is_training,
"reshuffle": is_training,
"annotate": False,
"channels": FLAGS.channels,
"stratify": is_training,
"dtype": "float32",
"batch": FLAGS.batch,
"cache": FLAGS.cache,
"transforms": augments + [
#{"type": "resize", "size": FLAGS.size},
{"type": "clip", "size": FLAGS.size, "shift": shift, "border_type": "replicate"},
]
}
if is_training and not FLAGS.mixin is None:
print("mixin support is incomplete in new picpac.")
assert os.path.exists(FLAGS.mixin)
config['mixin'] = FLAGS.mixin
config['mixin_group_reset'] = 0
config['mixin_group_delta'] = 1
pass
return picpac.ImageStream(config)
def main (_):
if FLAGS.model:
try:
os.makedirs(FLAGS.model)
except:
pass
X = tf.placeholder(tf.float32, shape=(None, None, None, FLAGS.channels), name="images")
# ground truth labels
Y = tf.placeholder(tf.int32, shape=(None, ), name="labels")
is_training = tf.placeholder(tf.bool, name="is_training")
# load network
with slim.arg_scope([slim.conv2d], weights_regularizer=slim.l2_regularizer(2.5e-4)), \
slim.arg_scope([slim.batch_norm], decay=0.9, epsilon=5e-4):
logits = getattr(nets, FLAGS.net)(X-127, is_training, FLAGS.classes)
# probability of class 1 -- not very useful if FLAGS.classes > 2
probs = tf.squeeze(tf.slice(tf.nn.softmax(logits), [0,1], [-1,1]), 1)
loss, metrics = cls_loss(logits, Y)
metric_names = [x.name[:-2] for x in metrics]
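# Tensor names end in ':0'; stripping the last two characters recovers the
# op names assigned in cls_loss ('acc', 'xe', 're', 'lo') for display.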
def format_metrics (avg):
return ' '.join(['%s=%.3f' % (a, b) for a, b in zip(metric_names, list(avg))])
global_step = tf.train.create_global_step()
LR = tf.train.exponential_decay(FLAGS.lr, global_step, FLAGS.decay_steps, FLAGS.decay_rate, staircase=True)
optimizer = tf.train.MomentumOptimizer(learning_rate=LR, momentum=0.9)
#optimizer = tf.train.AdamOptimizer(0.0001)
train_op = slim.learning.create_train_op(loss, optimizer, global_step=global_step)
saver = tf.train.Saver(max_to_keep=FLAGS.max_to_keep)
stream = create_picpac_stream(FLAGS.db, True)
# load validation db
val_stream = None
if FLAGS.val_db:
val_stream = create_picpac_stream(FLAGS.val_db, False)
config = tf.ConfigProto()
config.gpu_options.allow_growth=True
epoch_steps = FLAGS.epoch_steps
if epoch_steps is None:
epoch_steps = (stream.size() + FLAGS.batch-1) // FLAGS.batch
best = 0
with tf.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
if FLAGS.resume:
saver.restore(sess, FLAGS.resume)
global_start_time = time.time()
epoch = 0
while epoch < FLAGS.max_epochs:
start_time = time.time()
cnt, metrics_sum = 0, np.array([0] * len(metrics), dtype=np.float32)
progress = tqdm(range(epoch_steps), leave=False)
for _ in progress:
meta, images = stream.next()
feed_dict = {X: images, Y: meta.labels, is_training: True}
mm, _ = sess.run([metrics, train_op], feed_dict=feed_dict)
metrics_sum += np.array(mm) * images.shape[0]
cnt += images.shape[0]
metrics_txt = format_metrics(metrics_sum/cnt)
progress.set_description(metrics_txt)
pass
stop = time.time()
msg = 'train epoch=%d ' % epoch
msg += metrics_txt
msg += ' elapsed=%.3f time=%.3f ' % (stop - global_start_time, stop - start_time)
print_green(msg)
epoch += 1
if (epoch % FLAGS.val_epochs == 0) and val_stream:
lr = sess.run(LR)
# evaluation
Ys, Ps = [], []
cnt, metrics_sum = 0, np.array([0] * len(metrics), dtype=np.float32)
val_stream.reset()
progress = tqdm(val_stream, leave=False)
for meta, images in progress:
feed_dict = {X: images, Y: meta.labels, is_training: False}
p, mm = sess.run([probs, metrics], feed_dict=feed_dict)
metrics_sum += np.array(mm) * images.shape[0]
cnt += images.shape[0]
Ys.extend(list(meta.labels))
Ps.extend(list(p))
metrics_txt = format_metrics(metrics_sum/cnt)
progress.set_description(metrics_txt)
pass
assert cnt == val_stream.size()
avg = metrics_sum / cnt
if avg[0] > best:
best = avg[0]
msg = 'valid epoch=%d ' % (epoch-1)
msg += metrics_txt
if FLAGS.classes == 2:
# display scikit-learn metrics
Ys = np.array(Ys, dtype=np.int32)
Ps = np.array(Ps, dtype=np.float32)
msg += ' sk_acc=%.3f auc=%.3f' % (accuracy_score(Ys, Ps > 0.5), roc_auc_score(Ys, Ps))
pass
msg += ' lr=%.4f best=%.3f' % (lr, best)
print_red(msg)
#log.write('%d\t%s\t%.4f\n' % (epoch, '\t'.join(['%.4f' % x for x in avg]), best))
# model saving
if (epoch % FLAGS.ckpt_epochs == 0) and FLAGS.model:
ckpt_path = '%s/%d' % (FLAGS.model, epoch)
saver.save(sess, ckpt_path)
print('saved to %s.' % ckpt_path)
pass
pass
pass
if __name__ == '__main__':
try:
tf.app.run()
except KeyboardInterrupt:
pass
| mit |
rhattersley/cartopy | lib/cartopy/tests/mpl/test_patch.py | 3 | 2306 | # (C) British Crown Copyright 2015 - 2018, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
import matplotlib
from matplotlib.path import Path
import pytest
import shapely.geometry as sgeom
import cartopy.mpl.patch as cpatch
class Test_path_to_geos(object):
def test_empty_polyon(self):
p = Path([[0, 0], [0, 0], [0, 0], [0, 0],
[1, 2], [1, 2], [1, 2], [1, 2]],
codes=[1, 2, 2, 79,
1, 2, 2, 79])
geoms = cpatch.path_to_geos(p)
assert [type(geom) for geom in geoms] == [sgeom.Point, sgeom.Point]
assert len(geoms) == 2
@pytest.mark.skipif(matplotlib.__version__ < '2.2.0',
reason='Paths may not be closed with old Matplotlib.')
def test_non_polygon_loop(self):
p = Path([[0, 10], [170, 20], [-170, 30], [0, 10]],
codes=[1, 2, 2, 2])
geoms = cpatch.path_to_geos(p)
assert [type(geom) for geom in geoms] == [sgeom.MultiLineString]
assert len(geoms) == 1
def test_polygon_with_interior_and_singularity(self):
# A geometry with two interiors, one a single point.
p = Path([[0, -90], [200, -40], [200, 40], [0, 40], [0, -90],
[126, 26], [126, 26], [126, 26], [126, 26], [126, 26],
[114, 5], [103, 8], [126, 12], [126, 0], [114, 5]],
codes=[1, 2, 2, 2, 79, 1, 2, 2, 2, 79, 1, 2, 2, 2, 79])
geoms = cpatch.path_to_geos(p)
assert [type(geom) for geom in geoms] == [sgeom.Polygon, sgeom.Point]
assert len(geoms[0].interiors) == 1
| lgpl-3.0 |
kirangonella/BuildingMachineLearningSystemsWithPython | ch04/blei_lda.py | 21 | 2601 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
from __future__ import print_function
from wordcloud import create_cloud
try:
from gensim import corpora, models, matutils
except:
print("import gensim failed.")
print()
print("Please install it")
raise
import matplotlib.pyplot as plt
import numpy as np
from os import path
NUM_TOPICS = 100
# Check that data exists
if not path.exists('./data/ap/ap.dat'):
print('Error: Expected data to be present at data/ap/')
print('Please cd into ./data & run ./download_ap.sh')
# Load the data
corpus = corpora.BleiCorpus('./data/ap/ap.dat', './data/ap/vocab.txt')
# Build the topic model
model = models.ldamodel.LdaModel(
corpus, num_topics=NUM_TOPICS, id2word=corpus.id2word, alpha=None)
# Iterate over all the topics in the model
for ti in range(model.num_topics):
words = model.show_topic(ti, 64)
tf = sum(f for f, w in words)
with open('topics.txt', 'w') as output:
output.write('\n'.join('{}:{}'.format(w, int(1000. * f / tf)) for f, w in words))
output.write("\n\n\n")
# We first identify the most discussed topic, i.e., the one with the
# highest total weight
topics = matutils.corpus2dense(model[corpus], num_terms=model.num_topics)
weight = topics.sum(1)
max_topic = weight.argmax()
# Get the top 64 words for this topic
# Without the argument, show_topic would return only 10 words
words = model.show_topic(max_topic, 64)
# This function will actually check for the presence of pytagcloud and is otherwise a no-op
create_cloud('cloud_blei_lda.png', words)
num_topics_used = [len(model[doc]) for doc in corpus]
fig,ax = plt.subplots()
ax.hist(num_topics_used, np.arange(42))
ax.set_ylabel('Nr of documents')
ax.set_xlabel('Nr of topics')
fig.tight_layout()
fig.savefig('Figure_04_01.png')
# Now, repeat the same exercise using alpha=1.0
# You can edit the constant below to play around with this parameter
ALPHA = 1.0
model1 = models.ldamodel.LdaModel(
corpus, num_topics=NUM_TOPICS, id2word=corpus.id2word, alpha=ALPHA)
num_topics_used1 = [len(model1[doc]) for doc in corpus]
fig,ax = plt.subplots()
ax.hist([num_topics_used, num_topics_used1], np.arange(42))
ax.set_ylabel('Nr of documents')
ax.set_xlabel('Nr of topics')
# The coordinates below were fit by trial and error to look good
ax.text(9, 223, r'default alpha')
ax.text(26, 156, 'alpha=1.0')
fig.tight_layout()
fig.savefig('Figure_04_02.png')
| mit |
rs2/pandas | pandas/tests/series/test_subclass.py | 4 | 2084 | import numpy as np
import pandas as pd
import pandas._testing as tm
class TestSeriesSubclassing:
def test_indexing_sliced(self):
s = tm.SubclassedSeries([1, 2, 3, 4], index=list("abcd"))
res = s.loc[["a", "b"]]
exp = tm.SubclassedSeries([1, 2], index=list("ab"))
tm.assert_series_equal(res, exp)
res = s.iloc[[2, 3]]
exp = tm.SubclassedSeries([3, 4], index=list("cd"))
tm.assert_series_equal(res, exp)
res = s.loc[["a", "b"]]
exp = tm.SubclassedSeries([1, 2], index=list("ab"))
tm.assert_series_equal(res, exp)
def test_to_frame(self):
s = tm.SubclassedSeries([1, 2, 3, 4], index=list("abcd"), name="xxx")
res = s.to_frame()
exp = tm.SubclassedDataFrame({"xxx": [1, 2, 3, 4]}, index=list("abcd"))
tm.assert_frame_equal(res, exp)
def test_subclass_unstack(self):
# GH 15564
s = tm.SubclassedSeries([1, 2, 3, 4], index=[list("aabb"), list("xyxy")])
res = s.unstack()
exp = tm.SubclassedDataFrame({"x": [1, 3], "y": [2, 4]}, index=["a", "b"])
tm.assert_frame_equal(res, exp)
def test_subclass_empty_repr(self):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
sub_series = tm.SubclassedSeries()
assert "SubclassedSeries" in repr(sub_series)
def test_asof(self):
N = 3
rng = pd.date_range("1/1/1990", periods=N, freq="53s")
s = tm.SubclassedSeries({"A": [np.nan, np.nan, np.nan]}, index=rng)
result = s.asof(rng[-2:])
assert isinstance(result, tm.SubclassedSeries)
def test_explode(self):
s = tm.SubclassedSeries([[1, 2, 3], "foo", [], [3, 4]])
result = s.explode()
assert isinstance(result, tm.SubclassedSeries)
def test_equals(self):
# https://github.com/pandas-dev/pandas/pull/34402
# allow subclass in both directions
s1 = pd.Series([1, 2, 3])
s2 = tm.SubclassedSeries([1, 2, 3])
assert s1.equals(s2)
assert s2.equals(s1)
| bsd-3-clause |
stephendade/ardupilot | libraries/AP_Math/tools/geodesic_grid/plot.py | 110 | 2876 | # Copyright (C) 2016 Intel Corporation. All rights reserved.
#
# This file is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import icosahedron as ico
import grid
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_xlim3d(-2, 2)
ax.set_ylim3d(-2, 2)
ax.set_zlim3d(-2, 2)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.invert_zaxis()
ax.invert_xaxis()
ax.set_aspect('equal')
added_polygons = set()
added_sections = set()
def polygons(polygons):
for p in polygons:
polygon(p)
def polygon(polygon):
added_polygons.add(polygon)
def section(s):
added_sections.add(s)
def sections(sections):
for s in sections:
section(s)
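# Example (sketch): highlight icosahedron triangle 0 together with its four
# sub-triangles, then render the grid:
# polygon(ico.triangles[0])
# show(subtriangles=True)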
def show(subtriangles=False):
polygons = []
facecolors = []
triangles_indexes = set()
subtriangle_facecolors = (
'#CCCCCC',
'#CCE5FF',
'#E5FFCC',
'#FFCCCC',
)
if added_sections:
subtriangles = True
for p in added_polygons:
try:
i = ico.triangles.index(p)
except ValueError:
polygons.append(p)
continue
if subtriangles:
sections(range(i * 4, i * 4 + 4))
else:
triangles_indexes.add(i)
polygons.append(p)
facecolors.append('#DDDDDD')
for s in added_sections:
triangles_indexes.add(int(s / 4))
subtriangle_index = s % 4
polygons.append(grid.section_triangle(s))
facecolors.append(subtriangle_facecolors[subtriangle_index])
ax.add_collection3d(Poly3DCollection(
polygons,
facecolors=facecolors,
edgecolors="#777777",
))
for i in triangles_indexes:
t = ico.triangles[i]
mx = my = mz = 0
for x, y, z in t:
mx += x
my += y
mz += z
ax.text(mx / 2.6, my / 2.6, mz / 2.6, i, color='#444444')
if subtriangles:
ax.legend(
handles=tuple(
mpatches.Patch(color=c, label='Sub-triangle #%d' % i)
for i, c in enumerate(subtriangle_facecolors)
),
)
plt.show()
| gpl-3.0 |
JeromeRisselin/PRJ-medtec_sigproc | echopen-leaderboard/ENV/share/doc/networkx-1.11/examples/drawing/unix_email.py | 18 | 2670 | #!/usr/bin/env python
"""
Create a directed graph, allowing multiple edges and self loops, from
a unix mailbox. The nodes are email addresses with links
that point from the sender to the recievers. The edge data
is a Python email.Message object which contains all of
the email message data.
This example shows the power of XDiGraph to hold edge data
of arbitrary Python objects (in this case a list of email messages).
By default, load the sample unix email mailbox called "unix_email.mbox".
You can load your own mailbox by naming it on the command line, eg
python unixemail.py /var/spool/mail/username
"""
# Author: Aric Hagberg ([email protected])
# Copyright (C) 2005-2016 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import email
from email.utils import getaddresses,parseaddr
import mailbox
import sys
# unix mailbox recipe
# see http://www.python.org/doc/current/lib/module-mailbox.html
def msgfactory(fp):
try:
return email.message_from_file(fp)
except email.errors.MessageParseError:
# Don't return None since that will stop the mailbox iterator
return ''
if __name__ == '__main__':
import networkx as nx
try:
import matplotlib.pyplot as plt
except:
pass
if len(sys.argv)==1:
filePath = "unix_email.mbox"
else:
filePath = sys.argv[1]
mbox = mailbox.mbox(filePath, msgfactory) # parse unix mailbox
G=nx.MultiDiGraph() # create empty graph
# parse each messages and build graph
for msg in mbox: # msg is python email.Message.Message object
(source_name,source_addr) = parseaddr(msg['From']) # sender
# get all recipients
# see http://www.python.org/doc/current/lib/module-email.Utils.html
tos = msg.get_all('to', [])
ccs = msg.get_all('cc', [])
resent_tos = msg.get_all('resent-to', [])
resent_ccs = msg.get_all('resent-cc', [])
all_recipients = getaddresses(tos + ccs + resent_tos + resent_ccs)
# now add the edges for this mail message
for (target_name,target_addr) in all_recipients:
G.add_edge(source_addr,target_addr,message=msg)
# print edges with message subject
for (u,v,d) in G.edges_iter(data=True):
print("From: %s To: %s Subject: %s"%(u,v,d['message']["Subject"]))
try: # draw
pos=nx.spring_layout(G,iterations=10)
nx.draw(G,pos,node_size=0,alpha=0.4,edge_color='r',font_size=16)
plt.savefig("unix_email.png")
plt.show()
except: # matplotlib not available
pass
| mit |
VoigtLab/dnaplotlib | apps/quick.py | 1 | 4981 | #!/usr/bin/env python
"""
quick.py
Quickly Plot SBOL Designs
Usage:
------
python quick.py -input "p.gray p.lightblue i.lightred r.green c.orange t.purple -t.black -c.yellow -p.yellow" -output out.pdf
Allowed part types:
p: promoter
i: ribozyme
r: rbs
c: cds
t: terminator
s: spacer
=: scar
Reverse part direction by using '-' before the 1-letter part type
Allowed colors:
black, gray, red, orange, yellow, green, blue, purple, lightred, lightorange,
lightyellow, lightgreen, lightblue, lightpurple, white
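An optional label can be placed between the 1-letter type and the color,
e.g. "c.gfp.green" (handled by process_arguments below, which sets the
'label' rendering options for that part).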
"""
# Set the backend to use (important for headless servers)
import matplotlib
matplotlib.use('Agg')
# Other modules we require
import argparse
import dnaplotlib as dpl
import matplotlib.pyplot as plt
__author__ = 'Thomas E. Gorochowski <[email protected]>, Voigt Lab, MIT\n\
Bryan Der <[email protected]>, Voigt Lab, MIT'
__license__ = 'MIT'
__version__ = '1.0'
def process_arguments (input):
# Types mapping
types = {}
types['p'] = 'Promoter'
types['i'] = 'Ribozyme'
types['r'] = 'RBS'
types['c'] = 'CDS'
types['t'] = 'Terminator'
types['s'] = 'Spacer'
types['='] = 'Scar'
# Colours mapping
colors = {}
colors['white'] = (1.00,1.00,1.00)
colors['black'] = (0.00,0.00,0.00)
colors['gray'] = (0.60,0.60,0.60)
colors['red'] = (0.89,0.10,0.11)
colors['orange'] = (1.00,0.50,0.00)
colors['yellow'] = (1.00,1.00,0.00)
colors['green'] = (0.20,0.63,0.17)
colors['blue'] = (0.12,0.47,0.71)
colors['purple'] = (0.42,0.24,0.60)
colors['lightred'] = (0.98,0.60,0.60)
colors['lightorange'] = (0.99,0.75,0.44)
colors['lightyellow'] = (1.00,1.00,0.60)
colors['lightgreen'] = (0.70,0.87,0.54)
colors['lightblue'] = (0.65,0.81,0.89)
colors['lightpurple'] = (0.79,0.70,0.84)
# Generate the parts list from the arguments
part_list = []
part_idx = 1
for el in input.split(' '):
if el != '':
part_parts = el.split('.')
# Only type and colour provided
if len(part_parts) == 2:
part_short_type = part_parts[0]
part_fwd = True
if part_short_type[0] == '-':
part_fwd = False
part_short_type = part_short_type[1:]
if part_short_type in list(types.keys()):
part_type = types[part_short_type]
part_color = part_parts[1]
part_rgb = (0,0,0)
if part_color in list(colors.keys()):
part_rgb = colors[part_color]
part_list.append( {'name' : str(part_idx),
'type' : part_type,
'fwd' : part_fwd,
'opts' : {'color': part_rgb}} )
# Type, label and colour provided
if len(part_parts) == 3:
part_short_type = part_parts[0]
part_fwd = True
if part_short_type[0] == '-':
part_fwd = False
part_short_type = part_short_type[1:]
if part_short_type in list(types.keys()):
part_type = types[part_short_type]
part_label = part_parts[1]
part_color = part_parts[2]
part_rgb = (0,0,0)
if part_color in list(colors.keys()):
part_rgb = colors[part_color]
part_list.append( {'name' : str(part_idx),
'type' : part_type,
'fwd' : part_fwd,
'opts' : {'color': part_rgb,
'label': part_label,
'label_size': 8,
'label_y_offset': -17}} )
return part_list
def main():
# Parse the command line inputs
parser = argparse.ArgumentParser(description="one line quick plot")
parser.add_argument("-input", dest="input", required=True, help="\"p.gray p.lightblue i.lightred r.green c.orange t.purple -t.black -c.yellow -p.yellow\"", metavar="string")
parser.add_argument("-output", dest="output", required=False, help="output pdf filename")
args = parser.parse_args()
# Process the arguments
design = process_arguments(args.input)
# Create objects for plotting (dnaplotlib)
dr = dpl.DNARenderer(linewidth=1.15, backbone_pad_left=3, backbone_pad_right=3)
reg_renderers = dr.std_reg_renderers()
part_renderers = dr.SBOL_part_renderers()
regs = None
# Generate the figure
fig = plt.figure(figsize=(5.0,5.0))
ax = fig.add_subplot(1,1,1)
# Plot the design
dna_start, dna_end = dr.renderDNA(ax, design, part_renderers, regs, reg_renderers)
max_dna_len = dna_end-dna_start
# Format the axis
ax.set_xticks([])
ax.set_yticks([])
# Set bounds
ax.set_xlim([(-0.01*max_dna_len),
max_dna_len+(0.01*max_dna_len)])
ax.set_ylim([-35,35])
ax.set_aspect('equal')
ax.set_axis_off()
# Update the size of the figure to fit the constructs drawn
fig_x_dim = max_dna_len/60.0
if fig_x_dim < 1.0:
fig_x_dim = 1.0
fig_y_dim = 1.2
plt.gcf().set_size_inches( (fig_x_dim, fig_y_dim) )
# Save the figure
plt.tight_layout()
fig.savefig(args.output, transparent=True, dpi=300)
# Enable the script to be run from the command line
if __name__ == "__main__":
main()
| mit |
btabibian/scikit-learn | sklearn/linear_model/tests/test_bayes.py | 23 | 3376 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import SkipTest
from sklearn.linear_model.bayes import BayesianRidge, ARDRegression
from sklearn.linear_model import Ridge
from sklearn import datasets
def test_bayesian_on_diabetes():
# Test BayesianRidge on diabetes
raise SkipTest("XFailed Test")
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
clf = BayesianRidge(compute_score=True)
# Test with more samples than features
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
# Test with more features than samples
X = X[:5, :]
y = y[:5]
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
def test_bayesian_ridge_parameter():
# Test correctness of lambda_ and alpha_ parameters (Github issue #8224)
X = np.array([[1, 1], [3, 4], [5, 7], [4, 1], [2, 6], [3, 10], [3, 2]])
y = np.array([1, 2, 3, 2, 0, 4, 5]).T
# A Ridge regression model using an alpha value equal to the ratio of
# lambda_ and alpha_ from the Bayesian Ridge model must be identical
br_model = BayesianRidge(compute_score=True).fit(X, y)
rr_model = Ridge(alpha=br_model.lambda_ / br_model.alpha_).fit(X, y)
assert_array_almost_equal(rr_model.coef_, br_model.coef_)
assert_almost_equal(rr_model.intercept_, br_model.intercept_)
def test_toy_bayesian_ridge_object():
# Test BayesianRidge on toy
X = np.array([[1], [2], [6], [8], [10]])
Y = np.array([1, 2, 6, 8, 10])
clf = BayesianRidge(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
def test_toy_ard_object():
# Test BayesianRegression ARD classifier
X = np.array([[1], [2], [3]])
Y = np.array([1, 2, 3])
clf = ARDRegression(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
def test_return_std():
# Test return_std option for both Bayesian regressors
def f(X):
return np.dot(X, w) + b
def f_noise(X, noise_mult):
return f(X) + np.random.randn(X.shape[0]) * noise_mult
d = 5
n_train = 50
n_test = 10
w = np.array([1.0, 0.0, 1.0, -1.0, 0.0])
b = 1.0
X = np.random.random((n_train, d))
X_test = np.random.random((n_test, d))
for decimal, noise_mult in enumerate([1, 0.1, 0.01]):
y = f_noise(X, noise_mult)
m1 = BayesianRidge()
m1.fit(X, y)
y_mean1, y_std1 = m1.predict(X_test, return_std=True)
assert_array_almost_equal(y_std1, noise_mult, decimal=decimal)
m2 = ARDRegression()
m2.fit(X, y)
y_mean2, y_std2 = m2.predict(X_test, return_std=True)
assert_array_almost_equal(y_std2, noise_mult, decimal=decimal)
| bsd-3-clause |
liyi193328/seq2seq | seq2seq/contrib/learn/learn_io/data_feeder.py | 88 | 31139 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementations of different data feeders to provide data for TF trainer."""
# TODO(ipolosukhin): Replace this module with feed-dict queue runners & queues.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
# pylint: disable=g-multiple-import,g-bad-import-order
from .pandas_io import HAS_PANDAS, extract_pandas_data, extract_pandas_matrix, extract_pandas_labels
from .dask_io import HAS_DASK, extract_dask_data, extract_dask_labels
# pylint: enable=g-multiple-import,g-bad-import-order
def _get_in_out_shape(x_shape, y_shape, n_classes, batch_size=None):
"""Returns shape for input and output of the data feeder."""
x_is_dict, y_is_dict = isinstance(
x_shape, dict), y_shape is not None and isinstance(y_shape, dict)
if y_is_dict and n_classes is not None:
assert (isinstance(n_classes, dict))
if batch_size is None:
batch_size = list(x_shape.values())[0][0] if x_is_dict else x_shape[0]
elif batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
if x_is_dict:
input_shape = {}
for k, v in list(x_shape.items()):
input_shape[k] = [batch_size] + (list(v[1:]) if len(v) > 1 else [1])
else:
x_shape = list(x_shape[1:]) if len(x_shape) > 1 else [1]
input_shape = [batch_size] + x_shape
if y_shape is None:
return input_shape, None, batch_size
def out_el_shape(out_shape, num_classes):
out_shape = list(out_shape[1:]) if len(out_shape) > 1 else []
# Skip first dimension if it is 1.
if out_shape and out_shape[0] == 1:
out_shape = out_shape[1:]
if num_classes is not None and num_classes > 1:
return [batch_size] + out_shape + [num_classes]
else:
return [batch_size] + out_shape
if not y_is_dict:
output_shape = out_el_shape(y_shape, n_classes)
else:
output_shape = dict([
(k, out_el_shape(v, n_classes[k]
if n_classes is not None and k in n_classes else None))
for k, v in list(y_shape.items())
])
return input_shape, output_shape, batch_size
def _data_type_filter(x, y):
"""Filter data types into acceptable format."""
if HAS_DASK:
x = extract_dask_data(x)
if y is not None:
y = extract_dask_labels(y)
if HAS_PANDAS:
x = extract_pandas_data(x)
if y is not None:
y = extract_pandas_labels(y)
return x, y
def _is_iterable(x):
return hasattr(x, 'next') or hasattr(x, '__next__')
def setup_train_data_feeder(x,
y,
n_classes,
batch_size=None,
shuffle=True,
epochs=None):
"""Create data feeder, to sample inputs from dataset.
If `x` and `y` are iterators, use `StreamingDataFeeder`.
Args:
x: numpy, pandas or Dask matrix or dictionary of aforementioned. Also
supports iterables.
y: numpy, pandas or Dask array or dictionary of aforementioned. Also
supports
iterables.
n_classes: number of classes. Must be None or same type as y. In case, `y`
is `dict`
(or iterable which returns dict) such that `n_classes[key] = n_classes for
y[key]`
batch_size: size to split data into parts. Must be >= 1.
shuffle: Whether to shuffle the inputs.
epochs: Number of epochs to run.
Returns:
DataFeeder object that returns training data.
Raises:
ValueError: if one of `x` and `y` is iterable and the other is not.
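Example (sketch, assuming `x` and `y` are in-memory numpy arrays):
    feeder = setup_train_data_feeder(x, y, n_classes=2, batch_size=32)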
"""
x, y = _data_type_filter(x, y)
if HAS_DASK:
# pylint: disable=g-import-not-at-top
import dask.dataframe as dd
if (isinstance(x, (dd.Series, dd.DataFrame)) and
(y is None or isinstance(y, (dd.Series, dd.DataFrame)))):
data_feeder_cls = DaskDataFeeder
else:
data_feeder_cls = DataFeeder
else:
data_feeder_cls = DataFeeder
if _is_iterable(x):
if y is not None and not _is_iterable(y):
raise ValueError('Both x and y should be iterators for '
'streaming learning to work.')
return StreamingDataFeeder(x, y, n_classes, batch_size)
return data_feeder_cls(
x, y, n_classes, batch_size, shuffle=shuffle, epochs=epochs)
def _batch_data(x, batch_size=None):
if (batch_size is not None) and (batch_size <= 0):
raise ValueError('Invalid batch_size %d.' % batch_size)
x_first_el = six.next(x)
x = itertools.chain([x_first_el], x)
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
for data in x:
if isinstance(data, dict):
for k, v in list(data.items()):
chunk[k].append(v)
if (batch_size is not None) and (len(chunk[k]) >= batch_size):
chunk[k] = np.matrix(chunk[k])
chunk_filled = True
if chunk_filled:
yield chunk
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
else:
chunk.append(data)
if (batch_size is not None) and (len(chunk) >= batch_size):
yield np.matrix(chunk)
chunk = []
if isinstance(x_first_el, dict):
for k, v in list(data.items()):
chunk[k] = np.matrix(chunk[k])
yield chunk
else:
yield np.matrix(chunk)
def setup_predict_data_feeder(x, batch_size=None):
"""Returns an iterable for feeding into predict step.
Args:
x: numpy, pandas, Dask array or dictionary of aforementioned. Also supports
iterable.
batch_size: Size of batches to split data into. If `None`, returns one
batch of full size.
Returns:
List or iterator (or dictionary thereof) of parts of data to predict on.
Raises:
ValueError: if `batch_size` <= 0.
"""
if HAS_DASK:
x = extract_dask_data(x)
if HAS_PANDAS:
x = extract_pandas_data(x)
if _is_iterable(x):
return _batch_data(x, batch_size)
if len(x.shape) == 1:
x = np.reshape(x, (-1, 1))
if batch_size is not None:
if batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
n_batches = int(math.ceil(float(len(x)) / batch_size))
return [x[i * batch_size:(i + 1) * batch_size] for i in xrange(n_batches)]
return [x]
def setup_processor_data_feeder(x):
"""Sets up processor iterable.
Args:
x: numpy, pandas or iterable.
Returns:
Iterable of data to process.
"""
if HAS_PANDAS:
x = extract_pandas_matrix(x)
return x
def check_array(array, dtype):
"""Checks array on dtype and converts it if different.
Args:
array: Input array.
dtype: Expected dtype.
Returns:
Original array or converted.
"""
# skip check if array is instance of other classes, e.g. h5py.Dataset
# to avoid copying array and loading whole data into memory
if isinstance(array, (np.ndarray, list)):
array = np.array(array, dtype=dtype, order=None, copy=False)
return array
def _access(data, iloc):
"""Accesses an element from collection, using integer location based indexing.
Args:
data: array-like. The collection to access
iloc: `int` or `list` of `int`s. Location(s) to access in `collection`
Returns:
The element of `a` found at location(s) `iloc`.
"""
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if isinstance(data, pd.Series) or isinstance(data, pd.DataFrame):
return data.iloc[iloc]
return data[iloc]
def _check_dtype(dtype):
if dtypes.as_dtype(dtype) == dtypes.float64:
logging.warn(
'float64 is not supported by many models, consider casting to float32.')
return dtype
class DataFeeder(object):
"""Data feeder is an example class to sample data for TF trainer."""
def __init__(self,
x,
y,
n_classes,
batch_size=None,
shuffle=True,
random_state=None,
epochs=None):
"""Initializes a DataFeeder instance.
Args:
x: One feature sample which can either Nd numpy matrix of shape
`[n_samples, n_features, ...]` or dictionary of Nd numpy matrix.
y: label vector, either floats for regression or class id for
classification. If matrix, will consider as a sequence of labels.
Can be `None` for unsupervised setting. Also supports dictionary of
labels.
n_classes: Number of classes, 0 and 1 are considered regression, `None`
will pass through the input labels without one-hot conversion. Also, if
`y` is `dict`, then `n_classes` must be `dict` such that
`n_classes[key] = n_classes for label y[key]`, `None` otherwise.
batch_size: Mini-batch size to accumulate samples in one mini batch.
shuffle: Whether to shuffle `x`.
random_state: Numpy `RandomState` object to reproduce sampling.
epochs: Number of times to iterate over input data before raising
`StopIteration` exception.
Attributes:
x: Input features (ndarray or dictionary of ndarrays).
y: Input label (ndarray or dictionary of ndarrays).
n_classes: Number of classes (if `None`, pass through indices without
one-hot conversion).
batch_size: Mini-batch size to accumulate.
input_shape: Shape of the input (or dictionary of shapes).
output_shape: Shape of the output (or dictionary of shapes).
input_dtype: DType of input (or dictionary of dtypes).
output_dtype: DType of output (or dictionary of dtypes).
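Example (sketch; `x`, `y` are numpy arrays and `sess`, `train_op` are an
existing tf.Session and training op):
    feeder = DataFeeder(x, y, n_classes=2, batch_size=32)
    inp, out = feeder.input_builder()
    feed_fn = feeder.get_feed_dict_fn()
    sess.run(train_op, feed_dict=feed_fn())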
"""
x_is_dict, y_is_dict = isinstance(x, dict), y is not None and isinstance(
y, dict)
if isinstance(y, list):
y = np.array(y)
self._x = dict([(k, check_array(v, v.dtype)) for k, v in list(x.items())
]) if x_is_dict else check_array(x, x.dtype)
self._y = None if y is None else \
dict([(k, check_array(v, v.dtype)) for k, v in list(y.items())]) if y_is_dict else check_array(y, y.dtype)
# self.n_classes is not None means we're converting raw target indices to one-hot.
if n_classes is not None:
if not y_is_dict:
y_dtype = (np.int64
if n_classes is not None and n_classes > 1 else np.float32)
self._y = (None if y is None else check_array(y, dtype=y_dtype))
self.n_classes = n_classes
self.max_epochs = epochs
x_shape = dict([(k, v.shape) for k, v in list(self._x.items())
]) if x_is_dict else self._x.shape
y_shape = dict([(k, v.shape) for k, v in list(self._y.items())
]) if y_is_dict else None if y is None else self._y.shape
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_shape, y_shape, n_classes, batch_size)
# Input dtype matches dtype of x.
self._input_dtype = dict([(k, _check_dtype(v.dtype)) for k, v in list(self._x.items())]) if x_is_dict \
else _check_dtype(self._x.dtype)
# note: self._output_dtype = np.float32 when y is None
self._output_dtype = dict([(k, _check_dtype(v.dtype)) for k, v in list(self._y.items())]) if y_is_dict \
else _check_dtype(self._y.dtype) if y is not None else np.float32
# self.n_classes is None means we're passing in raw target indices
if n_classes is not None and y_is_dict:
for key in list(n_classes.keys()):
if key in self._output_dtype:
self._output_dtype[key] = np.float32
self._shuffle = shuffle
self.random_state = np.random.RandomState(
42) if random_state is None else random_state
num_samples = list(self._x.values())[0].shape[
0] if x_is_dict else self._x.shape[0]
if self._shuffle:
self.indices = self.random_state.permutation(num_samples)
else:
self.indices = np.array(range(num_samples))
self.offset = 0
self.epoch = 0
self._epoch_placeholder = None
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def shuffle(self):
return self._shuffle
@property
def input_dtype(self):
return self._input_dtype
@property
def output_dtype(self):
return self._output_dtype
@property
def batch_size(self):
return self._batch_size
def make_epoch_variable(self):
"""Adds a placeholder variable for the epoch to the graph.
Returns:
The epoch placeholder.
"""
self._epoch_placeholder = array_ops.placeholder(
dtypes.int32, [1], name='epoch')
return self._epoch_placeholder
def input_builder(self):
"""Builds inputs in the graph.
Returns:
Two placeholders for inputs and outputs.
"""
def get_placeholder(shape, dtype, name_prepend):
if shape is None:
return None
if isinstance(shape, dict):
placeholder = {}
for key in list(shape.keys()):
placeholder[key] = array_ops.placeholder(
dtypes.as_dtype(dtype[key]), [None] + shape[key][1:],
name=name_prepend + '_' + key)
else:
placeholder = array_ops.placeholder(
dtypes.as_dtype(dtype), [None] + shape[1:], name=name_prepend)
return placeholder
self._input_placeholder = get_placeholder(self.input_shape,
self._input_dtype, 'input')
self._output_placeholder = get_placeholder(self.output_shape,
self._output_dtype, 'output')
return self._input_placeholder, self._output_placeholder
def set_placeholders(self, input_placeholder, output_placeholder):
"""Sets placeholders for this data feeder.
Args:
input_placeholder: Placeholder for `x` variable. Should match shape
of the examples in the x dataset.
output_placeholder: Placeholder for `y` variable. Should match
shape of the examples in the y dataset. Can be `None`.
"""
self._input_placeholder = input_placeholder
self._output_placeholder = output_placeholder
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {
'epoch': self.epoch,
'offset': self.offset,
'batch_size': self._batch_size
}
def get_feed_dict_fn(self):
"""Returns a function that samples data into given placeholders.
Returns:
A function that when called samples a random subset of batch size
from `x` and `y`.
"""
x_is_dict, y_is_dict = isinstance(
self._x, dict), self._y is not None and isinstance(self._y, dict)
# Assign input features from random indices.
def extract(data, indices):
return (np.array(_access(data, indices)).reshape((indices.shape[0], 1)) if
len(data.shape) == 1 else _access(data, indices))
# assign labels from random indices
def assign_label(data, shape, dtype, n_classes, indices):
shape[0] = indices.shape[0]
out = np.zeros(shape, dtype=dtype)
for i in xrange(out.shape[0]):
sample = indices[i]
# self.n_classes is None means we're passing in raw target indices
if n_classes is None:
out[i] = _access(data, sample)
else:
if n_classes > 1:
if len(shape) == 2:
out.itemset((i, int(_access(data, sample))), 1.0)
else:
for idx, value in enumerate(_access(data, sample)):
out.itemset(tuple([i, idx, value]), 1.0)
else:
out[i] = _access(data, sample)
return out
def _feed_dict_fn():
"""Function that samples data into given placeholders."""
if self.max_epochs is not None and self.epoch + 1 > self.max_epochs:
raise StopIteration
assert self._input_placeholder is not None
feed_dict = {}
if self._epoch_placeholder is not None:
feed_dict[self._epoch_placeholder.name] = [self.epoch]
# Take next batch of indices.
x_len = list(self._x.values())[0].shape[
0] if x_is_dict else self._x.shape[0]
end = min(x_len, self.offset + self._batch_size)
batch_indices = self.indices[self.offset:end]
# adding input placeholder
feed_dict.update(
dict([(self._input_placeholder[k].name, extract(v, batch_indices))
for k, v in list(self._x.items())]) if x_is_dict else
{self._input_placeholder.name: extract(self._x, batch_indices)})
# move offset and reset it if necessary
self.offset += self._batch_size
if self.offset >= x_len:
self.indices = self.random_state.permutation(
x_len) if self._shuffle else np.array(range(x_len))
self.offset = 0
self.epoch += 1
# return early if there are no labels
if self._output_placeholder is None:
return feed_dict
# adding output placeholders
if y_is_dict:
for k, v in list(self._y.items()):
n_classes = (self.n_classes[k] if k in self.n_classes else
None) if self.n_classes is not None else None
shape, dtype = self.output_shape[k], self._output_dtype[k]
feed_dict.update({
self._output_placeholder[k].name:
assign_label(v, shape, dtype, n_classes, batch_indices)
})
else:
shape, dtype, n_classes = self.output_shape, self._output_dtype, self.n_classes
feed_dict.update({
self._output_placeholder.name:
assign_label(self._y, shape, dtype, n_classes, batch_indices)
})
return feed_dict
return _feed_dict_fn
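# Editor's note: the function below is an illustrative usage sketch added for
# clarity; it is not part of the original module. The array sizes and class
# count are invented, and it assumes the module-level helpers (`check_array`,
# `_get_in_out_shape`) behave as their names suggest.
def _example_data_feeder_usage():
  """Hypothetical end-to-end call pattern for `DataFeeder` (classification)."""
  features = np.random.rand(64, 10).astype(np.float32)
  labels = np.random.randint(0, 3, size=64)
  feeder = DataFeeder(features, labels, n_classes=3, batch_size=16)
  inp, out = feeder.input_builder()  # placeholders, e.g. [None, 10] and [None, 3]
  feed_fn = feeder.get_feed_dict_fn()
  feed_dict = feed_fn()  # one mini-batch keyed by placeholder names
  return inp, out, feed_dict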
class StreamingDataFeeder(DataFeeder):
"""Data feeder for TF trainer that reads data from iterator.
  Streaming data feeder allows reading data as it comes in from disk or
  somewhere else. It is customary to have these iterators rotate infinitely over
  the dataset, to allow control of how much to learn on the trainer side.
"""
def __init__(self, x, y, n_classes, batch_size):
"""Initializes a StreamingDataFeeder instance.
Args:
      x: iterator, each element of which returns one feature sample. A sample
        can be an Nd numpy matrix or a dictionary of Nd numpy matrices.
      y: iterator, each element of which returns one label sample. A sample can
        be an Nd numpy matrix or a dictionary of Nd numpy matrices with 1 or
        many classes or regression values.
      n_classes: indicator of how many classes the corresponding label sample
        has, for the purposes of one-hot conversion of the label. In case `y`
        is a dictionary, `n_classes` must be a dictionary (with the same keys
        as `y`) of how many classes there are in each label in `y`. If a key is
        present in `y` and missing in `n_classes`, the value is assumed `None`
        and no one-hot conversion is applied to the label with that key.
      batch_size: Mini batch size to accumulate samples in one batch. If set to
        `None`, it is assumed that the iterator returns already batched
        elements.
Attributes:
x: input features (or dictionary of input features).
y: input label (or dictionary of output features).
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input (can be dictionary depending on `x`).
output_shape: shape of the output (can be dictionary depending on `y`).
input_dtype: dtype of input (can be dictionary depending on `x`).
output_dtype: dtype of output (can be dictionary depending on `y`).
"""
# pylint: disable=invalid-name,super-init-not-called
x_first_el = six.next(x)
self._x = itertools.chain([x_first_el], x)
if y is not None:
y_first_el = six.next(y)
self._y = itertools.chain([y_first_el], y)
else:
y_first_el = None
self._y = None
self.n_classes = n_classes
x_is_dict = isinstance(x_first_el, dict)
y_is_dict = y is not None and isinstance(y_first_el, dict)
if y_is_dict and n_classes is not None:
assert isinstance(n_classes, dict)
# extract shapes for first_elements
if x_is_dict:
x_first_el_shape = dict(
[(k, [1] + list(v.shape)) for k, v in list(x_first_el.items())])
else:
x_first_el_shape = [1] + list(x_first_el.shape)
if y_is_dict:
y_first_el_shape = dict(
[(k, [1] + list(v.shape)) for k, v in list(y_first_el.items())])
elif y is None:
y_first_el_shape = None
else:
y_first_el_shape = ([1] + list(y_first_el[0].shape if isinstance(
y_first_el, list) else y_first_el.shape))
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_first_el_shape, y_first_el_shape, n_classes, batch_size)
# Input dtype of x_first_el.
if x_is_dict:
self._input_dtype = dict(
[(k, _check_dtype(v.dtype)) for k, v in list(x_first_el.items())])
else:
self._input_dtype = _check_dtype(x_first_el.dtype)
# Output dtype of y_first_el.
def check_y_dtype(el):
if isinstance(el, np.ndarray):
return el.dtype
elif isinstance(el, list):
return check_y_dtype(el[0])
else:
return _check_dtype(np.dtype(type(el)))
    # Output types are floats, due to both softmax and regression requirements.
if n_classes is not None and (y is None or not y_is_dict) and n_classes > 0:
self._output_dtype = np.float32
elif y_is_dict:
self._output_dtype = dict(
[(k, check_y_dtype(v)) for k, v in list(y_first_el.items())])
elif y is None:
self._output_dtype = None
else:
self._output_dtype = check_y_dtype(y_first_el)
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self):
"""Returns a function, that will sample data and provide it to placeholders.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
self.stopped = False
def _feed_dict_fn():
"""Samples data and provides it to placeholders.
Returns:
`dict` of input and output tensors.
"""
def init_array(shape, dtype):
"""Initialize array of given shape or dict of shapes and dtype."""
if shape is None:
return None
elif isinstance(shape, dict):
return dict([(k, np.zeros(shape[k], dtype[k]))
for k in list(shape.keys())])
else:
return np.zeros(shape, dtype=dtype)
def put_data_array(dest, index, source=None, n_classes=None):
"""Puts data array into container."""
if source is None:
dest = dest[:index]
elif n_classes is not None and n_classes > 1:
if len(self.output_shape) == 2:
dest.itemset((index, source), 1.0)
else:
for idx, value in enumerate(source):
dest.itemset(tuple([index, idx, value]), 1.0)
else:
if len(dest.shape) > 1:
dest[index, :] = source
else:
dest[index] = source[0] if isinstance(source, list) else source
return dest
def put_data_array_or_dict(holder, index, data=None, n_classes=None):
"""Puts data array or data dictionary into container."""
if holder is None:
return None
if isinstance(holder, dict):
if data is None:
data = {k: None for k in holder.keys()}
assert isinstance(data, dict)
for k in holder.keys():
num_classes = n_classes[k] if (n_classes is not None and
k in n_classes) else None
holder[k] = put_data_array(holder[k], index, data[k], num_classes)
else:
holder = put_data_array(holder, index, data, n_classes)
return holder
if self.stopped:
raise StopIteration
inp = init_array(self.input_shape, self._input_dtype)
out = init_array(self.output_shape, self._output_dtype)
for i in xrange(self._batch_size):
# Add handling when queue ends.
try:
next_inp = six.next(self._x)
inp = put_data_array_or_dict(inp, i, next_inp, None)
except StopIteration:
self.stopped = True
if i == 0:
raise
inp = put_data_array_or_dict(inp, i, None, None)
out = put_data_array_or_dict(out, i, None, None)
break
if self._y is not None:
next_out = six.next(self._y)
out = put_data_array_or_dict(out, i, next_out, self.n_classes)
# creating feed_dict
if isinstance(inp, dict):
feed_dict = dict([(self._input_placeholder[k].name, inp[k])
for k in list(self._input_placeholder.keys())])
else:
feed_dict = {self._input_placeholder.name: inp}
if self._y is not None:
if isinstance(out, dict):
feed_dict.update(
dict([(self._output_placeholder[k].name, out[k])
for k in list(self._output_placeholder.keys())]))
else:
feed_dict.update({self._output_placeholder.name: out})
return feed_dict
return _feed_dict_fn
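# Editor's note: illustrative usage sketch added for clarity; it is not part of
# the original module. The generators and sizes are invented, and the regression
# setting (`n_classes=None`) is chosen to keep the sketch simple.
def _example_streaming_data_feeder_usage():
  """Hypothetical call pattern for `StreamingDataFeeder` with infinite iterators."""
  def feature_iter():
    while True:  # these iterators conventionally rotate over the data indefinitely
      yield np.random.rand(10).astype(np.float32)

  def label_iter():
    while True:
      yield np.random.rand(1).astype(np.float32)

  feeder = StreamingDataFeeder(feature_iter(), label_iter(), n_classes=None,
                               batch_size=32)
  feeder.input_builder()  # placeholder shapes are inferred from the first elements
  feed_fn = feeder.get_feed_dict_fn()
  return feed_fn()  # one batched feed_dict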
class DaskDataFeeder(object):
"""Data feeder for that reads data from dask.Series and dask.DataFrame.
Numpy arrays can be serialized to disk and it's possible to do random seeks
into them. DaskDataFeeder will remove requirement to have full dataset in the
memory and still do random seeks for sampling of batches.
"""
def __init__(self,
x,
y,
n_classes,
batch_size,
shuffle=True,
random_state=None,
epochs=None):
"""Initializes a DaskDataFeeder instance.
Args:
      x: iterator that, for each element, returns features.
      y: iterator that, for each element, returns 1 or many classes /
        regression values.
n_classes: indicator of how many classes the label has.
batch_size: Mini batch size to accumulate.
shuffle: Whether to shuffle the inputs.
      random_state: random state for RNG. Note that it will mutate, so use an
        int value for this if you want consistently sized batches.
epochs: Number of epochs to run.
Attributes:
x: input features.
y: input label.
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input.
output_shape: shape of the output.
input_dtype: dtype of input.
output_dtype: dtype of output.
Raises:
ValueError: if `x` or `y` are `dict`, as they are not supported currently.
"""
if isinstance(x, dict) or isinstance(y, dict):
raise ValueError(
'DaskDataFeeder does not support dictionaries at the moment.')
# pylint: disable=invalid-name,super-init-not-called
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
# TODO(terrytangyuan): check x and y dtypes in dask_io like pandas
self._x = x
self._y = y
# save column names
self._x_columns = list(x.columns)
if isinstance(y.columns[0], str):
self._y_columns = list(y.columns)
else:
      # deal with cases where two DFs have overlapping default numeric colnames
self._y_columns = len(self._x_columns) + 1
self._y = self._y.rename(columns={y.columns[0]: self._y_columns})
# TODO(terrytangyuan): deal with unsupervised cases
# combine into a data frame
self.df = dd.multi.concat([self._x, self._y], axis=1)
self.n_classes = n_classes
x_count = x.count().compute()[0]
x_shape = (x_count, len(self._x.columns))
y_shape = (x_count, len(self._y.columns))
# TODO(terrytangyuan): Add support for shuffle and epochs.
self._shuffle = shuffle
self.epochs = epochs
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_shape, y_shape, n_classes, batch_size)
self.sample_fraction = self._batch_size / float(x_count)
self._input_dtype = _check_dtype(self._x.dtypes[0])
self._output_dtype = _check_dtype(self._y.dtypes[self._y_columns])
if random_state is None:
self.random_state = 66
else:
self.random_state = random_state
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self, input_placeholder, output_placeholder):
"""Returns a function, that will sample data and provide it to placeholders.
Args:
input_placeholder: tf.Placeholder for input features mini batch.
output_placeholder: tf.Placeholder for output labels.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
def _feed_dict_fn():
"""Samples data and provides it to placeholders."""
# TODO(ipolosukhin): option for with/without replacement (dev version of
# dask)
sample = self.df.random_split(
[self.sample_fraction, 1 - self.sample_fraction],
random_state=self.random_state)
inp = extract_pandas_matrix(sample[0][self._x_columns].compute()).tolist()
out = extract_pandas_matrix(sample[0][self._y_columns].compute())
# convert to correct dtype
inp = np.array(inp, dtype=self._input_dtype)
# one-hot encode out for each class for cross entropy loss
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if not isinstance(out, pd.Series):
out = out.flatten()
out_max = self._y.max().compute().values[0]
encoded_out = np.zeros((out.size, out_max + 1), dtype=self._output_dtype)
encoded_out[np.arange(out.size), out] = 1
return {input_placeholder.name: inp, output_placeholder.name: encoded_out}
return _feed_dict_fn
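# Editor's note: hypothetical usage sketch (added for clarity, not part of the
# original module); the column names and sizes below are invented, and
# `input_placeholder`/`output_placeholder` are assumed to come from a prior
# `input_builder()`-style step.
#   import dask.dataframe as dd
#   import pandas as pd
#   x = dd.from_pandas(pd.DataFrame({'f0': range(100), 'f1': range(100)}), npartitions=4)
#   y = dd.from_pandas(pd.DataFrame({'label': [i % 2 for i in range(100)]}), npartitions=4)
#   feeder = DaskDataFeeder(x, y, n_classes=2, batch_size=10)
#   feed_fn = feeder.get_feed_dict_fn(input_placeholder, output_placeholder)
#   feed_dict = feed_fn()  # one random batch; labels are one-hot encoded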
| apache-2.0 |
PatrickChrist/scikit-learn | sklearn/decomposition/tests/test_nmf.py | 130 | 6059 | import numpy as np
from scipy import linalg
from sklearn.decomposition import nmf
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
random_state = np.random.mtrand.RandomState(0)
@raises(ValueError)
def test_initialize_nn_input():
# Test NNDSVD behaviour on negative input
nmf._initialize_nmf(-np.ones((2, 2)), 2)
def test_initialize_nn_output():
# Test that NNDSVD does not return negative values
data = np.abs(random_state.randn(10, 10))
for var in (None, 'a', 'ar'):
W, H = nmf._initialize_nmf(data, 10, random_state=0)
assert_false((W < 0).any() or (H < 0).any())
def test_initialize_close():
# Test NNDSVD error
# Test that _initialize_nmf error is less than the standard deviation of
# the entries in the matrix.
A = np.abs(random_state.randn(10, 10))
W, H = nmf._initialize_nmf(A, 10)
error = linalg.norm(np.dot(W, H) - A)
sdev = linalg.norm(A - A.mean())
assert_true(error <= sdev)
def test_initialize_variants():
# Test NNDSVD variants correctness
# Test that the variants 'a' and 'ar' differ from basic NNDSVD only where
# the basic version has zeros.
data = np.abs(random_state.randn(10, 10))
W0, H0 = nmf._initialize_nmf(data, 10, variant=None)
Wa, Ha = nmf._initialize_nmf(data, 10, variant='a')
War, Har = nmf._initialize_nmf(data, 10, variant='ar', random_state=0)
for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
assert_true(np.allclose(evl[ref != 0], ref[ref != 0]))
@raises(ValueError)
def test_projgrad_nmf_fit_nn_input():
# Test model fit behaviour on negative input
A = -np.ones((2, 2))
m = nmf.ProjectedGradientNMF(n_components=2, init=None, random_state=0)
m.fit(A)
def test_projgrad_nmf_fit_nn_output():
# Test that the decomposition does not contain negative values
A = np.c_[5 * np.ones(5) - np.arange(1, 6),
5 * np.ones(5) + np.arange(1, 6)]
for init in (None, 'nndsvd', 'nndsvda', 'nndsvdar'):
model = nmf.ProjectedGradientNMF(n_components=2, init=init,
random_state=0)
transf = model.fit_transform(A)
assert_false((model.components_ < 0).any() or
(transf < 0).any())
def test_projgrad_nmf_fit_close():
# Test that the fit is not too far away
pnmf = nmf.ProjectedGradientNMF(5, init='nndsvda', random_state=0)
X = np.abs(random_state.randn(6, 5))
assert_less(pnmf.fit(X).reconstruction_err_, 0.05)
def test_nls_nn_output():
# Test that NLS solver doesn't return negative values
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, -A), A.T, A, 0.001, 100)
assert_false((Ap < 0).any())
def test_nls_close():
# Test that the NLS results should be close
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, A), A.T, np.zeros_like(A),
0.001, 100)
assert_true((np.abs(Ap - A) < 0.01).all())
def test_projgrad_nmf_transform():
# Test that NMF.transform returns close values
# (transform uses scipy.optimize.nnls for now)
A = np.abs(random_state.randn(6, 5))
m = nmf.ProjectedGradientNMF(n_components=5, init='nndsvd', random_state=0)
transf = m.fit_transform(A)
assert_true(np.allclose(transf, m.transform(A), atol=1e-2, rtol=0))
def test_n_components_greater_n_features():
# Smoke test for the case of more components than features.
A = np.abs(random_state.randn(30, 10))
nmf.ProjectedGradientNMF(n_components=15, sparseness='data',
random_state=0).fit(A)
def test_projgrad_nmf_sparseness():
# Test sparseness
# Test that sparsity constraints actually increase sparseness in the
# part where they are applied.
A = np.abs(random_state.randn(10, 10))
m = nmf.ProjectedGradientNMF(n_components=5, random_state=0).fit(A)
data_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='data',
random_state=0).fit(A).data_sparseness_
comp_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='components',
random_state=0).fit(A).comp_sparseness_
assert_greater(data_sp, m.data_sparseness_)
assert_greater(comp_sp, m.comp_sparseness_)
def test_sparse_input():
# Test that sparse matrices are accepted as input
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
T1 = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999).fit_transform(A)
A_sparse = csc_matrix(A)
pg_nmf = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999)
T2 = pg_nmf.fit_transform(A_sparse)
assert_array_almost_equal(pg_nmf.reconstruction_err_,
linalg.norm(A - np.dot(T2, pg_nmf.components_),
'fro'))
assert_array_almost_equal(T1, T2)
# same with sparseness
T2 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A_sparse)
T1 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A)
def test_sparse_transform():
# Test that transform works on sparse data. Issue #2124
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(5, 4))
A[A > 1.0] = 0
A = csc_matrix(A)
model = nmf.NMF(random_state=42)
A_fit_tr = model.fit_transform(A)
A_tr = model.transform(A)
# This solver seems pretty inconsistent
assert_array_almost_equal(A_fit_tr, A_tr, decimal=2)
| bsd-3-clause |
robintw/scikit-image | doc/examples/plot_equalize.py | 18 | 2786 | """
======================
Histogram Equalization
======================
This examples enhances an image with low contrast, using a method called
*histogram equalization*, which "spreads out the most frequent intensity
values" in an image [1]_. The equalized image has a roughly linear cumulative
distribution function.
While histogram equalization has the advantage that it requires no parameters,
it sometimes yields unnatural looking images. An alternative method is
*contrast stretching*, where the image is rescaled to include all intensities
that fall within the 2nd and 98th percentiles [2]_.
.. [1] http://en.wikipedia.org/wiki/Histogram_equalization
.. [2] http://homepages.inf.ed.ac.uk/rbf/HIPR2/stretch.htm
"""
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from skimage import data, img_as_float
from skimage import exposure
matplotlib.rcParams['font.size'] = 8
def plot_img_and_hist(img, axes, bins=256):
"""Plot an image along with its histogram and cumulative histogram.
"""
img = img_as_float(img)
ax_img, ax_hist = axes
ax_cdf = ax_hist.twinx()
# Display image
ax_img.imshow(img, cmap=plt.cm.gray)
ax_img.set_axis_off()
# Display histogram
ax_hist.hist(img.ravel(), bins=bins, histtype='step', color='black')
ax_hist.ticklabel_format(axis='y', style='scientific', scilimits=(0, 0))
ax_hist.set_xlabel('Pixel intensity')
ax_hist.set_xlim(0, 1)
ax_hist.set_yticks([])
# Display cumulative distribution
img_cdf, bins = exposure.cumulative_distribution(img, bins)
ax_cdf.plot(bins, img_cdf, 'r')
ax_cdf.set_yticks([])
return ax_img, ax_hist, ax_cdf
# Load an example image
img = data.moon()
# Contrast stretching
p2, p98 = np.percentile(img, (2, 98))
img_rescale = exposure.rescale_intensity(img, in_range=(p2, p98))
# Equalization
img_eq = exposure.equalize_hist(img)
# Adaptive Equalization
img_adapteq = exposure.equalize_adapthist(img, clip_limit=0.03)
# Display results
fig, axes = plt.subplots(nrows=2, ncols=4, figsize=(8, 5))
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img, axes[:, 0])
ax_img.set_title('Low contrast image')
y_min, y_max = ax_hist.get_ylim()
ax_hist.set_ylabel('Number of pixels')
ax_hist.set_yticks(np.linspace(0, y_max, 5))
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_rescale, axes[:, 1])
ax_img.set_title('Contrast stretching')
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_eq, axes[:, 2])
ax_img.set_title('Histogram equalization')
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_adapteq, axes[:, 3])
ax_img.set_title('Adaptive equalization')
ax_cdf.set_ylabel('Fraction of total intensity')
ax_cdf.set_yticks(np.linspace(0, 1, 5))
# prevent overlap of y-axis labels
fig.subplots_adjust(wspace=0.4)
plt.show()
| bsd-3-clause |
rushter/MLAlgorithms | mla/tests/test_reduction.py | 1 | 1161 | # coding=utf-8
import pytest
from sklearn.datasets import make_classification
from sklearn.metrics import roc_auc_score
try:
from sklearn.model_selection import train_test_split
except ImportError:
from sklearn.cross_validation import train_test_split
from mla.ensemble import RandomForestClassifier
from mla.pca import PCA
@pytest.fixture
def dataset():
# Generate a random binary classification problem.
return make_classification(
n_samples=1000, n_features=100, n_informative=75, random_state=1111, n_classes=2, class_sep=2.5
)
# TODO: fix
@pytest.mark.skip()
def test_PCA(dataset):
X, y = dataset
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=1111)
p = PCA(50, solver="eigen")
# fit PCA with training set, not the entire dataset
p.fit(X_train)
X_train_reduced = p.transform(X_train)
X_test_reduced = p.transform(X_test)
model = RandomForestClassifier(n_estimators=25, max_depth=5)
model.fit(X_train_reduced, y_train)
predictions = model.predict(X_test_reduced)[:, 1]
score = roc_auc_score(y_test, predictions)
assert score >= 0.75
| mit |
drammock/expyfun | expyfun/analyze/tests/test_viz.py | 2 | 4939 | import numpy as np
from os import path as op
import pytest
from numpy.testing import assert_equal
import expyfun.analyze as ea
from expyfun._utils import _TempDir, requires_lib
temp_dir = _TempDir()
def _check_warnings(w):
"""Silly helper to deal with MPL deprecation warnings."""
assert all(['expyfun' not in ww.filename for ww in w])
@requires_lib('pandas')
def test_barplot_with_pandas():
"""Test bar plot function pandas support."""
import pandas as pd
tmp = pd.DataFrame(np.arange(20).reshape((4, 5)),
columns=['a', 'b', 'c', 'd', 'e'],
index=['one', 'two', 'three', 'four'])
ea.barplot(tmp)
ea.barplot(tmp, axis=0, lines=True)
@pytest.fixture
def tmp_err(): # noqa
rng = np.random.RandomState(0)
tmp = np.ones(4) + rng.rand(4)
err = 0.1 + tmp / 5.
return tmp, err
def test_barplot_degenerate(tmp_err):
"""Test bar plot degenerate cases."""
import matplotlib.pyplot as plt
tmp, err = tmp_err
# too many data dimensions:
pytest.raises(ValueError, ea.barplot, np.arange(8).reshape((2, 2, 2)))
# gap_size > 1:
pytest.raises(ValueError, ea.barplot, tmp, gap_size=1.1)
# shape mismatch between data & error bars:
pytest.raises(ValueError, ea.barplot, tmp, err_bars=np.arange(3))
# bad err_bar string:
pytest.raises(ValueError, ea.barplot, tmp, err_bars='foo')
# cannot calculate 'sd' error bars with only 1 value per bar:
pytest.raises(ValueError, ea.barplot, tmp, err_bars='sd')
# mismatched lengths of brackets & bracket_text:
pytest.raises(ValueError, ea.barplot, tmp, brackets=[(0, 1)],
bracket_text=['foo', 'bar'])
# bad bracket spec:
pytest.raises(ValueError, ea.barplot, tmp, brackets=[(1,)],
bracket_text=['foo'])
plt.close('all')
def test_barplot_single(tmp_err):
"""Test with single data point and single error bar spec."""
import matplotlib.pyplot as plt
tmp, err = tmp_err
ea.barplot(2, err_bars=0.2)
plt.close('all')
@pytest.mark.timeout(15)
def test_barplot_single_spec(tmp_err):
"""Test with one data point per bar and user-specified err ranges."""
import matplotlib.pyplot as plt
tmp, err = tmp_err
_, axs = plt.subplots(1, 5, sharey=False)
ea.barplot(tmp, err_bars=err, brackets=([2, 3], [0, 1]), ax=axs[0],
bracket_text=['foo', 'bar'], bracket_inline=True)
ea.barplot(tmp, err_bars=err, brackets=((0, 2), (1, 3)), ax=axs[1],
bracket_text=['foo', 'bar'])
ea.barplot(tmp, err_bars=err, brackets=[[2, 1], [0, 3]], ax=axs[2],
bracket_text=['foo', 'bar'])
ea.barplot(tmp, err_bars=err, brackets=[(0, 1), (0, 2), (0, 3)],
bracket_text=['foo', 'bar', 'baz'], ax=axs[3])
ea.barplot(tmp, err_bars=err, brackets=[(0, 1), (2, 3), (0, 2), (1, 3)],
bracket_text=['foo', 'bar', 'baz', 'snafu'], ax=axs[4])
ea.barplot(tmp, groups=[[0, 1, 2], [3]], eq_group_widths=True,
brackets=[(0, 1), (1, 2), ([0, 1, 2], 3)],
bracket_text=['foo', 'bar', 'baz'],
bracket_group_lines=True)
plt.close('all')
@pytest.mark.timeout(10)
def test_barplot_multiple():
"""Test with multiple data points per bar and calculated ranges."""
import matplotlib.pyplot as plt
rng = np.random.RandomState(0)
tmp = (rng.randn(20) + np.arange(20)).reshape((5, 4)) # 2-dim
_, axs = plt.subplots(1, 4, sharey=False)
ea.barplot(tmp, lines=True, err_bars='sd', ax=axs[0], smart_defaults=False)
ea.barplot(tmp, lines=True, err_bars='ci', ax=axs[1], axis=0)
ea.barplot(tmp, lines=True, err_bars='se', ax=axs[2], ylim=(0, 30))
ea.barplot(tmp, lines=True, err_bars='se', ax=axs[3],
groups=[[0, 1, 2], [3, 4]], bracket_group_lines=True,
brackets=[(0, 1), (1, 2), (3, 4), ([0, 1, 2], [3, 4])],
bracket_text=['foo', 'bar', 'baz', 'snafu'])
extns = ['pdf'] # jpg, tif not supported; 'png', 'raw', 'svg' not tested
for ext in extns:
fname = op.join(temp_dir, 'temp.' + ext)
with pytest.warns(None) as w:
ea.barplot(tmp, groups=[[0, 1, 2], [3]], err_bars='sd', axis=0,
fname=fname)
plt.close()
_check_warnings(w)
plt.close('all')
def test_plot_screen():
"""Test screen plotting function."""
tmp = np.ones((10, 20, 2))
pytest.raises(ValueError, ea.plot_screen, tmp)
tmp = np.ones((10, 20, 3))
ea.plot_screen(tmp)
def test_format_pval():
"""Test p-value formatting."""
foo = ea.format_pval(1e-10, latex=False)
bar = ea.format_pval(1e-10, scheme='ross')
baz = ea.format_pval([0.2, 0.02])
qux = ea.format_pval(0.002, scheme='stars')
assert_equal(foo, 'p < 10^-9')
assert_equal(bar, '$p < 10^{{-9}}$')
assert_equal(baz[0], '$n.s.$')
assert_equal(qux, '${*}{*}$')
| bsd-3-clause |
google-research/torchsde | examples/latent_sde_lorenz.py | 1 | 12092 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train a latent SDE on data from a stochastic Lorenz attractor.
Reproduce the toy example in Section 7.2 of https://arxiv.org/pdf/2001.01328.pdf
To run this file, first run the following to install extra requirements:
pip install fire
To run, execute:
python -m examples.latent_sde_lorenz
"""
import logging
import os
from typing import Sequence
import fire
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import torch
import tqdm
from torch import nn
from torch import optim
from torch.distributions import Normal
import torchsde
class LinearScheduler(object):
def __init__(self, iters, maxval=1.0):
self._iters = max(1, iters)
self._val = maxval / self._iters
self._maxval = maxval
def step(self):
self._val = min(self._maxval, self._val + self._maxval / self._iters)
@property
def val(self):
return self._val
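# Editor's note (added commentary with illustrative numbers, not part of the
# original file): with iters=4 and maxval=1.0, `val` starts at 0.25 and
# successive `step()` calls yield 0.5, 0.75, 1.0, after which it stays clamped
# at maxval. In `main()` below this ramps the KL weight linearly over
# `kl_anneal_iters` iterations.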
class StochasticLorenz(object):
"""Stochastic Lorenz attractor.
Used for simulating ground truth and obtaining noisy data.
Details described in Section 7.2 https://arxiv.org/pdf/2001.01328.pdf
Default a, b from https://openreview.net/pdf?id=HkzRQhR9YX
"""
noise_type = "diagonal"
sde_type = "ito"
def __init__(self, a: Sequence = (10., 28., 8 / 3), b: Sequence = (.1, .28, .3)):
super(StochasticLorenz, self).__init__()
self.a = a
self.b = b
def f(self, t, y):
x1, x2, x3 = torch.split(y, split_size_or_sections=(1, 1, 1), dim=1)
a1, a2, a3 = self.a
f1 = a1 * (x2 - x1)
f2 = a2 * x1 - x2 - x1 * x3
f3 = x1 * x2 - a3 * x3
return torch.cat([f1, f2, f3], dim=1)
def g(self, t, y):
x1, x2, x3 = torch.split(y, split_size_or_sections=(1, 1, 1), dim=1)
b1, b2, b3 = self.b
g1 = x1 * b1
g2 = x2 * b2
g3 = x3 * b3
return torch.cat([g1, g2, g3], dim=1)
@torch.no_grad()
def sample(self, x0, ts, noise_std, normalize):
"""Sample data for training. Store data normalization constants if necessary."""
xs = torchsde.sdeint(self, x0, ts)
if normalize:
mean, std = torch.mean(xs, dim=(0, 1)), torch.std(xs, dim=(0, 1))
xs.sub_(mean).div_(std).add_(torch.randn_like(xs) * noise_std)
return xs
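# Editor's note: a minimal sampling sketch (mirrors how `make_dataset` below uses
# this class; the batch size of 16 is invented for illustration):
#   y0 = torch.randn(16, 3)
#   ts = torch.linspace(0., 2., steps=100)
#   xs = StochasticLorenz().sample(y0, ts, noise_std=0.01, normalize=True)  # shape (100, 16, 3)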
class Encoder(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super(Encoder, self).__init__()
self.gru = nn.GRU(input_size=input_size, hidden_size=hidden_size)
self.lin = nn.Linear(hidden_size, output_size)
def forward(self, inp):
out, _ = self.gru(inp)
out = self.lin(out)
return out
class LatentSDE(nn.Module):
sde_type = "ito"
noise_type = "diagonal"
def __init__(self, data_size, latent_size, context_size, hidden_size):
super(LatentSDE, self).__init__()
# Encoder.
self.encoder = Encoder(input_size=data_size, hidden_size=hidden_size, output_size=context_size)
self.qz0_net = nn.Linear(context_size, latent_size + latent_size)
# Decoder.
self.f_net = nn.Sequential(
nn.Linear(latent_size + context_size, hidden_size),
nn.Softplus(),
nn.Linear(hidden_size, hidden_size),
nn.Softplus(),
nn.Linear(hidden_size, latent_size),
)
self.h_net = nn.Sequential(
nn.Linear(latent_size, hidden_size),
nn.Softplus(),
nn.Linear(hidden_size, hidden_size),
nn.Softplus(),
nn.Linear(hidden_size, latent_size),
)
# This needs to be an element-wise function for the SDE to satisfy diagonal noise.
self.g_nets = nn.ModuleList(
[
nn.Sequential(
nn.Linear(1, hidden_size),
nn.Softplus(),
nn.Linear(hidden_size, 1),
nn.Sigmoid()
)
for _ in range(latent_size)
]
)
self.projector = nn.Linear(latent_size, data_size)
self.pz0_mean = nn.Parameter(torch.zeros(1, latent_size))
self.pz0_logstd = nn.Parameter(torch.zeros(1, latent_size))
self._ctx = None
def contextualize(self, ctx):
self._ctx = ctx # A tuple of tensors of sizes (T,), (T, batch_size, d).
def f(self, t, y):
ts, ctx = self._ctx
i = min(torch.searchsorted(ts, t, right=True), len(ts) - 1)
return self.f_net(torch.cat((y, ctx[i]), dim=1))
def h(self, t, y):
return self.h_net(y)
def g(self, t, y): # Diagonal diffusion.
y = torch.split(y, split_size_or_sections=1, dim=1)
out = [g_net_i(y_i) for (g_net_i, y_i) in zip(self.g_nets, y)]
return torch.cat(out, dim=1)
def forward(self, xs, ts, noise_std, adjoint=False, method="euler"):
# Contextualization is only needed for posterior inference.
ctx = self.encoder(torch.flip(xs, dims=(0,)))
ctx = torch.flip(ctx, dims=(0,))
self.contextualize((ts, ctx))
qz0_mean, qz0_logstd = self.qz0_net(ctx[0]).chunk(chunks=2, dim=1)
z0 = qz0_mean + qz0_logstd.exp() * torch.randn_like(qz0_mean)
if adjoint:
# Must use the argument `adjoint_params`, since `ctx` is not part of the input to `f`, `g`, and `h`.
adjoint_params = (
(ctx,) +
tuple(self.f_net.parameters()) + tuple(self.g_nets.parameters()) + tuple(self.h_net.parameters())
)
zs, log_ratio = torchsde.sdeint_adjoint(
self, z0, ts, adjoint_params=adjoint_params, dt=1e-2, logqp=True, method=method)
else:
zs, log_ratio = torchsde.sdeint(self, z0, ts, dt=1e-2, logqp=True, method=method)
_xs = self.projector(zs)
xs_dist = Normal(loc=_xs, scale=noise_std)
log_pxs = xs_dist.log_prob(xs).sum(dim=(0, 2)).mean(dim=0)
qz0 = torch.distributions.Normal(loc=qz0_mean, scale=qz0_logstd.exp())
pz0 = torch.distributions.Normal(loc=self.pz0_mean, scale=self.pz0_logstd.exp())
logqp0 = torch.distributions.kl_divergence(qz0, pz0).sum(dim=1).mean(dim=0)
logqp_path = log_ratio.sum(dim=0).mean(dim=0)
return log_pxs, logqp0 + logqp_path
@torch.no_grad()
def sample(self, batch_size, ts, bm=None):
eps = torch.randn(size=(batch_size, *self.pz0_mean.shape[1:]), device=self.pz0_mean.device)
z0 = self.pz0_mean + self.pz0_logstd.exp() * eps
zs = torchsde.sdeint(self, z0, ts, names={'drift': 'h'}, dt=1e-3, bm=bm)
        # Most of the time in ML, we don't sample the observation noise for visualization purposes.
_xs = self.projector(zs)
return _xs
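# Editor's note (added commentary, not part of the original file): in this model
# `f` is the context-conditioned posterior drift, `h` is the prior drift, and `g`
# is the shared diagonal diffusion. `sdeint(..., logqp=True)` integrates the
# posterior SDE and additionally returns the pathwise KL contribution between the
# posterior and prior drifts, so the training objective in `main()` is the
# negative ELBO: loss = -log_pxs + kl_coeff * (logqp0 + logqp_path).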
def make_dataset(t0, t1, batch_size, noise_std, train_dir, device):
data_path = os.path.join(train_dir, 'lorenz_data.pth')
if os.path.exists(data_path):
data_dict = torch.load(data_path)
xs, ts = data_dict['xs'], data_dict['ts']
logging.warning(f'Loaded toy data at: {data_path}')
if xs.shape[1] != batch_size:
raise ValueError("Batch size has changed; please delete and regenerate the data.")
if ts[0] != t0 or ts[-1] != t1:
raise ValueError("Times interval [t0, t1] has changed; please delete and regenerate the data.")
else:
_y0 = torch.randn(batch_size, 3, device=device)
ts = torch.linspace(t0, t1, steps=100, device=device)
xs = StochasticLorenz().sample(_y0, ts, noise_std, normalize=True)
os.makedirs(os.path.dirname(data_path), exist_ok=True)
torch.save({'xs': xs, 'ts': ts}, data_path)
logging.warning(f'Stored toy data at: {data_path}')
return xs, ts
def vis(xs, ts, latent_sde, bm_vis, img_path, num_samples=10):
fig = plt.figure(figsize=(20, 9))
gs = gridspec.GridSpec(1, 2)
ax00 = fig.add_subplot(gs[0, 0], projection='3d')
ax01 = fig.add_subplot(gs[0, 1], projection='3d')
# Left plot: data.
z1, z2, z3 = np.split(xs.cpu().numpy(), indices_or_sections=3, axis=-1)
[ax00.plot(z1[:, i, 0], z2[:, i, 0], z3[:, i, 0]) for i in range(num_samples)]
    ax00.scatter(z1[0, :num_samples, 0], z2[0, :num_samples, 0], z3[0, :num_samples, 0], marker='x')
ax00.set_yticklabels([])
ax00.set_xticklabels([])
ax00.set_zticklabels([])
ax00.set_xlabel('$z_1$', labelpad=0., fontsize=16)
ax00.set_ylabel('$z_2$', labelpad=.5, fontsize=16)
ax00.set_zlabel('$z_3$', labelpad=0., horizontalalignment='center', fontsize=16)
ax00.set_title('Data', fontsize=20)
xlim = ax00.get_xlim()
ylim = ax00.get_ylim()
zlim = ax00.get_zlim()
# Right plot: samples from learned model.
xs = latent_sde.sample(batch_size=xs.size(1), ts=ts, bm=bm_vis).cpu().numpy()
z1, z2, z3 = np.split(xs, indices_or_sections=3, axis=-1)
[ax01.plot(z1[:, i, 0], z2[:, i, 0], z3[:, i, 0]) for i in range(num_samples)]
    ax01.scatter(z1[0, :num_samples, 0], z2[0, :num_samples, 0], z3[0, :num_samples, 0], marker='x')
ax01.set_yticklabels([])
ax01.set_xticklabels([])
ax01.set_zticklabels([])
ax01.set_xlabel('$z_1$', labelpad=0., fontsize=16)
ax01.set_ylabel('$z_2$', labelpad=.5, fontsize=16)
ax01.set_zlabel('$z_3$', labelpad=0., horizontalalignment='center', fontsize=16)
ax01.set_title('Samples', fontsize=20)
ax01.set_xlim(xlim)
ax01.set_ylim(ylim)
ax01.set_zlim(zlim)
plt.savefig(img_path)
plt.close()
def main(
batch_size=1024,
latent_size=4,
context_size=64,
hidden_size=128,
lr_init=1e-2,
t0=0.,
t1=2.,
lr_gamma=0.997,
num_iters=5000,
kl_anneal_iters=1000,
pause_every=50,
noise_std=0.01,
adjoint=False,
train_dir='./dump/lorenz/',
method="euler",
):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
xs, ts = make_dataset(t0=t0, t1=t1, batch_size=batch_size, noise_std=noise_std, train_dir=train_dir, device=device)
latent_sde = LatentSDE(
data_size=3,
latent_size=latent_size,
context_size=context_size,
hidden_size=hidden_size,
).to(device)
optimizer = optim.Adam(params=latent_sde.parameters(), lr=lr_init)
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=lr_gamma)
kl_scheduler = LinearScheduler(iters=kl_anneal_iters)
# Fix the same Brownian motion for visualization.
bm_vis = torchsde.BrownianInterval(
t0=t0, t1=t1, size=(batch_size, latent_size,), device=device, levy_area_approximation="space-time")
for global_step in tqdm.tqdm(range(1, num_iters + 1)):
latent_sde.zero_grad()
log_pxs, log_ratio = latent_sde(xs, ts, noise_std, adjoint, method)
loss = -log_pxs + log_ratio * kl_scheduler.val
loss.backward()
optimizer.step()
scheduler.step()
kl_scheduler.step()
if global_step % pause_every == 0:
lr_now = optimizer.param_groups[0]['lr']
logging.warning(
f'global_step: {global_step:06d}, lr: {lr_now:.5f}, '
f'log_pxs: {log_pxs:.4f}, log_ratio: {log_ratio:.4f} loss: {loss:.4f}, kl_coeff: {kl_scheduler.val:.4f}'
)
img_path = os.path.join(train_dir, f'global_step_{global_step:06d}.pdf')
vis(xs, ts, latent_sde, bm_vis, img_path)
if __name__ == "__main__":
fire.Fire(main)
| apache-2.0 |
Erotemic/ibeis | ibeis/scripts/getshark_old.py | 1 | 22490 | def get_injured_sharks():
"""
>>> from ibeis.scripts.getshark import * # NOQA
"""
import requests
url = 'http://www.whaleshark.org/getKeywordImages.jsp'
resp = requests.get(url)
assert resp.status_code == 200
keywords = resp.json()['keywords']
key_list = ut.take_column(keywords, 'indexName')
key_to_nice = {k['indexName']: k['readableName'] for k in keywords}
injury_patterns = [
'injury', 'net', 'hook', 'trunc', 'damage', 'scar', 'nicks', 'bite',
]
injury_keys = [key for key in key_list if any([pat in key for pat in injury_patterns])]
noninjury_keys = ut.setdiff(key_list, injury_keys)
injury_nice = ut.lmap(lambda k: key_to_nice[k], injury_keys) # NOQA
noninjury_nice = ut.lmap(lambda k: key_to_nice[k], noninjury_keys) # NOQA
key_list = injury_keys
keyed_images = {}
for key in ut.ProgIter(key_list, lbl='reading index', bs=True):
key_url = url + '?indexName={indexName}'.format(indexName=key)
key_resp = requests.get(key_url)
assert key_resp.status_code == 200
key_imgs = key_resp.json()['images']
keyed_images[key] = key_imgs
key_hist = {key: len(imgs) for key, imgs in keyed_images.items()}
key_hist = ut.sort_dict(key_hist, 'vals')
print(ut.repr3(key_hist))
nice_key_hist = ut.map_dict_keys(lambda k: key_to_nice[k], key_hist)
nice_key_hist = ut.sort_dict(nice_key_hist, 'vals')
print(ut.repr3(nice_key_hist))
key_to_urls = {key: ut.take_column(vals, 'url') for key, vals in keyed_images.items()}
overlaps = {}
import itertools
overlap_img_list = []
for k1, k2 in itertools.combinations(key_to_urls.keys(), 2):
overlap_imgs = ut.isect(key_to_urls[k1], key_to_urls[k2])
num_overlap = len(overlap_imgs)
overlaps[(k1, k2)] = num_overlap
overlaps[(k1, k1)] = len(key_to_urls[k1])
if num_overlap > 0:
#print('[%s][%s], overlap=%r' % (k1, k2, num_overlap))
overlap_img_list.extend(overlap_imgs)
all_img_urls = list(set(ut.flatten(key_to_urls.values())))
num_all = len(all_img_urls) # NOQA
print('num_all = %r' % (num_all,))
# Determine super-categories
categories = ['nicks', 'scar', 'trunc']
# Force these keys into these categories
key_to_cat = {'scarbite': 'other_injury'}
cat_to_keys = ut.ddict(list)
for key in key_to_urls.keys():
flag = 1
if key in key_to_cat:
cat = key_to_cat[key]
cat_to_keys[cat].append(key)
continue
for cat in categories:
if cat in key:
cat_to_keys[cat].append(key)
flag = 0
if flag:
cat = 'other_injury'
cat_to_keys[cat].append(key)
cat_urls = ut.ddict(list)
for cat, keys in cat_to_keys.items():
for key in keys:
cat_urls[cat].extend(key_to_urls[key])
cat_hist = {}
for cat in list(cat_urls.keys()):
cat_urls[cat] = list(set(cat_urls[cat]))
cat_hist[cat] = len(cat_urls[cat])
print(ut.repr3(cat_to_keys))
print(ut.repr3(cat_hist))
key_to_cat = dict([(val, key) for key, vals in cat_to_keys.items() for val in vals])
#ingestset = {
# '__class__': 'ImageSet',
# 'images': ut.ddict(dict)
#}
#for key, key_imgs in keyed_images.items():
# for imgdict in key_imgs:
# url = imgdict['url']
# encid = imgdict['correspondingEncounterNumber']
# # Make structure
# encdict = encounters[encid]
# encdict['__class__'] = 'Encounter'
# imgdict = ut.delete_keys(imgdict.copy(), ['correspondingEncounterNumber'])
# imgdict['__class__'] = 'Image'
# cat = key_to_cat[key]
# annotdict = {'relative_bbox': [.01, .01, .98, .98], 'tags': [cat, key]}
# annotdict['__class__'] = 'Annotation'
# # Ensure structures exist
# encdict['images'] = encdict.get('images', [])
# imgdict['annots'] = imgdict.get('annots', [])
# # Add an image to this encounter
# encdict['images'].append(imgdict)
# # Add an annotation to this image
# imgdict['annots'].append(annotdict)
##http://springbreak.wildbook.org/rest/org.ecocean.Encounter/1111
#get_enc_url = 'http://www.whaleshark.org/rest/org.ecocean.Encounter/%s' % (encid,)
#resp = requests.get(get_enc_url)
#print(ut.repr3(encdict))
#print(ut.repr3(encounters))
# Download the files to the local disk
#fpath_list =
all_urls = ut.unique(ut.take_column(
ut.flatten(
ut.dict_subset(keyed_images, ut.flatten(cat_to_keys.values())).values()
), 'url'))
dldir = ut.truepath('~/tmpsharks')
from os.path import commonprefix, basename # NOQA
prefix = commonprefix(all_urls)
suffix_list = [url_[len(prefix):] for url_ in all_urls]
fname_list = [suffix.replace('/', '--') for suffix in suffix_list]
fpath_list = []
for url, fname in ut.ProgIter(zip(all_urls, fname_list), lbl='downloading imgs', freq=1):
fpath = ut.grab_file_url(url, download_dir=dldir, fname=fname, verbose=False)
fpath_list.append(fpath)
# Make sure we keep orig info
#url_to_keys = ut.ddict(list)
url_to_info = ut.ddict(dict)
for key, imgdict_list in keyed_images.items():
for imgdict in imgdict_list:
url = imgdict['url']
info = url_to_info[url]
for k, v in imgdict.items():
info[k] = info.get(k, [])
info[k].append(v)
info['keys'] = info.get('keys', [])
info['keys'].append(key)
#url_to_keys[url].append(key)
info_list = ut.take(url_to_info, all_urls)
for info in info_list:
if len(set(info['correspondingEncounterNumber'])) > 1:
assert False, 'url with two different encounter nums'
# Combine duplicate tags
hashid_list = [ut.get_file_uuid(fpath_, stride=8) for fpath_ in ut.ProgIter(fpath_list, bs=True)]
groupxs = ut.group_indices(hashid_list)[1]
# Group properties by duplicate images
#groupxs = [g for g in groupxs if len(g) > 1]
fpath_list_ = ut.take_column(ut.apply_grouping(fpath_list, groupxs), 0)
url_list_ = ut.take_column(ut.apply_grouping(all_urls, groupxs), 0)
info_list_ = [ut.map_dict_vals(ut.flatten, ut.dict_accum(*info_))
for info_ in ut.apply_grouping(info_list, groupxs)]
encid_list_ = [ut.unique(info_['correspondingEncounterNumber'])[0]
for info_ in info_list_]
keys_list_ = [ut.unique(info_['keys']) for info_ in info_list_]
cats_list_ = [ut.unique(ut.take(key_to_cat, keys)) for keys in keys_list_]
clist = ut.ColumnLists({
'gpath': fpath_list_,
'url': url_list_,
'encid': encid_list_,
'key': keys_list_,
'cat': cats_list_,
})
#for info_ in ut.apply_grouping(info_list, groupxs):
# info = ut.dict_accum(*info_)
# info = ut.map_dict_vals(ut.flatten, info)
# x = ut.unique(ut.flatten(ut.dict_accum(*info_)['correspondingEncounterNumber']))
# if len(x) > 1:
# info = info.copy()
# del info['keys']
# print(ut.repr3(info))
flags = ut.lmap(ut.fpath_has_imgext, clist['gpath'])
clist = clist.compress(flags)
import ibeis
ibs = ibeis.opendb('WS_Injury', allow_newdir=True)
gid_list = ibs.add_images(clist['gpath'])
clist['gid'] = gid_list
failed_flags = ut.flag_None_items(clist['gid'])
print('# failed %s' % (sum(failed_flags)),)
passed_flags = ut.not_list(failed_flags)
clist = clist.compress(passed_flags)
ut.assert_all_not_None(clist['gid'])
#ibs.get_image_uris_original(clist['gid'])
ibs.set_image_uris_original(clist['gid'], clist['url'], overwrite=True)
#ut.zipflat(clist['cat'], clist['key'])
if False:
# Can run detection instead
clist['tags'] = ut.zipflat(clist['cat'])
aid_list = ibs.use_images_as_annotations(clist['gid'], adjust_percent=0.01,
tags_list=clist['tags'])
aid_list
import plottool_ibeis as pt
from ibeis import core_annots
pt.qt4ensure()
#annots = ibs.annots()
#aids = [1, 2]
#ibs.depc_annot.get('hog', aids , 'hog')
#ibs.depc_annot.get('chip', aids, 'img')
for aid in ut.InteractiveIter(ibs.get_valid_aids()):
hogs = ibs.depc_annot.d.get_hog_hog([aid])
chips = ibs.depc_annot.d.get_chips_img([aid])
chip = chips[0]
hogimg = core_annots.make_hog_block_image(hogs[0])
pt.clf()
pt.imshow(hogimg, pnum=(1, 2, 1))
pt.imshow(chip, pnum=(1, 2, 2))
fig = pt.gcf()
fig.show()
fig.canvas.draw()
#print(len(groupxs))
#if False:
#groupxs = ut.find_duplicate_items(ut.lmap(basename, suffix_list)).values()
#print(ut.repr3(ut.apply_grouping(all_urls, groupxs)))
# # FIX
# for fpath, fname in zip(fpath_list, fname_list):
# if ut.checkpath(fpath):
# ut.move(fpath, join(dirname(fpath), fname))
# print('fpath = %r' % (fpath,))
#import ibeis
#from ibeis.dbio import ingest_dataset
#dbdir = ibeis.sysres.lookup_dbdir('WS_ALL')
#self = ingest_dataset.Ingestable2(dbdir)
if False:
# Show overlap matrix
import plottool_ibeis as pt
import pandas as pd
import numpy as np
dict_ = overlaps
s = pd.Series(dict_, index=pd.MultiIndex.from_tuples(overlaps))
df = s.unstack()
lhs, rhs = df.align(df.T)
df = lhs.add(rhs, fill_value=0).fillna(0)
label_texts = df.columns.values
def label_ticks(label_texts):
import plottool_ibeis as pt
truncated_labels = [repr(lbl[0:100]) for lbl in label_texts]
ax = pt.gca()
ax.set_xticks(list(range(len(label_texts))))
ax.set_xticklabels(truncated_labels)
[lbl.set_rotation(-55) for lbl in ax.get_xticklabels()]
[lbl.set_horizontalalignment('left') for lbl in ax.get_xticklabels()]
#xgrid, ygrid = np.meshgrid(range(len(label_texts)), range(len(label_texts)))
#pt.plot_surface3d(xgrid, ygrid, disjoint_mat)
ax.set_yticks(list(range(len(label_texts))))
ax.set_yticklabels(truncated_labels)
[lbl.set_horizontalalignment('right') for lbl in ax.get_yticklabels()]
[lbl.set_verticalalignment('center') for lbl in ax.get_yticklabels()]
#[lbl.set_rotation(20) for lbl in ax.get_yticklabels()]
#df = df.sort(axis=0)
#df = df.sort(axis=1)
sortx = np.argsort(df.sum(axis=1).values)[::-1]
df = df.take(sortx, axis=0)
df = df.take(sortx, axis=1)
fig = pt.figure(fnum=1)
fig.clf()
mat = df.values.astype(np.int32)
mat[np.diag_indices(len(mat))] = 0
vmax = mat[(1 - np.eye(len(mat))).astype(np.bool)].max()
import matplotlib.colors
norm = matplotlib.colors.Normalize(vmin=0, vmax=vmax, clip=True)
pt.plt.imshow(mat, cmap='hot', norm=norm, interpolation='none')
pt.plt.colorbar()
pt.plt.grid('off')
label_ticks(label_texts)
fig.tight_layout()
#overlap_df = pd.DataFrame.from_dict(overlap_img_list)
class TmpImage(ut.NiceRepr):
pass
from skimage.feature import hog
from skimage import data, color, exposure
import plottool_ibeis as pt
image2 = color.rgb2gray(data.astronaut()) # NOQA
fpath = './GOPR1120.JPG'
import vtool_ibeis as vt
for fpath in [fpath]:
"""
http://scikit-image.org/docs/dev/auto_examples/plot_hog.html
"""
image = vt.imread(fpath, grayscale=True)
image = pt.color_funcs.to_base01(image)
fig = pt.figure(fnum=2)
fd, hog_image = hog(image, orientations=8, pixels_per_cell=(16, 16),
cells_per_block=(1, 1), visualise=True)
fig, (ax1, ax2) = pt.plt.subplots(1, 2, figsize=(8, 4), sharex=True, sharey=True)
ax1.axis('off')
ax1.imshow(image, cmap=pt.plt.cm.gray)
ax1.set_title('Input image')
ax1.set_adjustable('box-forced')
# Rescale histogram for better display
hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 0.02))
ax2.axis('off')
ax2.imshow(hog_image_rescaled, cmap=pt.plt.cm.gray)
ax2.set_title('Histogram of Oriented Gradients')
ax1.set_adjustable('box-forced')
pt.plt.show()
#for
def detect_sharks(ibs, gids):
#import ibeis
#ibs = ibeis.opendb('WS_ALL')
config = {
'algo' : 'yolo',
'sensitivity' : 0.2,
'config_filepath' : ut.truepath('~/work/WS_ALL/localizer_backup/detect.yolo.2.cfg'),
'weight_filepath' : ut.truepath('~/work/WS_ALL/localizer_backup/detect.yolo.2.39000.weights'),
'class_filepath' : ut.truepath('~/work/WS_ALL/localizer_backup/detect.yolo.2.cfg.classes'),
}
depc = ibs.depc_image
#imgsets = ibs.imagesets(text='Injured Sharks')
#images = ibs.images(imgsets.gids[0])
images = ibs.images(gids)
images = images.compress([ext not in ['.gif'] for ext in images.exts])
gid_list = images.gids
# result is a tuple:
# (score, bbox_list, theta_list, conf_list, class_list)
results_list = depc.get_property('localizations', gid_list, None, config=config)
results_list2 = []
multi_gids = []
failed_gids = []
#ibs.set_image_imagesettext(failed_gids, ['Fixme'] * len(failed_gids))
ibs.set_image_imagesettext(multi_gids, ['Fixme2'] * len(multi_gids))
failed_gids
for gid, res in zip(gid_list, results_list):
score, bbox_list, theta_list, conf_list, class_list = res
if len(bbox_list) == 0:
failed_gids.append(gid)
elif len(bbox_list) == 1:
results_list2.append((gid, bbox_list, theta_list))
elif len(bbox_list) > 1:
multi_gids.append(gid)
idx = conf_list.argmax()
res2 = (gid, bbox_list[idx:idx + 1], theta_list[idx:idx + 1])
results_list2.append(res2)
ut.dict_hist(([t[1].shape[0] for t in results_list]))
localized_imgs = ibs.images(ut.take_column(results_list2, 0))
assert all([len(a) == 1 for a in localized_imgs.aids])
old_annots = ibs.annots(ut.flatten(localized_imgs.aids))
#old_tags = old_annots.case_tags
# Override old bboxes
import numpy as np
bboxes = np.array(ut.take_column(results_list2, 1))[:, 0, :]
ibs.set_annot_bboxes(old_annots.aids, bboxes)
if False:
import plottool_ibeis as pt
pt.qt4ensure()
inter = pt.MultiImageInteraction(
ibs.get_image_paths(ut.take_column(results_list2, 0)),
bboxes_list=ut.take_column(results_list2, 1)
)
inter.dump_to_disk('shark_loc', num=50, prefix='shark_loc')
inter.start()
inter = pt.MultiImageInteraction(ibs.get_image_paths(failed_gids))
inter.start()
inter = pt.MultiImageInteraction(ibs.get_image_paths(multi_gids))
inter.start()
def train_part_detector():
"""
Problem:
        healthy sharks usually have a mostly whole-body shot;
        injured sharks usually have a close-up shot.
    This distribution of images is likely what the injured-shark net is picking up on.
The goal is to train a detector that looks for things that look
like the distribution of injured sharks.
    We will run this on healthy sharks to find the parts of healthy-shark images
    that resemble the injured-shark distribution.
"""
import ibeis
ibs = ibeis.opendb('WS_ALL')
imgset = ibs.imagesets(text='Injured Sharks')
injured_annots = imgset.annots[0] # NOQA
#config = {
# 'dim_size': (224, 224),
# 'resize_dim': 'wh'
#}
from pydarknet import Darknet_YOLO_Detector
data_path = ibs.export_to_xml()
output_path = join(ibs.get_cachedir(), 'training', 'localizer')
ut.ensuredir(output_path)
dark = Darknet_YOLO_Detector()
results = dark.train(data_path, output_path)
del dark
localizer_weight_path, localizer_config_path, localizer_class_path = results
classifier_model_path = ibs.classifier_train()
labeler_model_path = ibs.labeler_train()
output_path = join(ibs.get_cachedir(), 'training', 'detector')
ut.ensuredir(output_path)
ut.copy(localizer_weight_path, join(output_path, 'localizer.weights'))
ut.copy(localizer_config_path, join(output_path, 'localizer.config'))
ut.copy(localizer_class_path, join(output_path, 'localizer.classes'))
ut.copy(classifier_model_path, join(output_path, 'classifier.npy'))
ut.copy(labeler_model_path, join(output_path, 'labeler.npy'))
# ibs.detector_train()
def purge_ensure_one_annot_per_images(ibs):
"""
pip install Pipe
"""
# Purge all but one annotation
images = ibs.images()
#images.aids
groups = images._annot_groups
import numpy as np
    # Find all but the largest annotation per image
large_masks = [ut.index_to_boolmask([np.argmax(x)], len(x)) for x in groups.bbox_area]
small_masks = ut.lmap(ut.not_list, large_masks)
    # Remove all but the largest annotation
small_aids = ut.zipcompress(groups.aid, small_masks)
small_aids = ut.flatten(small_aids)
# Fix any empty images
images = ibs.images()
empty_images = ut.where(np.array(images.num_annotations) == 0)
print('empty_images = %r' % (empty_images,))
#list(map(basename, map(dirname, images.uris_original)))
def VecPipe(func):
import pipe
@pipe.Pipe
def wrapped(sequence):
return map(func, sequence)
#return (None if item is None else func(item) for item in sequence)
return wrapped
name_list = list(images.uris_original | VecPipe(dirname) | VecPipe(basename))
aids_list = images.aids
ut.assert_all_eq(list(aids_list | VecPipe(len)))
annots = ibs.annots(ut.flatten(aids_list))
annots.names = name_list
def shark_misc():
import ibeis
ibs = ibeis.opendb('WS_ALL')
aid_list = ibs.get_valid_aids()
flag_list = ibs.get_annot_been_adjusted(aid_list)
adjusted_aids = ut.compress(aid_list, flag_list)
return adjusted_aids
#if False:
# # TRY TO FIGURE OUT WHY URLS ARE MISSING IN STEP 1
# encounter_to_parsed1 = parsed1.group_items('encounter')
# encounter_to_parsed2 = parsed2.group_items('encounter')
# url_to_parsed1 = parsed1.group_items('img_url')
# url_to_parsed2 = parsed2.group_items('img_url')
# def set_overlap(set1, set2):
# set1 = set(set1)
# set2 = set(set2)
# return ut.odict([
# ('s1', len(set1)),
# ('s2', len(set2)),
# ('isect', len(set1.intersection(set2))),
# ('union', len(set1.union(set2))),
# ('s1 - s2', len(set1.difference(set2))),
# ('s2 - s1', len(set2.difference(set1))),
# ])
# print('encounter overlap: ' + ut.repr3(set_overlap(encounter_to_parsed1, encounter_to_parsed2)))
# print('url overlap: ' + ut.repr3(set_overlap(url_to_parsed1, url_to_parsed2)))
# url1 = list(url_to_parsed1.keys())
# url2 = list(url_to_parsed2.keys())
# # remove common prefixes
# from os.path import commonprefix, basename # NOQA
# cp1 = commonprefix(url1)
# cp2 = commonprefix(url2)
# #suffix1 = sorted([u[len(cp1):].lower() for u in url1])
# #suffix2 = sorted([u[len(cp2):].lower() for u in url2])
# suffix1 = sorted([u[len(cp1):] for u in url1])
# suffix2 = sorted([u[len(cp2):] for u in url2])
# print('suffix overlap: ' + ut.repr3(set_overlap(suffix1, suffix2)))
# set1 = set(suffix1)
# set2 = set(suffix2)
# only1 = list(set1 - set1.intersection(set2))
# only2 = list(set2 - set1.intersection(set2))
# import numpy as np
# for suf in ut.ProgIter(only2, bs=True):
# dist = np.array(ut.edit_distance(suf, only1))
# idx = ut.argsort(dist)[0:3]
# if dist[idx][0] < 3:
# close = ut.take(only1, idx)
# print('---')
# print('suf = %r' % (join(cp2, suf),))
# print('close = %s' % (ut.repr3([join(cp1, c) for c in close]),))
# print('---')
# break
# # Associate keywords with original images
# #lower_urls = [x.lower() for x in parsed['img_url']]
# url_to_idx = ut.make_index_lookup(parsed1['img_url'])
# parsed1['keywords'] = [[] for _ in range(len(parsed1))]
# for url, keys in url_to_keys.items():
# # hack because urls are note in the same format
# url = url.replace('wildbook_data_dir', 'shepherd_data_dir')
# url = url.lower()
# if url in url_to_idx:
# idx = url_to_idx[url]
# parsed1['keywords'][idx].extend(keys)
#healthy_annots = ibs.annots(ibs.imagesets(text='Non-Injured Sharks').aids[0])
#ibs.set_annot_prop('healthy', healthy_annots.aids, [True] * len(healthy_annots))
#['healthy' in t and len(t) > 0 for t in single_annots.case_tags]
#healthy_tags = []
#ut.find_duplicate_items(cur_img_uuids)
#ut.find_duplicate_items(new_img_uuids)
#cur_uuids = set(cur_img_uuids)
#new_uuids = set(new_img_uuids)
#both_uuids = new_uuids.intersection(cur_uuids)
#only_cur = cur_uuids - both_uuids
#only_new = new_uuids - both_uuids
#print('len(cur_uuids) = %r' % (len(cur_uuids)))
#print('len(new_uuids) = %r' % (len(new_uuids)))
#print('len(both_uuids) = %r' % (len(both_uuids)))
#print('len(only_cur) = %r' % (len(only_cur)))
#print('len(only_new) = %r' % (len(only_new)))
# Ensure that data in both sets are syncronized
#images_both = []
#if False:
# print('Removing small images')
# import numpy as np
# import vtool_ibeis as vt
# imgsize_list = np.array([vt.open_image_size(gpath) for gpath in parsed['new_fpath']])
# sqrt_area_list = np.sqrt(np.prod(imgsize_list, axis=1))
# areq_flags_list = sqrt_area_list >= 750
# parsed = parsed.compress(areq_flags_list)
| apache-2.0 |
simpeg/simpeg | examples/07-fdem/plot_analytic_mag_dipole_wholespace.py | 1 | 1400 | """
EM: Magnetic Dipole in a Whole-Space
====================================
Here we plot the magnetic flux density from a harmonic dipole in a
wholespace.
"""
import numpy as np
from SimPEG import Utils
import SimPEG.EM as EM
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
def run(XYZ=None, loc=np.r_[0., 0., 0.], sig=1.0, freq=1.0, orientation='Z',
plotIt=True):
if XYZ is None:
# avoid putting measurement points where source is
x = np.arange(-100.5, 100.5, step=1.)
y = np.r_[0]
z = x
XYZ = Utils.ndgrid(x, y, z)
Bx, By, Bz = EM.Analytics.FDEM.MagneticDipoleWholeSpace(
XYZ,
loc,
sig,
freq,
orientation=orientation
)
absB = np.sqrt(Bx*Bx.conj()+By*By.conj()+Bz*Bz.conj()).real
if plotIt:
fig, ax = plt.subplots(1, 1, figsize=(6, 5))
bxplt = Bx.reshape(x.size, z.size)
bzplt = Bz.reshape(x.size, z.size)
pc = ax.pcolor(x, z, absB.reshape(x.size, z.size), norm=LogNorm())
ax.streamplot(x, z, bxplt.real, bzplt.real, color='k', density=1)
ax.set_xlim([x.min(), x.max()])
ax.set_ylim([z.min(), z.max()])
ax.set_xlabel('x')
ax.set_ylabel('z')
cb = plt.colorbar(pc, ax=ax)
cb.set_label('|B| (T)')
return fig, ax
if __name__ == '__main__':
run()
plt.show()
| mit |
zxc2694/ov_test | program/pythonGUI/gui3.py | 3 | 2637 | ################################################################################
# File name: gui3.py
#
# Function: Display three data streams from an STM32F4 using Python (matplotlib).
#           The three values are the roll, pitch, and yaw angles of the quadcopter attitude.
#
# Reference:http://electronut.in/plotting-real-time-data-from-arduino-using-python/
#
################################################################################
import sys, serial
import numpy as np
from time import sleep
from collections import deque
from matplotlib import pyplot as plt
# class that holds analog data for N samples
class AnalogData:
# constr
def __init__(self, maxLen):
self.ax = deque([0.0]*maxLen)
self.ay = deque([0.0]*maxLen)
self.ayaw = deque([0.0]*maxLen)
self.maxLen = maxLen
# ring buffer
def addToBuf(self, buf, val):
if len(buf) < self.maxLen:
buf.append(val)
else:
buf.pop()
buf.appendleft(val)
#Add new data
def add(self, data):
assert(len(data) == 3)
self.addToBuf(self.ax, data[0])
self.addToBuf(self.ay, data[1])
self.addToBuf(self.ayaw, data[2])
# plot class
class AnalogPlot:
# constr
def __init__(self, analogData):
# set plot to animated
plt.ion()
plt.figure(figsize=(9,8))
self.axline, = plt.plot(analogData.ax,label="Roll",color="red")
self.ayline, = plt.plot(analogData.ay,label="Pitch",color="blue")
self.ayawline, = plt.plot(analogData.ayaw,label="Yaw",color="green")
plt.xlabel("Time")
plt.ylabel("Angle(-90~+90)")
plt.title("Quadcopter attitude")
plt.legend() #Show label figure.
plt.ylim([-120, 120]) # Vertical axis scale.
plt.grid()
# update plot
def update(self, analogData):
self.axline.set_ydata(analogData.ax)
self.ayline.set_ydata(analogData.ay)
self.ayawline.set_ydata(analogData.ayaw)
plt.draw()
def main():
# expects 1 arg - serial port string
if(len(sys.argv) != 2):
print "Type:"
print "sudo chmod 777 /dev/ttyUSB0"
print "python gui3.py '/dev/ttyUSB0'"
exit(1)
#strPort = '/dev/tty.usbserial-A7006Yqh'
strPort = sys.argv[1];
# plot parameters
analogData = AnalogData(200) # Horizontal axis scale.
analogPlot = AnalogPlot(analogData)
print "plotting data..."
a = 1
# open serial port
ser = serial.Serial(strPort, 9600)
while True:
try:
line = ser.readline()
data = [float(val) for val in line.split()]
if (a < 10):
a = a + 1
else:
print data[0] , data[1] , data[2]
if(len(data) == 3):
analogData.add(data)
analogPlot.update(analogData)
except KeyboardInterrupt:
print "exiting"
break
# close serial
ser.flush()
ser.close()
# call main
if __name__ == '__main__':
main()
| mit |
fabianp/scikit-learn | sklearn/calibration.py | 137 | 18876 | """Calibration of predicted probabilities."""
# Author: Alexandre Gramfort <[email protected]>
# Balazs Kegl <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# Mathieu Blondel <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import inspect
import warnings
from math import log
import numpy as np
from scipy.optimize import fmin_bfgs
from .base import BaseEstimator, ClassifierMixin, RegressorMixin, clone
from .preprocessing import LabelBinarizer
from .utils import check_X_y, check_array, indexable, column_or_1d
from .utils.validation import check_is_fitted
from .isotonic import IsotonicRegression
from .svm import LinearSVC
from .cross_validation import check_cv
from .metrics.classification import _check_binary_probabilistic_predictions
class CalibratedClassifierCV(BaseEstimator, ClassifierMixin):
"""Probability calibration with isotonic regression or sigmoid.
With this class, the base_estimator is fit on the train set of the
cross-validation generator and the test set is used for calibration.
The probabilities for each of the folds are then averaged
for prediction. In case that cv="prefit" is passed to __init__,
    it is assumed that base_estimator has been
fitted already and all data is used for calibration. Note that
    data for fitting the classifier and for calibrating it must be disjoint.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
base_estimator : instance BaseEstimator
The classifier whose output decision function needs to be calibrated
to offer more accurate predict_proba outputs. If cv=prefit, the
classifier must have been fit already on data.
method : 'sigmoid' | 'isotonic'
The method to use for calibration. Can be 'sigmoid' which
corresponds to Platt's method or 'isotonic' which is a
        non-parametric approach. It is not advised to use isotonic calibration
with too few calibration samples (<<1000) since it tends to overfit.
Use sigmoids (Platt's calibration) in this case.
cv : integer or cross-validation generator or "prefit", optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
If "prefit" is passed, it is assumed that base_estimator has been
fitted already and all data is used for calibration.
Attributes
----------
classes_ : array, shape (n_classes)
The class labels.
calibrated_classifiers_: list (len() equal to cv or 1 if cv == "prefit")
        The list of calibrated classifiers, one for each cross-validation fold,
which has been fitted on all but the validation fold and calibrated
on the validation fold.
References
----------
.. [1] Obtaining calibrated probability estimates from decision trees
and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001
.. [2] Transforming Classifier Scores into Accurate Multiclass
Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)
.. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
Regularized Likelihood Methods, J. Platt, (1999)
.. [4] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
def __init__(self, base_estimator=None, method='sigmoid', cv=3):
self.base_estimator = base_estimator
self.method = method
self.cv = cv
def fit(self, X, y, sample_weight=None):
"""Fit the calibrated model
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, accept_sparse=['csc', 'csr', 'coo'],
force_all_finite=False)
X, y = indexable(X, y)
lb = LabelBinarizer().fit(y)
self.classes_ = lb.classes_
        # Check that each cross-validation fold can have at least one
# example per class
n_folds = self.cv if isinstance(self.cv, int) \
else self.cv.n_folds if hasattr(self.cv, "n_folds") else None
if n_folds and \
np.any([np.sum(y == class_) < n_folds for class_ in self.classes_]):
raise ValueError("Requesting %d-fold cross-validation but provided"
" less than %d examples for at least one class."
% (n_folds, n_folds))
self.calibrated_classifiers_ = []
if self.base_estimator is None:
# we want all classifiers that don't expose a random_state
# to be deterministic (and we don't want to expose this one).
base_estimator = LinearSVC(random_state=0)
else:
base_estimator = self.base_estimator
if self.cv == "prefit":
calibrated_classifier = _CalibratedClassifier(
base_estimator, method=self.method)
if sample_weight is not None:
calibrated_classifier.fit(X, y, sample_weight)
else:
calibrated_classifier.fit(X, y)
self.calibrated_classifiers_.append(calibrated_classifier)
else:
cv = check_cv(self.cv, X, y, classifier=True)
arg_names = inspect.getargspec(base_estimator.fit)[0]
estimator_name = type(base_estimator).__name__
if (sample_weight is not None
and "sample_weight" not in arg_names):
warnings.warn("%s does not support sample_weight. Samples"
" weights are only used for the calibration"
" itself." % estimator_name)
base_estimator_sample_weight = None
else:
base_estimator_sample_weight = sample_weight
for train, test in cv:
this_estimator = clone(base_estimator)
if base_estimator_sample_weight is not None:
this_estimator.fit(
X[train], y[train],
sample_weight=base_estimator_sample_weight[train])
else:
this_estimator.fit(X[train], y[train])
calibrated_classifier = _CalibratedClassifier(
this_estimator, method=self.method)
if sample_weight is not None:
calibrated_classifier.fit(X[test], y[test],
sample_weight[test])
else:
calibrated_classifier.fit(X[test], y[test])
self.calibrated_classifiers_.append(calibrated_classifier)
return self
def predict_proba(self, X):
"""Posterior probabilities of classification
This function returns posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples, n_classes)
The predicted probas.
"""
check_is_fitted(self, ["classes_", "calibrated_classifiers_"])
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'],
force_all_finite=False)
# Compute the arithmetic mean of the predictions of the calibrated
        # classifiers
mean_proba = np.zeros((X.shape[0], len(self.classes_)))
for calibrated_classifier in self.calibrated_classifiers_:
proba = calibrated_classifier.predict_proba(X)
mean_proba += proba
mean_proba /= len(self.calibrated_classifiers_)
return mean_proba
def predict(self, X):
"""Predict the target of new samples. Can be different from the
prediction of the uncalibrated classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples,)
The predicted class.
"""
check_is_fitted(self, ["classes_", "calibrated_classifiers_"])
return self.classes_[np.argmax(self.predict_proba(X), axis=1)]
class _CalibratedClassifier(object):
"""Probability calibration with isotonic regression or sigmoid.
It assumes that base_estimator has already been fit, and trains the
calibration on the input set of the fit function. Note that this class
should not be used as an estimator directly. Use CalibratedClassifierCV
with cv="prefit" instead.
Parameters
----------
base_estimator : instance BaseEstimator
The classifier whose output decision function needs to be calibrated
to offer more accurate predict_proba outputs. No default value since
it has to be an already fitted estimator.
method : 'sigmoid' | 'isotonic'
The method to use for calibration. Can be 'sigmoid' which
corresponds to Platt's method or 'isotonic' which is a
        non-parametric approach based on isotonic regression.
References
----------
.. [1] Obtaining calibrated probability estimates from decision trees
and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001
.. [2] Transforming Classifier Scores into Accurate Multiclass
Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)
.. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
Regularized Likelihood Methods, J. Platt, (1999)
.. [4] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
def __init__(self, base_estimator, method='sigmoid'):
self.base_estimator = base_estimator
self.method = method
def _preproc(self, X):
n_classes = len(self.classes_)
if hasattr(self.base_estimator, "decision_function"):
df = self.base_estimator.decision_function(X)
if df.ndim == 1:
df = df[:, np.newaxis]
elif hasattr(self.base_estimator, "predict_proba"):
df = self.base_estimator.predict_proba(X)
if n_classes == 2:
df = df[:, 1:]
else:
raise RuntimeError('classifier has no decision_function or '
'predict_proba method.')
idx_pos_class = np.arange(df.shape[1])
return df, idx_pos_class
def fit(self, X, y, sample_weight=None):
"""Calibrate the fitted model
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
lb = LabelBinarizer()
Y = lb.fit_transform(y)
self.classes_ = lb.classes_
df, idx_pos_class = self._preproc(X)
self.calibrators_ = []
for k, this_df in zip(idx_pos_class, df.T):
if self.method == 'isotonic':
calibrator = IsotonicRegression(out_of_bounds='clip')
elif self.method == 'sigmoid':
calibrator = _SigmoidCalibration()
else:
raise ValueError('method should be "sigmoid" or '
'"isotonic". Got %s.' % self.method)
calibrator.fit(this_df, Y[:, k], sample_weight)
self.calibrators_.append(calibrator)
return self
def predict_proba(self, X):
"""Posterior probabilities of classification
This function returns posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples, n_classes)
The predicted probas. Can be exact zeros.
"""
n_classes = len(self.classes_)
proba = np.zeros((X.shape[0], n_classes))
df, idx_pos_class = self._preproc(X)
for k, this_df, calibrator in \
zip(idx_pos_class, df.T, self.calibrators_):
if n_classes == 2:
k += 1
proba[:, k] = calibrator.predict(this_df)
# Normalize the probabilities
if n_classes == 2:
proba[:, 0] = 1. - proba[:, 1]
else:
proba /= np.sum(proba, axis=1)[:, np.newaxis]
# XXX : for some reason all probas can be 0
proba[np.isnan(proba)] = 1. / n_classes
# Deal with cases where the predicted probability minimally exceeds 1.0
proba[(1.0 < proba) & (proba <= 1.0 + 1e-5)] = 1.0
return proba
def _sigmoid_calibration(df, y, sample_weight=None):
"""Probability Calibration with sigmoid method (Platt 2000)
Parameters
----------
df : ndarray, shape (n_samples,)
The decision function or predict proba for the samples.
y : ndarray, shape (n_samples,)
The targets.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
a : float
The slope.
b : float
The intercept.
References
----------
Platt, "Probabilistic Outputs for Support Vector Machines"
"""
df = column_or_1d(df)
y = column_or_1d(y)
F = df # F follows Platt's notations
tiny = np.finfo(np.float).tiny # to avoid division by 0 warning
# Bayesian priors (see Platt end of section 2.2)
prior0 = float(np.sum(y <= 0))
prior1 = y.shape[0] - prior0
T = np.zeros(y.shape)
T[y > 0] = (prior1 + 1.) / (prior1 + 2.)
T[y <= 0] = 1. / (prior0 + 2.)
T1 = 1. - T
def objective(AB):
# From Platt (beginning of Section 2.2)
E = np.exp(AB[0] * F + AB[1])
P = 1. / (1. + E)
l = -(T * np.log(P + tiny) + T1 * np.log(1. - P + tiny))
if sample_weight is not None:
return (sample_weight * l).sum()
else:
return l.sum()
def grad(AB):
# gradient of the objective function
E = np.exp(AB[0] * F + AB[1])
P = 1. / (1. + E)
TEP_minus_T1P = P * (T * E - T1)
if sample_weight is not None:
TEP_minus_T1P *= sample_weight
dA = np.dot(TEP_minus_T1P, F)
dB = np.sum(TEP_minus_T1P)
return np.array([dA, dB])
AB0 = np.array([0., log((prior0 + 1.) / (prior1 + 1.))])
AB_ = fmin_bfgs(objective, AB0, fprime=grad, disp=False)
return AB_[0], AB_[1]
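# Illustrative sketch of how the fitted (a, b) pair is used: it parameterizes
# the sigmoid p(f) = 1 / (1 + exp(a * f + b)), which _SigmoidCalibration.predict
# applies below. The toy decision values and labels here are arbitrary and only
# meant to show the shape of the mapping.
#
#     df_toy = np.array([-2., -1., 1., 2.])
#     y_toy = np.array([0, 0, 1, 1])
#     a, b = _sigmoid_calibration(df_toy, y_toy)
#     probs = 1. / (1. + np.exp(a * df_toy + b))  # increasing in df_toy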
class _SigmoidCalibration(BaseEstimator, RegressorMixin):
"""Sigmoid regression model.
Attributes
----------
a_ : float
The slope.
b_ : float
The intercept.
"""
def fit(self, X, y, sample_weight=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples,)
Training data.
y : array-like, shape (n_samples,)
Training target.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
X = column_or_1d(X)
y = column_or_1d(y)
X, y = indexable(X, y)
self.a_, self.b_ = _sigmoid_calibration(X, y, sample_weight)
return self
def predict(self, T):
"""Predict new data by linear interpolation.
Parameters
----------
T : array-like, shape (n_samples,)
Data to predict from.
Returns
-------
T_ : array, shape (n_samples,)
The predicted data.
"""
T = column_or_1d(T)
return 1. / (1. + np.exp(self.a_ * T + self.b_))
def calibration_curve(y_true, y_prob, normalize=False, n_bins=5):
"""Compute true and predicted probabilities for a calibration curve.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
normalize : bool, optional, default=False
        Whether y_prob needs to be normalized into the interval [0, 1], i.e. is not
a proper probability. If True, the smallest value in y_prob is mapped
onto 0 and the largest one onto 1.
n_bins : int
Number of bins. A bigger number requires more data.
Returns
-------
prob_true : array, shape (n_bins,)
The true probability in each bin (fraction of positives).
prob_pred : array, shape (n_bins,)
The mean predicted probability in each bin.
References
----------
Alexandru Niculescu-Mizil and Rich Caruana (2005) Predicting Good
Probabilities With Supervised Learning, in Proceedings of the 22nd
International Conference on Machine Learning (ICML).
See section 4 (Qualitative Analysis of Predictions).
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
if normalize: # Normalize predicted values into interval [0, 1]
y_prob = (y_prob - y_prob.min()) / (y_prob.max() - y_prob.min())
elif y_prob.min() < 0 or y_prob.max() > 1:
raise ValueError("y_prob has values outside [0, 1] and normalize is "
"set to False.")
y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
bins = np.linspace(0., 1. + 1e-8, n_bins + 1)
binids = np.digitize(y_prob, bins) - 1
bin_sums = np.bincount(binids, weights=y_prob, minlength=len(bins))
bin_true = np.bincount(binids, weights=y_true, minlength=len(bins))
bin_total = np.bincount(binids, minlength=len(bins))
nonzero = bin_total != 0
prob_true = (bin_true[nonzero] / bin_total[nonzero])
prob_pred = (bin_sums[nonzero] / bin_total[nonzero])
return prob_true, prob_pred
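# Illustrative usage sketch tying the pieces of this module together. The
# synthetic dataset, the GaussianNB base estimator and the bin count are
# arbitrary choices made for the example only.
def _calibration_usage_example():  # pragma: no cover - illustrative helper
    from sklearn.datasets import make_classification
    from sklearn.naive_bayes import GaussianNB
    X, y = make_classification(n_samples=1000, n_features=20, random_state=0)
    # Calibrate a naive Bayes classifier with 3-fold sigmoid (Platt) scaling.
    clf = CalibratedClassifierCV(GaussianNB(), method='sigmoid', cv=3)
    clf.fit(X, y)
    prob_pos = clf.predict_proba(X)[:, 1]
    # Reliability-diagram inputs: fraction of positives vs. mean predicted
    # probability per bin.
    prob_true, prob_pred = calibration_curve(y, prob_pos, n_bins=10)
    return prob_true, prob_pred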
| bsd-3-clause |
robertwb/incubator-beam | sdks/python/apache_beam/dataframe/pandas_docs_test.py | 6 | 4559 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module for running the pandas docs (such as the users guide) against our
dataframe implementation.
Run as python -m apache_beam.dataframe.pandas_docs_test [getting_started ...]
"""
import argparse
import contextlib
import io
import multiprocessing
import os
import sys
import time
import urllib.request
import zipfile
from apache_beam.dataframe import doctests
PANDAS_VERSION = '1.1.1'
PANDAS_DIR = os.path.expanduser("~/.apache_beam/cache/pandas-" + PANDAS_VERSION)
PANDAS_DOCS_SOURCE = os.path.join(PANDAS_DIR, 'doc', 'source')
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'-p',
'--parallel',
type=int,
default=0,
help='Number of tests to run in parallel. '
'Defaults to 0, meaning the number of cores on the machine.')
parser.add_argument('docs', nargs='*')
args = parser.parse_args()
if not os.path.exists(PANDAS_DIR):
# Download the pandas source.
os.makedirs(os.path.dirname(PANDAS_DIR), exist_ok=True)
zip = os.path.join(PANDAS_DIR + '.zip')
if not os.path.exists(zip):
url = (
'https://github.com/pandas-dev/pandas/archive/v%s.zip' %
PANDAS_VERSION)
print('Downloading', url)
with urllib.request.urlopen(url) as fin:
with open(zip + '.tmp', 'wb') as fout:
fout.write(fin.read())
os.rename(zip + '.tmp', zip)
print('Extracting', zip)
with zipfile.ZipFile(zip, 'r') as handle:
handle.extractall(os.path.dirname(PANDAS_DIR))
tests = args.docs or ['getting_started', 'user_guide']
paths = []
filters = []
# Explicit paths.
for test in tests:
if os.path.exists(test):
paths.append(test)
else:
filters.append(test)
# Names of pandas source files.
for root, _, files in os.walk(PANDAS_DOCS_SOURCE):
for name in files:
if name.endswith('.rst'):
path = os.path.join(root, name)
if any(filter in path for filter in filters):
paths.append(path)
# Using a global here is a bit hacky, but avoids pickling issues when used
# with multiprocessing.
  parallelism = min(args.parallel or multiprocessing.cpu_count(), len(paths))
if parallelism > 1:
pool_map = multiprocessing.pool.Pool(parallelism).imap_unordered
run_tests = run_tests_capturing_stdout
# Make sure slow tests get started first.
paths.sort(
key=lambda path: ('enhancingperf' in path, os.path.getsize(path)),
reverse=True)
else:
pool_map = map
run_tests = run_tests_streaming_stdout
# Now run all the tests.
running_summary = doctests.Summary()
for count, (summary, stdout) in enumerate(pool_map(run_tests, paths)):
running_summary += summary
if stdout:
print(stdout)
print(count, '/', len(paths), 'done.')
print('*' * 72)
print("Final summary:")
running_summary.summarize()
def run_tests_capturing_stdout(path):
with deferred_stdout() as stdout:
return run_tests(path), stdout()
def run_tests_streaming_stdout(path):
return run_tests(path), None
def run_tests(path):
# Optionally capture the stdout as interleaved test errors are painful
# to debug. On the other hand, if there is no parallelism, let the
# output be streamed.
start = time.time()
with open(path) as f:
rst = f.read()
res = doctests.test_rst_ipython(
rst,
path,
report=True,
wont_implement_ok=['*'],
not_implemented_ok=['*'],
use_beam=False).summary
print("Total time for {}: {:.2f} secs".format(path, time.time() - start))
return res
@contextlib.contextmanager
def deferred_stdout():
captured = io.StringIO()
old_stdout, sys.stdout = sys.stdout, captured
  try:
    yield captured.getvalue
  finally:
    # Restore stdout even if the wrapped test run raises.
    sys.stdout = old_stdout
if __name__ == '__main__':
main()
| apache-2.0 |
samzhang111/scikit-learn | sklearn/ensemble/tests/test_partial_dependence.py | 365 | 6996 | """
Testing for the partial dependence module.
"""
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import if_matplotlib
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the boston dataset
boston = datasets.load_boston()
# also load the iris dataset
iris = datasets.load_iris()
def test_partial_dependence_classifier():
# Test partial dependence for classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5)
# only 4 grid points instead of 5 because only 4 unique X[:,0] vals
assert pdp.shape == (1, 4)
assert axes[0].shape[0] == 4
# now with our own grid
X_ = np.asarray(X)
grid = np.unique(X_[:, 0])
pdp_2, axes = partial_dependence(clf, [0], grid=grid)
assert axes is None
assert_array_equal(pdp, pdp_2)
def test_partial_dependence_multiclass():
# Test partial dependence for multi-class classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
n_classes = clf.n_classes_
pdp, axes = partial_dependence(
clf, [0], X=iris.data, grid_resolution=grid_resolution)
assert pdp.shape == (n_classes, grid_resolution)
assert len(axes) == 1
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_regressor():
# Test partial dependence for regressor
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
pdp, axes = partial_dependence(
clf, [0], X=boston.data, grid_resolution=grid_resolution)
assert pdp.shape == (1, grid_resolution)
assert axes[0].shape[0] == grid_resolution
def test_partial_dependecy_input():
# Test input validation of partial dependence.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=None, X=None)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=[0, 1], X=X)
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, partial_dependence,
{}, [0], X=X)
# Gradient boosting estimator must be fit
assert_raises(ValueError, partial_dependence,
GradientBoostingClassifier(), [0], X=X)
assert_raises(ValueError, partial_dependence, clf, [-1], X=X)
assert_raises(ValueError, partial_dependence, clf, [100], X=X)
# wrong ndim for grid
grid = np.random.rand(10, 2, 1)
assert_raises(ValueError, partial_dependence, clf, [0], grid=grid)
@if_matplotlib
def test_plot_partial_dependence():
# Test partial dependence plot function.
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, boston.data, [0, 1, (0, 1)],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with str features and array feature names
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with list feature_names
feature_names = boston.feature_names.tolist()
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
@if_matplotlib
def test_plot_partial_dependence_input():
# Test partial dependence plot function input checks.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
# not fitted yet
assert_raises(ValueError, plot_partial_dependence,
clf, X, [0])
clf.fit(X, y)
assert_raises(ValueError, plot_partial_dependence,
clf, np.array(X)[:, :0], [0])
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, plot_partial_dependence,
{}, X, [0])
# must be larger than -1
assert_raises(ValueError, plot_partial_dependence,
clf, X, [-1])
# too large feature value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [100])
# str feature but no feature_names
assert_raises(ValueError, plot_partial_dependence,
clf, X, ['foobar'])
# not valid features value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [{'foo': 'bar'}])
@if_matplotlib
def test_plot_partial_dependence_multiclass():
# Test partial dependence plot function on multi-class input.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label=0,
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# now with symbol labels
target = iris.target_names[iris.target]
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label='setosa',
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# label not in gbrt.classes_
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1], label='foobar',
grid_resolution=grid_resolution)
# label not provided
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1],
grid_resolution=grid_resolution)
| bsd-3-clause |
nvoron23/statsmodels | statsmodels/sandbox/panel/panelmod.py | 27 | 14526 | """
Sandbox Panel Estimators
References
-----------
Baltagi, Badi H. `Econometric Analysis of Panel Data.` 4th ed. Wiley, 2008.
"""
from __future__ import print_function
from statsmodels.compat.python import range, reduce
from statsmodels.tools.tools import categorical
from statsmodels.regression.linear_model import GLS, WLS
import numpy as np
__all__ = ["PanelModel"]
from pandas import LongPanel, __version__
def group(X):
"""
Returns unique numeric values for groups without sorting.
Examples
--------
>>> X = np.array(['a','a','b','c','b','c'])
    >>> g = group(X)
>>> g
array([ 0., 0., 1., 2., 1., 2.])
"""
uniq_dict = {}
group = np.zeros(len(X))
for i in range(len(X)):
if not X[i] in uniq_dict:
uniq_dict.update({X[i] : len(uniq_dict)})
group[i] = uniq_dict[X[i]]
return group
def repanel_cov(groups, sigmas):
'''calculate error covariance matrix for random effects model
Parameters
----------
groups : array, (nobs, nre) or (nobs,)
array of group/category observations
sigma : array, (nre+1,)
array of standard deviations of random effects,
last element is the standard deviation of the
idiosyncratic error
Returns
-------
omega : array, (nobs, nobs)
covariance matrix of error
omegainv : array, (nobs, nobs)
inverse covariance matrix of error
omegainvsqrt : array, (nobs, nobs)
squareroot inverse covariance matrix of error
such that omega = omegainvsqrt * omegainvsqrt.T
Notes
-----
This does not use sparse matrices and constructs nobs by nobs
matrices. Also, omegainvsqrt is not sparse, i.e. elements are non-zero
'''
if groups.ndim == 1:
groups = groups[:,None]
nobs, nre = groups.shape
omega = sigmas[-1]*np.eye(nobs)
for igr in range(nre):
group = groups[:,igr:igr+1]
groupuniq = np.unique(group)
dummygr = sigmas[igr] * (group == groupuniq).astype(float)
omega += np.dot(dummygr, dummygr.T)
ev, evec = np.linalg.eigh(omega) #eig doesn't work
omegainv = np.dot(evec, (1/ev * evec).T)
omegainvhalf = evec/np.sqrt(ev)
return omega, omegainv, omegainvhalf
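# Worked example for the helper above (values chosen for illustration): with a
# single one-way grouping [0, 0, 1] and sigmas = [1., 1.] (group effect and
# idiosyncratic term both equal to one), the construction yields
#     omega = np.eye(3) + np.array([[1., 1., 0.],
#                                   [1., 1., 0.],
#                                   [0., 0., 1.]])
# i.e. observations sharing a group pick up a common off-diagonal covariance.
#     om, om_inv, om_inv_half = repanel_cov(np.array([0, 0, 1]),
#                                           np.array([1., 1.]))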
class PanelData(LongPanel):
pass
class PanelModel(object):
"""
An abstract statistical model class for panel (longitudinal) datasets.
Parameters
---------
endog : array-like or str
If a pandas object is used then endog should be the name of the
endogenous variable as a string.
# exog
# panel_arr
# time_arr
panel_data : pandas.LongPanel object
Notes
-----
If a pandas object is supplied it is assumed that the major_axis is time
and that the minor_axis has the panel variable.
"""
def __init__(self, endog=None, exog=None, panel=None, time=None,
xtnames=None, equation=None, panel_data=None):
        if panel_data is None:
# if endog == None and exog == None and panel == None and \
# time == None:
# raise ValueError("If pandel_data is False then endog, exog, \
#panel_arr, and time_arr cannot be None.")
self.initialize(endog, exog, panel, time, xtnames, equation)
# elif aspandas != False:
# if not isinstance(endog, str):
# raise ValueError("If a pandas object is supplied then endog \
#must be a string containing the name of the endogenous variable")
# if not isinstance(aspandas, LongPanel):
# raise ValueError("Only pandas.LongPanel objects are supported")
# self.initialize_pandas(endog, aspandas, panel_name)
def initialize(self, endog, exog, panel, time, xtnames, equation):
"""
Initialize plain array model.
See PanelModel
"""
        #TODO: for now, we are going to assume a constant, and then make the first
#panel the base, add a flag for this....
# get names
names = equation.split(" ")
self.endog_name = names[0]
exog_names = names[1:] # this makes the order matter in the array
self.panel_name = xtnames[0]
self.time_name = xtnames[1]
novar = exog.var(0) == 0
if True in novar:
cons_index = np.where(novar == 1)[0][0] # constant col. num
exog_names.insert(cons_index, 'cons')
self._cons_index = novar # used again in fit_fixed
self.exog_names = exog_names
self.endog = np.squeeze(np.asarray(endog))
exog = np.asarray(exog)
self.exog = exog
self.panel = np.asarray(panel)
self.time = np.asarray(time)
self.paneluniq = np.unique(panel)
self.timeuniq = np.unique(time)
#TODO: this structure can possibly be extracted somewhat to deal with
#names in general
#TODO: add some dimension checks, etc.
# def initialize_pandas(self, endog, aspandas):
# """
# Initialize pandas objects.
#
# See PanelModel.
# """
# self.aspandas = aspandas
# endog = aspandas[endog].values
# self.endog = np.squeeze(endog)
# exog_name = aspandas.columns.tolist()
# exog_name.remove(endog)
# self.exog = aspandas.filterItems(exog_name).values
#TODO: can the above be simplified to slice notation?
# if panel_name != None:
# self.panel_name = panel_name
# self.exog_name = exog_name
# self.endog_name = endog
# self.time_arr = aspandas.major_axis
#TODO: is time always handled correctly in fromRecords?
# self.panel_arr = aspandas.minor_axis
#TODO: all of this might need to be refactored to explicitly rely (internally)
# on the pandas LongPanel structure for speed and convenience.
# not sure this part is finished...
#TODO: doesn't conform to new initialize
def initialize_pandas(self, panel_data, endog_name, exog_name):
self.panel_data = panel_data
endog = panel_data[endog_name].values # does this create a copy?
self.endog = np.squeeze(endog)
        if exog_name is None:
exog_name = panel_data.columns.tolist()
exog_name.remove(endog_name)
self.exog = panel_data.filterItems(exog_name).values # copy?
self._exog_name = exog_name
self._endog_name = endog_name
self._timeseries = panel_data.major_axis # might not need these
self._panelseries = panel_data.minor_axis
#TODO: this could be pulled out and just have a by kwd that takes
# the panel or time array
#TODO: this also needs to be expanded for 'twoway'
def _group_mean(self, X, index='oneway', counts=False, dummies=False):
"""
Get group means of X by time or by panel.
index default is panel
"""
if index == 'oneway':
Y = self.panel
uniq = self.paneluniq
elif index == 'time':
Y = self.time
uniq = self.timeuniq
else:
raise ValueError("index %s not understood" % index)
#TODO: use sparse matrices
dummy = (Y == uniq[:,None]).astype(float)
if X.ndim > 1:
mean = np.dot(dummy,X)/dummy.sum(1)[:,None]
else:
mean = np.dot(dummy,X)/dummy.sum(1)
if counts == False and dummies == False:
return mean
elif counts == True and dummies == False:
return mean, dummy.sum(1)
elif counts == True and dummies == True:
return mean, dummy.sum(1), dummy
elif counts == False and dummies == True:
return mean, dummy
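    # Worked example for _group_mean (values chosen for illustration): with
    # panel = [0, 0, 1] and X = [1., 3., 5.], the dummy matrix built above is
    #     [[1., 1., 0.],
    #      [0., 0., 1.]]
    # so the one-way group means are dot(dummy, X) / dummy.sum(1) = [2., 5.].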
#TODO: Use kwd arguments or have fit_method methods?
def fit(self, model=None, method=None, effects='oneway'):
"""
method : LSDV, demeaned, MLE, GLS, BE, FE, optional
model :
between
fixed
random
pooled
[gmm]
effects :
oneway
time
twoway
femethod : demeaned (only one implemented)
WLS
remethod :
swar -
amemiya
nerlove
walhus
Notes
------
This is unfinished. None of the method arguments work yet.
Only oneway effects should work.
"""
if method: # get rid of this with default
method = method.lower()
model = model.lower()
if method and method not in ["lsdv", "demeaned", "mle", "gls", "be",
"fe"]: # get rid of if method with default
raise ValueError("%s not a valid method" % method)
# if method == "lsdv":
# self.fit_lsdv(model)
if model == 'pooled':
return GLS(self.endog, self.exog).fit()
if model == 'between':
return self._fit_btwn(method, effects)
if model == 'fixed':
return self._fit_fixed(method, effects)
# def fit_lsdv(self, effects):
# """
# Fit using least squares dummy variables.
#
# Notes
# -----
# Should only be used for small `nobs`.
# """
# pdummies = None
# tdummies = None
def _fit_btwn(self, method, effects):
# group mean regression or WLS
if effects != "twoway":
endog = self._group_mean(self.endog, index=effects)
exog = self._group_mean(self.exog, index=effects)
else:
raise ValueError("%s effects is not valid for the between \
estimator" % s)
befit = GLS(endog, exog).fit()
return befit
def _fit_fixed(self, method, effects):
endog = self.endog
exog = self.exog
demeantwice = False
if effects in ["oneway","twoways"]:
if effects == "twoways":
demeantwice = True
effects = "oneway"
endog_mean, counts = self._group_mean(endog, index=effects,
counts=True)
exog_mean = self._group_mean(exog, index=effects)
counts = counts.astype(int)
endog = endog - np.repeat(endog_mean, counts)
exog = exog - np.repeat(exog_mean, counts, axis=0)
if demeantwice or effects == "time":
endog_mean, dummies = self._group_mean(endog, index="time",
dummies=True)
exog_mean = self._group_mean(exog, index="time")
# This allows unbalanced panels
endog = endog - np.dot(endog_mean, dummies)
exog = exog - np.dot(dummies.T, exog_mean)
fefit = GLS(endog, exog[:,-self._cons_index]).fit()
#TODO: might fail with one regressor
return fefit
class SURPanel(PanelModel):
pass
class SEMPanel(PanelModel):
pass
class DynamicPanel(PanelModel):
pass
if __name__ == "__main__":
import pandas
from pandas import LongPanel
import statsmodels.api as sm
import numpy.lib.recfunctions as nprf
data = sm.datasets.grunfeld.load()
# Baltagi doesn't include American Steel
endog = data.endog[:-20]
fullexog = data.exog[:-20]
# fullexog.sort(order=['firm','year'])
panel_arr = nprf.append_fields(fullexog, 'investment', endog, float,
usemask=False)
panel_panda = LongPanel.fromRecords(panel_arr, major_field='year',
minor_field='firm')
# the most cumbersome way of doing it as far as preprocessing by hand
exog = fullexog[['value','capital']].view(float).reshape(-1,2)
exog = sm.add_constant(exog, prepend=False)
panel = group(fullexog['firm'])
year = fullexog['year']
panel_mod = PanelModel(endog, exog, panel, year, xtnames=['firm','year'],
equation='invest value capital')
# note that equation doesn't actually do anything but name the variables
panel_ols = panel_mod.fit(model='pooled')
panel_be = panel_mod.fit(model='between', effects='oneway')
panel_fe = panel_mod.fit(model='fixed', effects='oneway')
panel_bet = panel_mod.fit(model='between', effects='time')
panel_fet = panel_mod.fit(model='fixed', effects='time')
panel_fe2 = panel_mod.fit(model='fixed', effects='twoways')
#see also Baltagi (3rd edt) 3.3 THE RANDOM EFFECTS MODEL p.35
#for explicit formulas for spectral decomposition
#but this works also for unbalanced panel
#
#I also just saw: 9.4.2 The Random Effects Model p.176 which is
#partially almost the same as I did
#
#this needs to use sparse matrices for larger datasets
#
#"""
#
#import numpy as np
#
groups = np.array([0,0,0,1,1,2,2,2])
nobs = groups.shape[0]
groupuniq = np.unique(groups)
periods = np.array([0,1,2,1,2,0,1,2])
perioduniq = np.unique(periods)
dummygr = (groups[:,None] == groupuniq).astype(float)
dummype = (periods[:,None] == perioduniq).astype(float)
sigma = 1.
sigmagr = np.sqrt(2.)
sigmape = np.sqrt(3.)
#dummyall = np.c_[sigma*np.ones((nobs,1)), sigmagr*dummygr,
# sigmape*dummype]
#exclude constant ?
dummyall = np.c_[sigmagr*dummygr, sigmape*dummype]
# omega is the error variance-covariance matrix for the stacked
# observations
omega = np.dot(dummyall, dummyall.T) + sigma* np.eye(nobs)
print(omega)
print(np.linalg.cholesky(omega))
ev, evec = np.linalg.eigh(omega) #eig doesn't work
omegainv = np.dot(evec, (1/ev * evec).T)
omegainv2 = np.linalg.inv(omega)
omegacomp = np.dot(evec, (ev * evec).T)
print(np.max(np.abs(omegacomp - omega)))
#check
#print(np.dot(omegainv,omega)
print(np.max(np.abs(np.dot(omegainv,omega) - np.eye(nobs))))
omegainvhalf = evec/np.sqrt(ev) #not sure whether ev shouldn't be column
print(np.max(np.abs(np.dot(omegainvhalf,omegainvhalf.T) - omegainv)))
# now we can use omegainvhalf in GLS (instead of the cholesky)
sigmas2 = np.array([sigmagr, sigmape, sigma])
groups2 = np.column_stack((groups, periods))
omega_, omegainv_, omegainvhalf_ = repanel_cov(groups2, sigmas2)
print(np.max(np.abs(omega_ - omega)))
print(np.max(np.abs(omegainv_ - omegainv)))
print(np.max(np.abs(omegainvhalf_ - omegainvhalf)))
# notation Baltagi (3rd) section 9.4.1 (Fixed Effects Model)
Pgr = reduce(np.dot,[dummygr,
np.linalg.inv(np.dot(dummygr.T, dummygr)),dummygr.T])
Qgr = np.eye(nobs) - Pgr
# within group effect: np.dot(Qgr, groups)
# but this is not memory efficient, compared to groupstats
print(np.max(np.abs(np.dot(Qgr, groups))))
| bsd-3-clause |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/sklearn/feature_selection/tests/test_chi2.py | 49 | 3080 | """
Tests for chi2, currently the only feature selection function designed
specifically to work with sparse matrices.
"""
import warnings
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
import scipy.stats
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.feature_selection.univariate_selection import _chisquare
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import clean_warning_registry
# Feature 0 is highly informative for class 1;
# feature 1 is the same everywhere;
# feature 2 is a bit informative for class 2.
X = [[2, 1, 2],
[9, 1, 1],
[6, 1, 2],
[0, 1, 2]]
y = [0, 1, 2, 2]
def mkchi2(k):
"""Make k-best chi2 selector"""
return SelectKBest(chi2, k=k)
def test_chi2():
# Test Chi2 feature extraction
chi2 = mkchi2(k=1).fit(X, y)
chi2 = mkchi2(k=1).fit(X, y)
assert_array_equal(chi2.get_support(indices=True), [0])
assert_array_equal(chi2.transform(X), np.array(X)[:, [0]])
chi2 = mkchi2(k=2).fit(X, y)
assert_array_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xsp = csr_matrix(X, dtype=np.float64)
chi2 = mkchi2(k=2).fit(Xsp, y)
assert_array_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xtrans = chi2.transform(Xsp)
assert_array_equal(Xtrans.shape, [Xsp.shape[0], 2])
# == doesn't work on scipy.sparse matrices
Xtrans = Xtrans.toarray()
Xtrans2 = mkchi2(k=2).fit_transform(Xsp, y).toarray()
assert_array_equal(Xtrans, Xtrans2)
def test_chi2_coo():
# Check that chi2 works with a COO matrix
# (as returned by CountVectorizer, DictVectorizer)
Xcoo = coo_matrix(X)
mkchi2(k=2).fit_transform(Xcoo, y)
# if we got here without an exception, we're safe
def test_chi2_negative():
# Check for proper error on negative numbers in the input X.
X, y = [[0, 1], [-1e-20, 1]], [0, 1]
for X in (X, np.array(X), csr_matrix(X)):
assert_raises(ValueError, chi2, X, y)
def test_chi2_unused_feature():
# Unused feature should evaluate to NaN
# and should issue no runtime warning
clean_warning_registry()
with warnings.catch_warnings(record=True) as warned:
warnings.simplefilter('always')
chi, p = chi2([[1, 0], [0, 0]], [1, 0])
for w in warned:
if 'divide by zero' in repr(w):
raise AssertionError('Found unexpected warning %s' % w)
assert_array_equal(chi, [1, np.nan])
assert_array_equal(p[1], np.nan)
def test_chisquare():
# Test replacement for scipy.stats.chisquare against the original.
obs = np.array([[2., 2.],
[1., 1.]])
exp = np.array([[1.5, 1.5],
[1.5, 1.5]])
# call SciPy first because our version overwrites obs
chi_scp, p_scp = scipy.stats.chisquare(obs, exp)
chi_our, p_our = _chisquare(obs, exp)
assert_array_almost_equal(chi_scp, chi_our)
assert_array_almost_equal(p_scp, p_our)
| mit |
calliope-project/calliope | calliope/test/test_backend_pyomo_interface.py | 1 | 12194 | import pytest # noqa: F401
import pandas as pd
import pyomo.core as po
import calliope
import calliope.exceptions as exceptions
from calliope.test.common.util import build_test_model as build_model
from calliope.test.common.util import check_error_or_warning
@pytest.fixture(scope="class")
def model():
m = build_model({}, "simple_supply,two_hours,investment_costs")
m.run()
return m
class TestGetInputParams:
def test_get_input_params(self, model):
"""
Test that the function access_model_inputs works
"""
inputs = model.backend.access_model_inputs()
assert set(model.inputs.data_vars).symmetric_difference(inputs.data_vars) == {
"objective_cost_class"
}
class TestUpdateParam:
def test_update_param_single_dim(self, model):
"""
test that the function update_param works with a single dimension
"""
model.backend.update_param("energy_cap_max", {("b", "test_supply_elec"): 20})
assert (
model._backend_model.energy_cap_max.extract_values()[
("b", "test_supply_elec")
]
== 20
)
def test_update_param_multiple_vals(self, model):
"""
        test that the function update_param works with multiple values
"""
model.backend.update_param(
"energy_cap_max",
{("b", "test_supply_elec"): 20, ("a", "test_supply_elec"): 30},
)
assert (
model._backend_model.energy_cap_max.extract_values()[
("b", "test_supply_elec")
]
== 20
)
assert (
model._backend_model.energy_cap_max.extract_values()[
("a", "test_supply_elec")
]
== 30
)
def test_update_param_multiple_dim(self, model):
"""
test that the function update_param works with multiple dimensions
"""
time = model._backend_model.timesteps[1]
model.backend.update_param(
"resource",
{("a", "test_demand_elec", time): -10},
)
assert (
model._backend_model.resource.extract_values()[
("a", "test_demand_elec", time)
]
== -10
)
def test_unknown_param(self, model):
"""
Raise error on unknown param
"""
with pytest.raises(exceptions.ModelError) as excinfo:
model.backend.update_param("unknown_param", {("b", "test_supply_elec"): 20})
assert check_error_or_warning(
excinfo, "Parameter `unknown_param` not in the Pyomo Backend."
)
def test_not_a_param(self, model):
"""
Raise error when trying to update a non-Param Pyomo object
"""
with pytest.raises(exceptions.ModelError) as excinfo:
model.backend.update_param("energy_cap", {("b", "test_supply_elec"): 20})
assert check_error_or_warning(
excinfo, "`energy_cap` not a Parameter in the Pyomo Backend."
)
with pytest.raises(exceptions.ModelError) as excinfo:
model.backend.update_param("loc_techs", {("b", "test_supply_elec"): 20})
assert check_error_or_warning(
excinfo, "Parameter `loc_techs` not in the Pyomo Backend."
)
def index_not_in_param(self, model):
"""
Raise error when accessing unknown index
"""
with pytest.raises(KeyError, match=r"Index 'region1-xc1::csp'"):
model.backend.update_param(
"energy_cap_max", {("c", "test_supply_elec"): 20}
)
with pytest.raises(KeyError, match=r"Index 'region1-xc1::csp'"):
model.backend.update_param(
"energy_cap_max",
{("b", "test_supply_elec"): 20, ("c", "test_supply_elec"): 20},
)
class TestActivateConstraint:
def test_activate_constraint(self, model):
"""
test that the function activate_constraint works
"""
model.backend.activate_constraint("system_balance_constraint", active=False)
assert not model._backend_model.system_balance_constraint.active
model.backend.activate_constraint("system_balance_constraint", active=True)
assert model._backend_model.system_balance_constraint.active
def test_fail_on_activate_unknown_constraint(self, model):
"""
test that the function activate_constraint fails if unknown constraint
"""
with pytest.raises(exceptions.ModelError) as excinfo:
model.backend.activate_constraint("unknown_constraint", active=False)
assert check_error_or_warning(
excinfo,
"constraint/objective `unknown_constraint` not in the Pyomo Backend.",
)
def test_fail_on_parameter_activate(self, model):
"""
test that the function activate_constraint fails if trying to activate a
non-constraint Pyomo object.
"""
with pytest.raises(exceptions.ModelError) as excinfo:
model.backend.activate_constraint("resource", active=False)
assert check_error_or_warning(
excinfo, "`resource` not a constraint in the Pyomo Backend."
)
def test_non_boolean_parameter_activate(self, model):
"""
test that the function activate_constraint fails when setting active to
non-boolean
"""
with pytest.raises(ValueError) as excinfo:
model.backend.activate_constraint("system_balance_constraint", active=None)
assert check_error_or_warning(
excinfo, "Argument `active` must be True or False"
)
class TestBackendRerun:
def test_rerun(self, model):
"""
test that the function rerun works
"""
with pytest.warns(exceptions.ModelWarning) as excinfo:
new_model = model.backend.rerun()
assert isinstance(new_model, calliope.Model)
for i in ["_timings", "inputs", "results"]:
assert hasattr(new_model, i)
        # In new_model.inputs all NaN values have been replaced by their default,
# so we can't directly compare new_model.inputs and model.inputs
assert new_model.inputs.equals(
model.backend.access_model_inputs().reindex(new_model.inputs.coords)
)
assert new_model.results.equals(model.results)
assert check_error_or_warning(
excinfo, "The results of rerunning the backend model are only available"
)
def test_update_and_rerun(self, model):
"""
test that the function rerun works
"""
model.backend.update_param("energy_cap_max", {("b", "test_supply_elec"): 20})
with pytest.warns(exceptions.ModelWarning) as excinfo:
new_model = model.backend.rerun()
assert (
new_model.inputs.energy_cap_max.loc[
{"nodes": "b", "techs": "test_supply_elec"}
]
== 20
)
assert check_error_or_warning(
excinfo, "The results of rerunning the backend model are only available"
)
def test_rerun_fail_on_operate(self, model):
# should fail if the run mode is not 'plan'
model.run_config["mode"] = "operate"
with pytest.raises(exceptions.ModelError) as excinfo:
model.backend.rerun()
assert check_error_or_warning(
excinfo, "Cannot rerun the backend in operate run mode"
)
class TestGetAllModelAttrs:
def test_get_all_attrs(self, model):
"""Model attributes consist of variables, parameters, and sets"""
attrs = model.backend.get_all_model_attrs()
assert attrs.keys() == set(["Set", "Param", "Var", "Expression"])
assert isinstance(attrs["Var"], dict)
assert isinstance(attrs["Param"], dict)
assert isinstance(attrs["Expression"], dict)
assert isinstance(attrs["Set"], list)
def test_check_attrs(self, model):
"""Test one of each object type, just to make sure they are correctly assigned"""
attrs = model.backend.get_all_model_attrs()
assert "energy_cap" in attrs["Var"].keys()
assert "cost" in attrs["Expression"].keys()
assert "resource" in attrs["Param"].keys()
assert "carriers" in attrs["Set"]
class TestAddConstraint:
def test_no_backend(self, model):
"""Must include 'backend_model' as first function argument """
def energy_cap_time_varying_rule(backend, node, tech, timestep):
return (
backend.energy_cap[node, tech]
<= backend.energy_cap[node, tech]
* backend.resource[node, tech, timestep]
)
constraint_name = "energy_cap_time_varying"
constraint_sets = ["nodes", "techs", "timesteps"]
with pytest.raises(AssertionError) as excinfo:
model.backend.add_constraint(
constraint_name, constraint_sets, energy_cap_time_varying_rule
)
assert check_error_or_warning(
excinfo, "First argument of constraint function must be 'backend_model'."
)
def test_arg_mismatch(self, model):
"""length of function arguments = length of sets + 1"""
def energy_cap_time_varying_rule(
backend_model, node, tech, timestep, extra_arg
):
return (
backend_model.energy_cap[node, tech]
<= backend_model.energy_cap[node, tech]
* backend_model.resource[node, tech, timestep]
+ extra_arg
)
constraint_name = "energy_cap_time_varying"
constraint_sets = ["nodes", "techs", "timesteps"]
with pytest.raises(AssertionError) as excinfo:
model.backend.add_constraint(
constraint_name, constraint_sets, energy_cap_time_varying_rule
)
assert check_error_or_warning(
excinfo,
"Number of constraint arguments must equal number of constraint sets + 1.",
)
def test_sets(self, model):
"""Constraint sets must be backend model sets"""
def energy_cap_time_varying_rule(backend_model, node, tech, not_a_set):
return (
backend_model.energy_cap[node, tech]
<= backend_model.energy_cap[node, tech]
* backend_model.resource[node, tech, not_a_set]
)
constraint_name = "energy_cap_time_varying"
constraint_sets = ["nodes", "techs", "not_a_set"]
with pytest.raises(AttributeError) as excinfo:
model.backend.add_constraint(
constraint_name, constraint_sets, energy_cap_time_varying_rule
)
assert check_error_or_warning(
excinfo, "Pyomo backend model object has no attribute 'not_a_set'"
)
@pytest.mark.xfail(
reason="currently generic sets don't work, choosing to ignore and then override with custom constraints"
)
def test_added_constraint(self, model):
"""
Test the successful addition of a constraint which only allows carrier
consumption at a maximum rate of half the energy capacity.
"""
def new_constraint_rule(backend_model, node, tech, timestep):
carrier_con = backend_model.carrier_con[:, node, tech, timestep]
timestep_resolution = backend_model.timestep_resolution[timestep]
return po.quicksum(carrier_con) * 2 >= (
-1 * backend_model.energy_cap[node, tech] * timestep_resolution
)
constraint_name = "new_constraint"
constraint_sets = ["nodes", "techs", "timesteps"]
model.backend.add_constraint(
constraint_name, constraint_sets, new_constraint_rule
)
assert hasattr(model._backend_model, "new_constraint")
new_model = model.backend.rerun()
assert (
new_model.results.energy_cap.loc[("b", "test_demand_elec")]
== model.results.energy_cap.loc[("b", "test_demand_elec")] * 2
)
| apache-2.0 |
ClimbsRocks/scikit-learn | examples/ensemble/plot_adaboost_twoclass.py | 347 | 3268 | """
==================
Two-class AdaBoost
==================
This example fits an AdaBoosted decision stump on a non-linearly separable
classification dataset composed of two "Gaussian quantiles" clusters
(see :func:`sklearn.datasets.make_gaussian_quantiles`) and plots the decision
boundary and decision scores. The distributions of decision scores are shown
separately for samples of class A and B. The predicted class label for each
sample is determined by the sign of the decision score. Samples with decision
scores greater than zero are classified as B, and are otherwise classified
as A. The magnitude of a decision score determines the degree of likeness with
the predicted class label. Additionally, a new dataset could be constructed
containing a desired purity of class B, for example, by only selecting samples
with a decision score above some value.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_gaussian_quantiles
# Construct dataset
X1, y1 = make_gaussian_quantiles(cov=2.,
n_samples=200, n_features=2,
n_classes=2, random_state=1)
X2, y2 = make_gaussian_quantiles(mean=(3, 3), cov=1.5,
n_samples=300, n_features=2,
n_classes=2, random_state=1)
X = np.concatenate((X1, X2))
y = np.concatenate((y1, - y2 + 1))
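# Note: "- y2 + 1" inverts the class labels of the second cluster (0 <-> 1) before the two
# clusters are concatenated; this is what produces the non-linearly separable two-class
# problem described in the module docstring.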
# Create and fit an AdaBoosted decision tree
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1),
algorithm="SAMME",
n_estimators=200)
bdt.fit(X, y)
plot_colors = "br"
plot_step = 0.02
class_names = "AB"
plt.figure(figsize=(10, 5))
# Plot the decision boundaries
plt.subplot(121)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = bdt.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis("tight")
# Plot the training points
for i, n, c in zip(range(2), class_names, plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1],
c=c, cmap=plt.cm.Paired,
label="Class %s" % n)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.legend(loc='upper right')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Decision Boundary')
# Plot the two-class decision scores
twoclass_output = bdt.decision_function(X)
plot_range = (twoclass_output.min(), twoclass_output.max())
plt.subplot(122)
for i, n, c in zip(range(2), class_names, plot_colors):
plt.hist(twoclass_output[y == i],
bins=10,
range=plot_range,
facecolor=c,
label='Class %s' % n,
alpha=.5)
x1, x2, y1, y2 = plt.axis()
plt.axis((x1, x2, y1, y2 * 1.2))
plt.legend(loc='upper right')
plt.ylabel('Samples')
plt.xlabel('Score')
plt.title('Decision Scores')
plt.tight_layout()
plt.subplots_adjust(wspace=0.35)
plt.show()
| bsd-3-clause |
futurulus/scipy | scipy/stats/_discrete_distns.py | 34 | 21220 | #
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from __future__ import division, print_function, absolute_import
from scipy import special
from scipy.special import entr, gammaln as gamln
from scipy.misc import logsumexp
from numpy import floor, ceil, log, exp, sqrt, log1p, expm1, tanh, cosh, sinh
import numpy as np
from ._distn_infrastructure import (
rv_discrete, _lazywhere, _ncx2_pdf, _ncx2_cdf, get_distribution_names)
class binom_gen(rv_discrete):
"""A binomial discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `binom` is::
binom.pmf(k) = choose(n, k) * p**k * (1-p)**(n-k)
for ``k`` in ``{0, 1,..., n}``.
`binom` takes ``n`` and ``p`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, n, p):
return self._random_state.binomial(n, p, self._size)
def _argcheck(self, n, p):
self.b = n
return (n >= 0) & (p >= 0) & (p <= 1)
def _logpmf(self, x, n, p):
k = floor(x)
combiln = (gamln(n+1) - (gamln(k+1) + gamln(n-k+1)))
return combiln + special.xlogy(k, p) + special.xlog1py(n-k, -p)
def _pmf(self, x, n, p):
return exp(self._logpmf(x, n, p))
def _cdf(self, x, n, p):
k = floor(x)
vals = special.bdtr(k, n, p)
return vals
def _sf(self, x, n, p):
k = floor(x)
return special.bdtrc(k, n, p)
def _ppf(self, q, n, p):
vals = ceil(special.bdtrik(q, n, p))
vals1 = np.maximum(vals - 1, 0)
temp = special.bdtr(vals1, n, p)
return np.where(temp >= q, vals1, vals)
def _stats(self, n, p, moments='mv'):
q = 1.0 - p
mu = n * p
var = n * p * q
g1, g2 = None, None
if 's' in moments:
g1 = (q - p) / sqrt(var)
if 'k' in moments:
g2 = (1.0 - 6*p*q) / var
return mu, var, g1, g2
def _entropy(self, n, p):
k = np.r_[0:n + 1]
vals = self._pmf(k, n, p)
return np.sum(entr(vals), axis=0)
binom = binom_gen(name='binom')
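# Illustrative sanity check: binom.pmf(3, n=10, p=0.5) = choose(10, 3) * 0.5**10 = 120/1024 ~ 0.1172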
class bernoulli_gen(binom_gen):
"""A Bernoulli discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `bernoulli` is::
bernoulli.pmf(k) = 1-p if k = 0
= p if k = 1
for ``k`` in ``{0, 1}``.
`bernoulli` takes ``p`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, p):
return binom_gen._rvs(self, 1, p)
def _argcheck(self, p):
return (p >= 0) & (p <= 1)
def _logpmf(self, x, p):
return binom._logpmf(x, 1, p)
def _pmf(self, x, p):
return binom._pmf(x, 1, p)
def _cdf(self, x, p):
return binom._cdf(x, 1, p)
def _sf(self, x, p):
return binom._sf(x, 1, p)
def _ppf(self, q, p):
return binom._ppf(q, 1, p)
def _stats(self, p):
return binom._stats(1, p)
def _entropy(self, p):
return entr(p) + entr(1-p)
bernoulli = bernoulli_gen(b=1, name='bernoulli')
class nbinom_gen(rv_discrete):
"""A negative binomial discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `nbinom` is::
nbinom.pmf(k) = choose(k+n-1, n-1) * p**n * (1-p)**k
for ``k >= 0``.
`nbinom` takes ``n`` and ``p`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, n, p):
return self._random_state.negative_binomial(n, p, self._size)
def _argcheck(self, n, p):
return (n > 0) & (p >= 0) & (p <= 1)
def _pmf(self, x, n, p):
return exp(self._logpmf(x, n, p))
def _logpmf(self, x, n, p):
coeff = gamln(n+x) - gamln(x+1) - gamln(n)
return coeff + n*log(p) + special.xlog1py(x, -p)
def _cdf(self, x, n, p):
k = floor(x)
return special.betainc(n, k+1, p)
def _sf_skip(self, x, n, p):
# skip because special.nbdtrc doesn't work for 0<n<1
k = floor(x)
return special.nbdtrc(k, n, p)
def _ppf(self, q, n, p):
vals = ceil(special.nbdtrik(q, n, p))
vals1 = (vals-1).clip(0.0, np.inf)
temp = self._cdf(vals1, n, p)
return np.where(temp >= q, vals1, vals)
def _stats(self, n, p):
Q = 1.0 / p
P = Q - 1.0
mu = n*P
var = n*P*Q
g1 = (Q+P)/sqrt(n*P*Q)
g2 = (1.0 + 6*P*Q) / (n*P*Q)
return mu, var, g1, g2
nbinom = nbinom_gen(name='nbinom')
class geom_gen(rv_discrete):
"""A geometric discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `geom` is::
geom.pmf(k) = (1-p)**(k-1)*p
for ``k >= 1``.
`geom` takes ``p`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, p):
return self._random_state.geometric(p, size=self._size)
def _argcheck(self, p):
return (p <= 1) & (p >= 0)
def _pmf(self, k, p):
return np.power(1-p, k-1) * p
def _logpmf(self, k, p):
return special.xlog1py(k - 1, -p) + log(p)
def _cdf(self, x, p):
k = floor(x)
return -expm1(log1p(-p)*k)
def _sf(self, x, p):
return np.exp(self._logsf(x, p))
def _logsf(self, x, p):
k = floor(x)
return k*log1p(-p)
def _ppf(self, q, p):
vals = ceil(log(1.0-q)/log(1-p))
temp = self._cdf(vals-1, p)
return np.where((temp >= q) & (vals > 0), vals-1, vals)
def _stats(self, p):
mu = 1.0/p
qr = 1.0-p
var = qr / p / p
g1 = (2.0-p) / sqrt(qr)
g2 = np.polyval([1, -6, 6], p)/(1.0-p)
return mu, var, g1, g2
geom = geom_gen(a=1, name='geom', longname="A geometric")
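# Illustrative sanity check: geom.pmf(2, p=0.3) = (1 - 0.3)**1 * 0.3 = 0.21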
class hypergeom_gen(rv_discrete):
"""A hypergeometric discrete random variable.
The hypergeometric distribution models drawing objects from a bin.
M is the total number of objects, n is the total number of Type I objects.
The random variate represents the number of Type I objects in N drawn
without replacement from the total population.
%(before_notes)s
Notes
-----
The probability mass function is defined as::
pmf(k, M, n, N) = choose(n, k) * choose(M - n, N - k) / choose(M, N),
for max(0, N - (M-n)) <= k <= min(n, N)
%(after_notes)s
Examples
--------
>>> from scipy.stats import hypergeom
>>> import matplotlib.pyplot as plt
Suppose we have a collection of 20 animals, of which 7 are dogs. Then if
we want to know the probability of finding a given number of dogs if we
choose at random 12 of the 20 animals, we can initialize a frozen
distribution and plot the probability mass function:
>>> [M, n, N] = [20, 7, 12]
>>> rv = hypergeom(M, n, N)
>>> x = np.arange(0, n+1)
>>> pmf_dogs = rv.pmf(x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, pmf_dogs, 'bo')
>>> ax.vlines(x, 0, pmf_dogs, lw=2)
>>> ax.set_xlabel('# of dogs in our group of chosen animals')
>>> ax.set_ylabel('hypergeom PMF')
>>> plt.show()
Instead of using a frozen distribution we can also use `hypergeom`
methods directly. To for example obtain the cumulative distribution
function, use:
>>> prb = hypergeom.cdf(x, M, n, N)
And to generate random numbers:
>>> R = hypergeom.rvs(M, n, N, size=10)
"""
def _rvs(self, M, n, N):
return self._random_state.hypergeometric(n, M-n, N, size=self._size)
def _argcheck(self, M, n, N):
cond = (M > 0) & (n >= 0) & (N >= 0)
cond &= (n <= M) & (N <= M)
self.a = max(N-(M-n), 0)
self.b = min(n, N)
return cond
def _logpmf(self, k, M, n, N):
tot, good = M, n
bad = tot - good
return gamln(good+1) - gamln(good-k+1) - gamln(k+1) + gamln(bad+1) \
- gamln(bad-N+k+1) - gamln(N-k+1) - gamln(tot+1) + gamln(tot-N+1) \
+ gamln(N+1)
def _pmf(self, k, M, n, N):
# equivalent to the following formula, but numerically more precise
# return comb(good, k) * comb(bad, N-k) / comb(tot, N)
return exp(self._logpmf(k, M, n, N))
def _stats(self, M, n, N):
# tot, good, sample_size = M, n, N
# "wikipedia".replace('N', 'M').replace('n', 'N').replace('K', 'n')
M, n, N = 1.*M, 1.*n, 1.*N
m = M - n
p = n/M
mu = N*p
var = m*n*N*(M - N)*1.0/(M*M*(M-1))
g1 = (m - n)*(M-2*N) / (M-2.0) * sqrt((M-1.0) / (m*n*N*(M-N)))
g2 = M*(M+1) - 6.*N*(M-N) - 6.*n*m
g2 *= (M-1)*M*M
g2 += 6.*n*N*(M-N)*m*(5.*M-6)
g2 /= n * N * (M-N) * m * (M-2.) * (M-3.)
return mu, var, g1, g2
def _entropy(self, M, n, N):
k = np.r_[N - (M - n):min(n, N) + 1]
vals = self.pmf(k, M, n, N)
return np.sum(entr(vals), axis=0)
def _sf(self, k, M, n, N):
"""More precise calculation, 1 - cdf doesn't cut it."""
# This for loop is needed because `k` can be an array. If that's the
# case, the sf() method makes M, n and N arrays of the same shape. We
# therefore unpack all inputs args, so we can do the manual
# integration.
res = []
for quant, tot, good, draw in zip(k, M, n, N):
# Manual integration over probability mass function. More accurate
# than integrate.quad.
k2 = np.arange(quant + 1, draw + 1)
res.append(np.sum(self._pmf(k2, tot, good, draw)))
return np.asarray(res)
def _logsf(self, k, M, n, N):
"""
More precise calculation than log(sf)
"""
res = []
for quant, tot, good, draw in zip(k, M, n, N):
# Integration over probability mass function using logsumexp
k2 = np.arange(quant + 1, draw + 1)
res.append(logsumexp(self._logpmf(k2, tot, good, draw)))
return np.asarray(res)
hypergeom = hypergeom_gen(name='hypergeom')
# FIXME: Fails _cdfvec
class logser_gen(rv_discrete):
"""A Logarithmic (Log-Series, Series) discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `logser` is::
logser.pmf(k) = - p**k / (k*log(1-p))
for ``k >= 1``.
`logser` takes ``p`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, p):
# looks wrong for p>0.5, too few k=1
# trying to use generic is worse, no k=1 at all
return self._random_state.logseries(p, size=self._size)
def _argcheck(self, p):
return (p > 0) & (p < 1)
def _pmf(self, k, p):
return -np.power(p, k) * 1.0 / k / log(1 - p)
def _stats(self, p):
r = log(1 - p)
mu = p / (p - 1.0) / r
mu2p = -p / r / (p - 1.0)**2
var = mu2p - mu*mu
mu3p = -p / r * (1.0+p) / (1.0 - p)**3
mu3 = mu3p - 3*mu*mu2p + 2*mu**3
g1 = mu3 / np.power(var, 1.5)
mu4p = -p / r * (
1.0 / (p-1)**2 - 6*p / (p - 1)**3 + 6*p*p / (p-1)**4)
mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4
g2 = mu4 / var**2 - 3.0
return mu, var, g1, g2
logser = logser_gen(a=1, name='logser', longname='A logarithmic')
class poisson_gen(rv_discrete):
"""A Poisson discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `poisson` is::
poisson.pmf(k) = exp(-mu) * mu**k / k!
for ``k >= 0``.
`poisson` takes ``mu`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, mu):
return self._random_state.poisson(mu, self._size)
def _logpmf(self, k, mu):
Pk = k*log(mu)-gamln(k+1) - mu
return Pk
def _pmf(self, k, mu):
return exp(self._logpmf(k, mu))
def _cdf(self, x, mu):
k = floor(x)
return special.pdtr(k, mu)
def _sf(self, x, mu):
k = floor(x)
return special.pdtrc(k, mu)
def _ppf(self, q, mu):
vals = ceil(special.pdtrik(q, mu))
vals1 = np.maximum(vals - 1, 0)
temp = special.pdtr(vals1, mu)
return np.where(temp >= q, vals1, vals)
def _stats(self, mu):
var = mu
tmp = np.asarray(mu)
g1 = sqrt(1.0 / tmp)
g2 = 1.0 / tmp
return mu, var, g1, g2
poisson = poisson_gen(name="poisson", longname='A Poisson')
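# Illustrative sanity check: poisson.pmf(0, mu=1.0) = exp(-1) ~ 0.3679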
class planck_gen(rv_discrete):
"""A Planck discrete exponential random variable.
%(before_notes)s
Notes
-----
The probability mass function for `planck` is::
planck.pmf(k) = (1-exp(-lambda_))*exp(-lambda_*k)
for ``k*lambda_ >= 0``.
`planck` takes ``lambda_`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _argcheck(self, lambda_):
if (lambda_ > 0):
self.a = 0
self.b = np.inf
return 1
elif (lambda_ < 0):
self.a = -np.inf
self.b = 0
return 1
else:
return 0
def _pmf(self, k, lambda_):
fact = (1-exp(-lambda_))
return fact*exp(-lambda_*k)
def _cdf(self, x, lambda_):
k = floor(x)
return 1-exp(-lambda_*(k+1))
def _ppf(self, q, lambda_):
vals = ceil(-1.0/lambda_ * log1p(-q)-1)
vals1 = (vals-1).clip(self.a, np.inf)
temp = self._cdf(vals1, lambda_)
return np.where(temp >= q, vals1, vals)
def _stats(self, lambda_):
mu = 1/(exp(lambda_)-1)
var = exp(-lambda_)/(expm1(-lambda_))**2
g1 = 2*cosh(lambda_/2.0)
g2 = 4+2*cosh(lambda_)
return mu, var, g1, g2
def _entropy(self, lambda_):
l = lambda_
C = (1-exp(-l))
return l*exp(-l)/C - log(C)
planck = planck_gen(name='planck', longname='A discrete exponential ')
class boltzmann_gen(rv_discrete):
"""A Boltzmann (Truncated Discrete Exponential) random variable.
%(before_notes)s
Notes
-----
The probability mass function for `boltzmann` is::
boltzmann.pmf(k) = (1-exp(-lambda_)*exp(-lambda_*k)/(1-exp(-lambda_*N))
for ``k = 0,..., N-1``.
`boltzmann` takes ``lambda_`` and ``N`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _pmf(self, k, lambda_, N):
fact = (1-exp(-lambda_))/(1-exp(-lambda_*N))
return fact*exp(-lambda_*k)
def _cdf(self, x, lambda_, N):
k = floor(x)
return (1-exp(-lambda_*(k+1)))/(1-exp(-lambda_*N))
def _ppf(self, q, lambda_, N):
qnew = q*(1-exp(-lambda_*N))
vals = ceil(-1.0/lambda_ * log(1-qnew)-1)
vals1 = (vals-1).clip(0.0, np.inf)
temp = self._cdf(vals1, lambda_, N)
return np.where(temp >= q, vals1, vals)
def _stats(self, lambda_, N):
z = exp(-lambda_)
zN = exp(-lambda_*N)
mu = z/(1.0-z)-N*zN/(1-zN)
var = z/(1.0-z)**2 - N*N*zN/(1-zN)**2
trm = (1-zN)/(1-z)
trm2 = (z*trm**2 - N*N*zN)
g1 = z*(1+z)*trm**3 - N**3*zN*(1+zN)
g1 = g1 / trm2**(1.5)
g2 = z*(1+4*z+z*z)*trm**4 - N**4 * zN*(1+4*zN+zN*zN)
g2 = g2 / trm2 / trm2
return mu, var, g1, g2
boltzmann = boltzmann_gen(name='boltzmann',
longname='A truncated discrete exponential ')
class randint_gen(rv_discrete):
"""A uniform discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `randint` is::
randint.pmf(k) = 1./(high - low)
for ``k = low, ..., high - 1``.
`randint` takes ``low`` and ``high`` as shape parameters.
Note the difference to the numpy ``random_integers`` which
returns integers on a *closed* interval ``[low, high]``.
%(after_notes)s
%(example)s
"""
def _argcheck(self, low, high):
self.a = low
self.b = high - 1
return (high > low)
def _pmf(self, k, low, high):
p = np.ones_like(k) / (high - low)
return np.where((k >= low) & (k < high), p, 0.)
def _cdf(self, x, low, high):
k = floor(x)
return (k - low + 1.) / (high - low)
def _ppf(self, q, low, high):
vals = ceil(q * (high - low) + low) - 1
vals1 = (vals - 1).clip(low, high)
temp = self._cdf(vals1, low, high)
return np.where(temp >= q, vals1, vals)
def _stats(self, low, high):
m2, m1 = np.asarray(high), np.asarray(low)
mu = (m2 + m1 - 1.0) / 2
d = m2 - m1
var = (d*d - 1) / 12.0
g1 = 0.0
g2 = -6.0/5.0 * (d*d + 1.0) / (d*d - 1.0)
return mu, var, g1, g2
def _rvs(self, low, high=None):
"""An array of *size* random integers >= ``low`` and < ``high``.
If ``high`` is ``None``, then range is >=0 and < low
"""
return self._random_state.randint(low, high, self._size)
def _entropy(self, low, high):
return log(high - low)
randint = randint_gen(name='randint', longname='A discrete uniform '
'(random integer)')
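# Illustrative sanity check: randint.pmf(k, low=0, high=10) = 0.1 for every k in 0, ..., 9 (and 0 elsewhere)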
# FIXME: problems sampling.
class zipf_gen(rv_discrete):
"""A Zipf discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `zipf` is::
zipf.pmf(k, a) = 1/(zeta(a) * k**a)
for ``k >= 1``.
`zipf` takes ``a`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, a):
return self._random_state.zipf(a, size=self._size)
def _argcheck(self, a):
return a > 1
def _pmf(self, k, a):
Pk = 1.0 / special.zeta(a, 1) / k**a
return Pk
def _munp(self, n, a):
return _lazywhere(
a > n + 1, (a, n),
lambda a, n: special.zeta(a - n, 1) / special.zeta(a, 1),
np.inf)
zipf = zipf_gen(a=1, name='zipf', longname='A Zipf')
class dlaplace_gen(rv_discrete):
"""A Laplacian discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `dlaplace` is::
dlaplace.pmf(k) = tanh(a/2) * exp(-a*abs(k))
for ``a > 0``.
`dlaplace` takes ``a`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _pmf(self, k, a):
return tanh(a/2.0) * exp(-a * abs(k))
def _cdf(self, x, a):
k = floor(x)
f = lambda k, a: 1.0 - exp(-a * k) / (exp(a) + 1)
f2 = lambda k, a: exp(a * (k+1)) / (exp(a) + 1)
return _lazywhere(k >= 0, (k, a), f=f, f2=f2)
def _ppf(self, q, a):
const = 1 + exp(a)
vals = ceil(np.where(q < 1.0 / (1 + exp(-a)), log(q*const) / a - 1,
-log((1-q) * const) / a))
vals1 = vals - 1
return np.where(self._cdf(vals1, a) >= q, vals1, vals)
def _stats(self, a):
ea = exp(a)
mu2 = 2.*ea/(ea-1.)**2
mu4 = 2.*ea*(ea**2+10.*ea+1.) / (ea-1.)**4
return 0., mu2, 0., mu4/mu2**2 - 3.
def _entropy(self, a):
return a / sinh(a) - log(tanh(a/2.0))
dlaplace = dlaplace_gen(a=-np.inf,
name='dlaplace', longname='A discrete Laplacian')
class skellam_gen(rv_discrete):
"""A Skellam discrete random variable.
%(before_notes)s
Notes
-----
Probability distribution of the difference of two correlated or
uncorrelated Poisson random variables.
Let k1 and k2 be two Poisson-distributed r.v. with expected values
lam1 and lam2. Then, ``k1 - k2`` follows a Skellam distribution with
parameters ``mu1 = lam1 - rho*sqrt(lam1*lam2)`` and
``mu2 = lam2 - rho*sqrt(lam1*lam2)``, where rho is the correlation
coefficient between k1 and k2. If the two Poisson-distributed r.v.
are independent then ``rho = 0``.
Parameters mu1 and mu2 must be strictly positive.
For details see: http://en.wikipedia.org/wiki/Skellam_distribution
`skellam` takes ``mu1`` and ``mu2`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, mu1, mu2):
n = self._size
return (self._random_state.poisson(mu1, n) -
self._random_state.poisson(mu2, n))
def _pmf(self, x, mu1, mu2):
px = np.where(x < 0,
_ncx2_pdf(2*mu2, 2*(1-x), 2*mu1)*2,
_ncx2_pdf(2*mu1, 2*(1+x), 2*mu2)*2)
# ncx2.pdf() returns nan's for extremely low probabilities
return px
def _cdf(self, x, mu1, mu2):
x = floor(x)
px = np.where(x < 0,
_ncx2_cdf(2*mu2, -2*x, 2*mu1),
1-_ncx2_cdf(2*mu1, 2*(x+1), 2*mu2))
return px
def _stats(self, mu1, mu2):
mean = mu1 - mu2
var = mu1 + mu2
g1 = mean / sqrt((var)**3)
g2 = 1 / var
return mean, var, g1, g2
skellam = skellam_gen(a=-np.inf, name="skellam", longname='A Skellam')
# Collect names of classes and objects in this module.
pairs = list(globals().items())
_distn_names, _distn_gen_names = get_distribution_names(pairs, rv_discrete)
__all__ = _distn_names + _distn_gen_names
| bsd-3-clause |
taylorwood/Kaggle.HomeDepot | ProjectSearchRelevance.Python/ProductSearchRelevance/sklearn_random_forest.py | 2 | 2812 | import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor, BaggingRegressor
from nltk.stem.snowball import SnowballStemmer
stemmer = SnowballStemmer('english')
df_train = pd.read_csv('C:/Git/Kaggle.HomeDepot/ProjectSearchRelevance.Python/ProductSearchRelevance/input/train.csv', encoding="ISO-8859-1")
df_test = pd.read_csv('C:/Git/Kaggle.HomeDepot/ProjectSearchRelevance.Python/ProductSearchRelevance/input/test.csv', encoding="ISO-8859-1")
df_pro_desc = pd.read_csv('C:/Git/Kaggle.HomeDepot/ProjectSearchRelevance.Python/ProductSearchRelevance/input/product_descriptions.csv')
#Get number of rows in the train dataframe
num_train = df_train.shape[0]
df_train.head()
#function that takes in a string s
#splits s into words
#then lowercases all of the words
#iterates through all of the words
#and gets the stem of each word
#returns the stemmed words joined by a space
def str_stemmer(s):
return " ".join([stemmer.stem(word) for word in s.lower().split()])
str_stemmer("angle bracket")
#given two strings
#count how many of the words in str1 appear in str2
def str_common_word(str1, str2):
return sum(int(str2.find(word)>=0) for word in str1.split())
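#e.g. str_common_word("angle bracket", "angle iron bracket") returns 2 because both words
#occur in the second string (note: this uses substring matching, so partial matches also count)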
#bring train and test together
#then bring in description
#for 1 massive data frame
df_all = pd.concat((df_train, df_test), axis=0, ignore_index=True)
df_all = pd.merge(df_all, df_pro_desc, how='left', on='product_uid')
df_all.describe()
df_all.head()
#stem all of the different fields
df_all['search_term'] = df_all['search_term'].map(lambda x:str_stemmer(x))
df_all['product_title'] = df_all['product_title'].map(lambda x:str_stemmer(x))
df_all['product_description'] = df_all['product_description'].map(lambda x:str_stemmer(x))
df_all['len_of_query'] = df_all['search_term'].map(lambda x:len(x.split())).astype(np.int64)
df_all['product_info'] = df_all['search_term']+"\t"+df_all['product_title']+"\t"+df_all['product_description']
df_all['word_in_title'] = df_all['product_info'].map(lambda x:str_common_word(x.split('\t')[0],x.split('\t')[1]))
df_all['word_in_description'] = df_all['product_info'].map(lambda x:str_common_word(x.split('\t')[0],x.split('\t')[2]))
df_all = df_all.drop(['search_term','product_title','product_description','product_info'],axis=1)
df_train = df_all.iloc[:num_train]
df_test = df_all.iloc[num_train:]
id_test = df_test['id']
y_train = df_train['relevance'].values
X_train = df_train.drop(['id','relevance'],axis=1).values
X_test = df_test.drop(['id','relevance'],axis=1).values
rf = RandomForestRegressor(n_estimators=15, max_depth=6, random_state=0)
clf = BaggingRegressor(rf, n_estimators=45, max_samples=0.1, random_state=25)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
#C:\Users\DIXON15\Documents\Python Scripts
pd.DataFrame({"id": id_test, "relevance": y_pred}).to_csv('pythonSubmission.csv',index=False) | mit |
DinoCow/airflow | setup.py | 2 | 27515 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Setup.py for the Airflow project."""
import logging
import os
import subprocess
import sys
import unittest
from os.path import dirname
from textwrap import wrap
from typing import Dict, Iterable, List
from setuptools import Command, Distribution, find_namespace_packages, setup
logger = logging.getLogger(__name__)
version = '2.0.0'
PY3 = sys.version_info[0] == 3
my_dir = dirname(__file__)
def airflow_test_suite():
"""Test suite for Airflow tests"""
test_loader = unittest.TestLoader()
test_suite = test_loader.discover(os.path.join(my_dir, 'tests'), pattern='test_*.py')
return test_suite
class CleanCommand(Command):
"""
Command to tidy up the project root.
Registered as cmdclass in setup() so it can be called with ``python setup.py extra_clean``.
"""
description = "Tidy up the project root"
user_options = [] # type: List[str]
def initialize_options(self):
"""Set default values for options."""
def finalize_options(self):
"""Set final values for options."""
def run(self): # noqa
"""Run command to remove temporary files and directories."""
os.chdir(my_dir)
os.system('rm -vrf ./build ./dist ./*.pyc ./*.tgz ./*.egg-info')
class CompileAssets(Command):
"""
Compile and build the frontend assets using yarn and webpack.
Registered as cmdclass in setup() so it can be called with ``python setup.py compile_assets``.
"""
description = "Compile and build the frontend assets"
user_options = [] # type: List[str]
def initialize_options(self):
"""Set default values for options."""
def finalize_options(self):
"""Set final values for options."""
def run(self): # noqa
"""Run a command to compile and build assets."""
subprocess.check_call('./airflow/www/compile_assets.sh')
class ListExtras(Command):
"""
List all available extras
Registered as cmdclass in setup() so it can be called with ``python setup.py list_extras``.
"""
description = "List available extras"
user_options = [] # type: List[str]
def initialize_options(self):
"""Set default values for options."""
def finalize_options(self):
"""Set final values for options."""
def run(self): # noqa
"""List extras."""
print("\n".join(wrap(", ".join(EXTRAS_REQUIREMENTS.keys()), 100)))
def git_version(version_: str) -> str:
"""
Return a version to identify the state of the underlying git repo. The version will
indicate whether the head of the current git-backed working directory is tied to a
release tag or not : it will indicate the former with a 'release:{version}' prefix
and the latter with a 'dev0' prefix. Following the prefix will be a sha of the current
branch head. Finally, a "dirty" suffix is appended to indicate that uncommitted
changes are present.
:param str version_: Semver version
:return: Found Airflow version in Git repo
:rtype: str
"""
try:
import git
try:
repo = git.Repo(os.path.join(*[my_dir, '.git']))
except git.NoSuchPathError:
logger.warning('.git directory not found: Cannot compute the git version')
return ''
except git.InvalidGitRepositoryError:
logger.warning('Invalid .git directory not found: Cannot compute the git version')
return ''
except ImportError:
logger.warning('gitpython not found: Cannot compute the git version.')
return ''
if repo:
sha = repo.head.commit.hexsha
if repo.is_dirty():
return f'.dev0+{sha}.dirty'
# commit is clean
return f'.release:{version_}+{sha}'
else:
return 'no_git_version'
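# For example, a clean checkout of a tagged release yields something like
# '.release:2.0.0+<sha>', while a checkout with uncommitted changes yields '.dev0+<sha>.dirty'.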
def write_version(filename: str = os.path.join(*[my_dir, "airflow", "git_version"])):
"""
Write the Semver version + git hash to file, e.g. ".dev0+2f635dc265e78db6708f59f68e8009abb92c1e65".
:param str filename: Destination file to write
"""
text = "{}".format(git_version(version))
with open(filename, 'w') as file:
file.write(text)
if os.environ.get('USE_THEME_FROM_GIT'):
_SPHINX_AIRFLOW_THEME_URL = (
"@ https://github.com/apache/airflow-site/releases/download/0.0.4/"
"sphinx_airflow_theme-0.0.4-py3-none-any.whl"
)
else:
_SPHINX_AIRFLOW_THEME_URL = ''
# 'Start dependencies group' and 'Start dependencies group' are mark for ./scripts/ci/check_order_setup.py
# If you change this mark you should also change ./scripts/ci/check_order_setup.py
# Start dependencies group
amazon = [
'boto3>=1.15.0,<1.16.0',
'botocore>=1.18.0,<1.19.0',
'watchtower~=0.7.3',
]
apache_beam = [
'apache-beam[gcp]',
]
async_packages = [
'eventlet>= 0.9.7',
'gevent>=0.13',
'greenlet>=0.4.9',
]
atlas = [
'atlasclient>=0.1.2',
]
azure = [
'azure-batch>=8.0.0',
'azure-cosmos>=3.0.1,<4',
'azure-datalake-store>=0.0.45',
'azure-identity>=1.3.1',
'azure-keyvault>=4.1.0',
'azure-kusto-data>=0.0.43,<0.1',
'azure-mgmt-containerinstance>=1.5.0,<2.0',
'azure-mgmt-datalake-store>=0.5.0',
'azure-mgmt-resource>=2.2.0',
'azure-storage>=0.34.0, <0.37.0',
]
cassandra = [
'cassandra-driver>=3.13.0,<3.21.0',
]
celery = [
'celery~=4.4.2',
'flower>=0.7.3, <1.0',
'vine~=1.3', # https://stackoverflow.com/questions/32757259/celery-no-module-named-five
]
cgroups = [
'cgroupspy>=0.1.4',
]
cloudant = [
'cloudant>=2.0',
]
dask = ['cloudpickle>=1.4.1, <1.5.0', 'distributed>=2.11.1, <2.20']
databricks = [
'requests>=2.20.0, <3',
]
datadog = [
'datadog>=0.14.0',
]
doc = [
'sphinx>=2.1.2',
'sphinx-argparse>=0.1.13',
'sphinx-autoapi==1.0.0',
'sphinx-copybutton',
'sphinx-jinja~=1.1',
'sphinx-rtd-theme>=0.1.6',
'sphinxcontrib-httpdomain>=1.7.0',
"sphinxcontrib-redoc>=1.6.0",
"sphinxcontrib-spelling==5.2.1",
f"sphinx-airflow-theme{_SPHINX_AIRFLOW_THEME_URL}",
]
docker = [
'docker~=3.0',
]
druid = [
'pydruid>=0.4.1',
]
elasticsearch = [
'elasticsearch>7, <7.6.0',
'elasticsearch-dbapi==0.1.0',
'elasticsearch-dsl>=5.0.0',
]
exasol = [
'pyexasol>=0.5.1,<1.0.0',
]
facebook = [
'facebook-business>=6.0.2',
]
flask_oauth = [
'Flask-OAuthlib>=0.9.1,<0.9.6', # Flask OAuthLib 0.9.6 requires Flask-Login 0.5.0 - breaks FAB
'oauthlib!=2.0.3,!=2.0.4,!=2.0.5,<3.0.0,>=1.1.2',
'requests-oauthlib<1.2.0',
]
google = [
'PyOpenSSL',
'google-ads>=4.0.0,<8.0.0',
'google-api-python-client>=1.6.0,<2.0.0',
'google-auth>=1.0.0,<2.0.0',
'google-auth-httplib2>=0.0.1',
'google-cloud-automl>=0.4.0,<2.0.0',
'google-cloud-bigquery-datatransfer>=0.4.0,<2.0.0',
'google-cloud-bigtable>=1.0.0,<2.0.0',
'google-cloud-container>=0.1.1,<2.0.0',
'google-cloud-datacatalog>=1.0.0,<2.0.0',
'google-cloud-dataproc>=1.0.1,<2.0.0',
'google-cloud-dlp>=0.11.0,<2.0.0',
'google-cloud-kms>=2.0.0,<3.0.0',
'google-cloud-language>=1.1.1,<2.0.0',
'google-cloud-logging>=1.14.0,<2.0.0',
'google-cloud-memcache>=0.2.0',
'google-cloud-monitoring>=0.34.0,<2.0.0',
'google-cloud-os-login>=2.0.0,<3.0.0',
'google-cloud-pubsub>=2.0.0,<3.0.0',
'google-cloud-redis>=2.0.0,<3.0.0',
'google-cloud-secret-manager>=0.2.0,<2.0.0',
'google-cloud-spanner>=1.10.0,<2.0.0',
'google-cloud-speech>=0.36.3,<2.0.0',
'google-cloud-storage>=1.16,<2.0.0',
'google-cloud-tasks>=1.2.1,<2.0.0',
'google-cloud-texttospeech>=0.4.0,<2.0.0',
'google-cloud-translate>=1.5.0,<2.0.0',
'google-cloud-videointelligence>=1.7.0,<2.0.0',
'google-cloud-vision>=0.35.2,<2.0.0',
'grpcio-gcp>=0.2.2',
'pandas-gbq',
]
grpc = [
'google-auth>=1.0.0, <2.0.0dev',
'google-auth-httplib2>=0.0.1',
'grpcio>=1.15.0',
]
hashicorp = [
'hvac~=0.10',
]
hdfs = [
'snakebite-py3',
]
hive = [
'hmsclient>=0.1.0',
'pyhive[hive]>=0.6.0',
]
jdbc = [
'jaydebeapi>=1.1.1',
]
jenkins = [
'python-jenkins>=1.0.0',
]
jira = [
'JIRA>1.0.7',
]
kerberos = [
'pykerberos>=1.1.13',
'requests_kerberos>=0.10.0',
'thrift_sasl>=0.2.0',
]
kubernetes = [
'cryptography>=2.0.0',
'kubernetes>=3.0.0, <12.0.0',
]
kylin = ['kylinpy>=2.6']
ldap = [
'ldap3>=2.5.1',
]
mongo = [
'dnspython>=1.13.0,<2.0.0',
'pymongo>=3.6.0',
]
mssql = [
'pymssql~=2.1,>=2.1.5',
]
mysql = [
'mysql-connector-python>=8.0.11, <=8.0.18',
'mysqlclient>=1.3.6,<1.4',
]
odbc = [
'pyodbc',
]
oracle = [
'cx_Oracle>=5.1.2',
]
pagerduty = [
'pdpyras>=4.1.2,<5',
]
papermill = [
'papermill[all]>=1.2.1',
'nteract-scrapbook[all]>=0.3.1',
]
password = [
'bcrypt>=2.0.0',
'flask-bcrypt>=0.7.1',
]
pinot = [
'pinotdb==0.1.1',
]
plexus = [
'arrow>=0.16.0',
]
postgres = [
'psycopg2-binary>=2.7.4',
]
presto = ['presto-python-client>=0.7.0,<0.8']
qubole = [
'qds-sdk>=1.10.4',
]
rabbitmq = [
'amqp<5.0.0',
]
redis = [
'redis~=3.2',
]
salesforce = [
'simple-salesforce>=1.0.0',
]
samba = [
'pysmbclient>=0.1.3',
]
segment = [
'analytics-python>=1.2.9',
]
sendgrid = [
'sendgrid>=6.0.0,<7',
]
sentry = [
'blinker>=1.1',
'sentry-sdk>=0.8.0',
]
singularity = ['spython>=0.0.56']
slack = [
'slackclient>=2.0.0,<3.0.0',
]
snowflake = [
# The `azure` provider uses legacy `azure-storage` library, where `snowflake` uses the
# newer and more stable versions of those libraries. Most of `azure` operators and hooks work
# fine together with `snowflake` because the deprecated library does not overlap with the
# new libraries except the `blob` classes. So while `azure` works fine for most cases
# blob is the only exception
# Solution to that is being worked on in https://github.com/apache/airflow/pull/12188
# once it is merged, we can move those two back to `azure` extra.
'azure-storage-blob',
'azure-storage-common',
# snowflake is not compatible with latest version.
# This library monkey patches the requests library, so SSL is broken globally.
# See: https://github.com/snowflakedb/snowflake-connector-python/issues/324
'requests<2.24.0',
'snowflake-connector-python>=1.5.2',
'snowflake-sqlalchemy>=1.1.0',
]
spark = [
'pyspark',
]
ssh = [
'paramiko>=2.6.0',
'pysftp>=0.2.9',
'sshtunnel>=0.1.4,<0.2',
]
statsd = [
'statsd>=3.3.0, <4.0',
]
tableau = [
'tableauserverclient~=0.12',
]
telegram = [
'python-telegram-bot==13.0',
]
vertica = [
'vertica-python>=0.5.1',
]
virtualenv = [
'virtualenv',
]
webhdfs = [
'hdfs[avro,dataframe,kerberos]>=2.0.4',
]
winrm = [
'pywinrm~=0.4',
]
yandex = [
'yandexcloud>=0.22.0',
]
zendesk = [
'zdesk',
]
# End dependencies group
all_dbs = (
cassandra
+ cloudant
+ druid
+ exasol
+ hdfs
+ hive
+ mongo
+ mssql
+ mysql
+ pinot
+ postgres
+ presto
+ vertica
)
############################################################################################################
# IMPORTANT NOTE!!!!!!!!!!!!!!!
# IF you are removing dependencies from this list, please make sure that you also increase
# DEPENDENCIES_EPOCH_NUMBER in the Dockerfile.ci
############################################################################################################
devel = [
'beautifulsoup4~=4.7.1',
'black',
'blinker',
'bowler',
'click~=7.1',
'contextdecorator;python_version<"3.4"',
'coverage',
'docutils',
'flake8>=3.6.0',
'flake8-colors',
'flaky',
'freezegun',
'github3.py',
'gitpython',
'importlib-resources~=1.4',
'ipdb',
'jira',
'mongomock',
'moto',
'parameterized',
'paramiko',
'pipdeptree',
'pre-commit',
'pylint==2.6.0',
'pysftp',
'pytest',
'pytest-cov',
'pytest-instafail',
'pytest-rerunfailures',
'pytest-timeouts',
'pytest-xdist',
'pywinrm',
'qds-sdk>=1.9.6',
'requests_mock',
'testfixtures',
'wheel',
'yamllint',
]
############################################################################################################
# IMPORTANT NOTE!!!!!!!!!!!!!!!
# If you are removing dependencies from the above list, please make sure that you also increase
# DEPENDENCIES_EPOCH_NUMBER in the Dockerfile.ci
############################################################################################################
if PY3:
devel += ['mypy==0.770']
else:
devel += ['unittest2']
devel_minreq = cgroups + devel + doc + kubernetes + mysql + password
devel_hadoop = devel_minreq + hdfs + hive + kerberos + presto + webhdfs
############################################################################################################
# IMPORTANT NOTE!!!!!!!!!!!!!!!
# If you have a 'pip check' problem with dependencies, it might be because some dependency has been
# installed via 'install_requires' in setup.cfg in higher version than required in one of the options below.
# For example pip check was failing with requests=2.25.1 installed even if in some dependencies below
# < 2.24.0 was specified for it. Solution in such case is to add such limiting requirement to
# install_requires in setup.cfg (we've added requests<2.24.0 there to limit requests library).
# This should be done with appropriate comment explaining why the requirement was added.
############################################################################################################
# Those are requirements that each provider package has
PROVIDERS_REQUIREMENTS: Dict[str, Iterable[str]] = {
"amazon": amazon,
"apache.cassandra": cassandra,
"apache.druid": druid,
"apache.hdfs": hdfs,
"apache.hive": hive,
"apache.kylin": kylin,
"apache.livy": [],
"apache.pig": [],
"apache.pinot": pinot,
"apache.spark": spark,
"apache.sqoop": [],
"celery": celery,
"cloudant": cloudant,
"cncf.kubernetes": kubernetes,
"databricks": databricks,
"datadog": datadog,
"dingding": [],
"discord": [],
"docker": docker,
"elasticsearch": elasticsearch,
"exasol": exasol,
"facebook": facebook,
"ftp": [],
"google": google,
"grpc": grpc,
"hashicorp": hashicorp,
"http": [],
"imap": [],
"jdbc": jdbc,
"jenkins": jenkins,
"jira": jira,
"microsoft.azure": azure,
"microsoft.mssql": mssql,
"microsoft.winrm": winrm,
"mongo": mongo,
"mysql": mysql,
"odbc": odbc,
"openfaas": [],
"opsgenie": [],
"oracle": oracle,
"pagerduty": pagerduty,
"papermill": papermill,
"plexus": plexus,
"postgres": postgres,
"presto": presto,
"qubole": qubole,
"redis": redis,
"salesforce": salesforce,
"samba": samba,
"segment": segment,
"sendgrid": sendgrid,
"sftp": ssh,
"singularity": singularity,
"slack": slack,
"snowflake": snowflake,
"sqlite": [],
"ssh": ssh,
"telegram": telegram,
"vertica": vertica,
"yandex": yandex,
"zendesk": zendesk,
}
# Those are requirements that each extra has. For extras that match the providers
# the requirements are identical as in the list above, but we have still a few aliases
# that have different set of requirements.
EXTRAS_REQUIREMENTS: Dict[str, List[str]] = {
'all_dbs': all_dbs,
'amazon': amazon,
'apache.atlas': atlas,
'apache.beam': apache_beam,
"apache.cassandra": cassandra,
"apache.druid": druid,
"apache.hdfs": hdfs,
"apache.hive": hive,
"apache.kylin": kylin,
"apache.livy": [],
"apache.pig": [],
"apache.pinot": pinot,
"apache.spark": spark,
"apache.sqoop": [],
"apache.webhdfs": webhdfs,
'async': async_packages,
'atlas': atlas, # TODO: remove this in Airflow 3.0
'aws': amazon, # TODO: remove this in Airflow 3.0
'azure': azure, # TODO: remove this in Airflow 3.0
'cassandra': cassandra, # TODO: remove this in Airflow 3.0
'celery': celery,
'cgroups': cgroups,
'cloudant': cloudant,
'cncf.kubernetes': kubernetes,
'crypto': [], # TODO: remove this in Airflow 3.0
'dask': dask,
'databricks': databricks,
'datadog': datadog,
'dingding': [],
'discord': [],
'docker': docker,
'druid': druid, # TODO: remove this in Airflow 3.0
'elasticsearch': elasticsearch,
'exasol': exasol,
'facebook': facebook,
'ftp': [],
'gcp': google, # TODO: remove this in Airflow 3.0
'gcp_api': google, # TODO: remove this in Airflow 3.0
'github_enterprise': flask_oauth,
'google': google,
'google_auth': flask_oauth,
'grpc': grpc,
'hashicorp': hashicorp,
'hdfs': hdfs, # TODO: remove this in Airflow 3.0
'hive': hive, # TODO: remove this in Airflow 3.0
'http': [],
'imap': [],
'jdbc': jdbc,
'jenkins': [],
'jira': jira,
'kerberos': kerberos,
'kubernetes': kubernetes, # TODO: remove this in Airflow 3.0
'ldap': ldap,
"microsoft.azure": azure,
"microsoft.mssql": mssql,
"microsoft.winrm": winrm,
'mongo': mongo,
'mssql': mssql, # TODO: remove this in Airflow 3.0
'mysql': mysql,
'odbc': odbc,
'openfaas': [],
'opsgenie': [],
'oracle': oracle,
'pagerduty': pagerduty,
'papermill': papermill,
'password': password,
'pinot': pinot, # TODO: remove this in Airflow 3.0
'plexus': plexus,
'postgres': postgres,
'presto': presto,
'qds': qubole, # TODO: remove this in Airflow 3.0
'qubole': qubole,
'rabbitmq': rabbitmq,
'redis': redis,
's3': amazon, # TODO: remove this in Airflow 3.0
'salesforce': salesforce,
'samba': samba,
'segment': segment,
'sendgrid': sendgrid,
'sentry': sentry,
'sftp': [],
'singularity': singularity,
'slack': slack,
'snowflake': snowflake,
'spark': spark,
'sqlite': [],
'ssh': ssh,
'statsd': statsd,
'tableau': tableau,
'telegram': telegram,
'vertica': vertica,
'virtualenv': virtualenv,
'webhdfs': webhdfs, # TODO: remove this in Airflow 3.0
'winrm': winrm, # TODO: remove this in Airflow 3.0
'yandex': yandex,
'zendesk': [],
}
# Those are airflow providers added for the extras in many cases extra = provider
# But for aliases and some special aliases (like all_dbs) the list might be longer.
EXTRAS_PROVIDERS_PACKAGES: Dict[str, Iterable[str]] = {
'all': list(PROVIDERS_REQUIREMENTS.keys()),
# this is not 100% accurate with devel_ci and devel_all definition, but we really want
# to have all providers when devel_ci extra is installed!
'devel_ci': list(PROVIDERS_REQUIREMENTS.keys()),
'devel_all': list(PROVIDERS_REQUIREMENTS.keys()),
'all_dbs': [
"apache.cassandra",
"apache.druid",
"apache.hdfs",
"apache.hive",
"apache.pinot",
"cloudant",
"exasol",
"mongo",
"microsoft.mssql",
"mysql",
"postgres",
"presto",
"vertica",
],
'amazon': ["amazon"],
'apache.atlas': [],
'apache.beam': [],
"apache.cassandra": ["apache.cassandra"],
"apache.druid": ["apache.druid"],
"apache.hdfs": ["apache.hdfs"],
"apache.hive": ["apache.hive"],
"apache.kylin": ["apache.kylin"],
"apache.livy": ["apache.livy"],
"apache.pig": ["apache.pig"],
"apache.pinot": ["apache.pinot"],
"apache.spark": ["apache.spark"],
"apache.sqoop": ["apache.sqoop"],
"apache.webhdfs": ["apache.hdfs"],
'async': [],
'atlas': [], # TODO: remove this in Airflow 3.0
'aws': ["amazon"], # TODO: remove this in Airflow 3.0
'azure': ["microsoft.azure"], # TODO: remove this in Airflow 3.0
'cassandra': ["apache.cassandra"], # TODO: remove this in Airflow 3.0
'celery': ["celery"],
'cgroups': [],
'cloudant': ["cloudant"],
'cncf.kubernetes': ["cncf.kubernetes"],
'crypto': [], # TODO: remove this in Airflow 3.0
'dask': [],
'databricks': ["databricks"],
'datadog': ["datadog"],
'devel': ["cncf.kubernetes", "mysql"],
'devel_hadoop': ["apache.hdfs", "apache.hive", "presto"],
'dingding': ["dingding"],
'discord': ["discord"],
'doc': [],
'docker': ["docker"],
'druid': ["apache.druid"], # TODO: remove this in Airflow 3.0
'elasticsearch': ["elasticsearch"],
'exasol': ["exasol"],
'facebook': ["facebook"],
'ftp': ["ftp"],
'gcp': ["google"], # TODO: remove this in Airflow 3.0
'gcp_api': ["google"], # TODO: remove this in Airflow 3.0
'github_enterprise': [],
'google': ["google"],
'google_auth': [],
'grpc': ["grpc"],
'hashicorp': ["hashicorp"],
'hdfs': ["apache.hdfs"], # TODO: remove this in Airflow 3.0
'hive': ["apache.hive"], # TODO: remove this in Airflow 3.0
'http': ["http"],
'imap': ["imap"],
'jdbc': ["jdbc"],
'jenkins': ["jenkins"],
'jira': ["jira"],
'kerberos': [],
'kubernetes': ["cncf.kubernetes"], # TODO: remove this in Airflow 3.0
'ldap': [],
"microsoft.azure": ["microsoft.azure"],
"microsoft.mssql": ["microsoft.mssql"],
"microsoft.winrm": ["microsoft.winrm"],
'mongo': ["mongo"],
'mssql': ["microsoft.mssql"], # TODO: remove this in Airflow 3.0
'mysql': ["mysql"],
'odbc': ["odbc"],
'openfaas': ["openfaas"],
'opsgenie': ["opsgenie"],
'oracle': ["oracle"],
'pagerduty': ["pagerduty"],
'papermill': ["papermill"],
'password': [],
'pinot': ["apache.pinot"], # TODO: remove this in Airflow 3.0
'plexus': ["plexus"],
'postgres': ["postgres"],
'presto': ["presto"],
'qds': ["qubole"], # TODO: remove this in Airflow 3.0
'qubole': ["qubole"],
'rabbitmq': [],
'redis': ["redis"],
's3': ["amazon"], # TODO: remove this in Airflow 3.0
'salesforce': ["salesforce"],
'samba': ["samba"],
'segment': ["segment"],
'sendgrid': ["sendgrid"],
'sentry': [],
'sftp': ["sftp"],
'singularity': ["singularity"],
'slack': ["slack"],
'snowflake': ["snowflake"],
'spark': ["apache.spark"],
'sqlite': ["sqlite"],
'ssh': ["ssh"],
'statsd': [],
'tableau': [],
'telegram': ["telegram"],
'vertica': ["vertica"],
'virtualenv': [],
'webhdfs': ["apache.hdfs"], # TODO: remove this in Airflow 3.0
'winrm': ["microsoft.winrm"], # TODO: remove this in Airflow 3.0
'yandex': ["yandex"],
'zendesk': ["zendesk"],
}
# Those are all "users" extras (no devel extras)
all_ = list(
set(
[req for req_list in EXTRAS_REQUIREMENTS.values() for req in req_list]
+ [req for req_list in PROVIDERS_REQUIREMENTS.values() for req in req_list]
)
)
# Those are special extras
EXTRAS_REQUIREMENTS.update(
{
'all': all_,
'devel': devel_minreq, # includes doc
'devel_hadoop': devel_hadoop, # includes devel_minreq
'doc': doc,
}
)
# This can be simplify to devel_hadoop + all_ due to inclusions
# but we keep it for explicit sake
devel_all = list(set(all_ + doc + devel_minreq + devel_hadoop))
# Those are packages excluded for "all" dependencies
PACKAGES_EXCLUDED_FOR_ALL = []
if PY3:
PACKAGES_EXCLUDED_FOR_ALL.extend(
[
'snakebite',
]
)
# Those packages are excluded because they break tests (downgrading mock) and they are
# not needed to run our test suite.
PACKAGES_EXCLUDED_FOR_CI = [
'apache-beam',
]
def is_package_excluded(package: str, exclusion_list: List[str]):
"""
Checks if package should be excluded.
:param package: package name (beginning of it)
:param exclusion_list: list of excluded packages
:return: true if package should be excluded
"""
return any(package.startswith(excluded_package) for excluded_package in exclusion_list)
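# e.g. is_package_excluded('apache-beam[gcp]', PACKAGES_EXCLUDED_FOR_CI) is True because the
# package name starts with the excluded prefix 'apache-beam'.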
devel_all = [
package
for package in devel_all
if not is_package_excluded(package=package, exclusion_list=PACKAGES_EXCLUDED_FOR_ALL)
]
devel_ci = [
package
for package in devel_all
if not is_package_excluded(
package=package, exclusion_list=PACKAGES_EXCLUDED_FOR_CI + PACKAGES_EXCLUDED_FOR_ALL
)
]
# Those are development requirements that install all useful devel tools
EXTRAS_REQUIREMENTS.update(
{
'devel_all': devel_all,
'devel_ci': devel_ci,
}
)
class AirflowDistribtuion(Distribution):
"""setuptools.Distribution subclass with Airflow specific behaviour"""
# https://github.com/PyCQA/pylint/issues/3737
def parse_config_files(self, *args, **kwargs): # pylint: disable=signature-differs
"""
Ensure that when we have been asked to install providers from sources
that we don't *also* try to install those providers from PyPI
"""
super().parse_config_files(*args, **kwargs)
if os.getenv('INSTALL_PROVIDERS_FROM_SOURCES') == 'true':
self.install_requires = [ # pylint: disable=attribute-defined-outside-init
req for req in self.install_requires if not req.startswith('apache-airflow-providers-')
]
def get_provider_package_from_package_id(package_id: str):
"""
Builds the name of the provider package out of the package id provided.
:param package_id: id of the package (like amazon or microsoft.azure)
:return: full name of package in PyPI
"""
package_suffix = package_id.replace(".", "-")
return f"apache-airflow-providers-{package_suffix}"
def do_setup():
"""Perform the Airflow package setup."""
setup_kwargs = {}
if os.getenv('INSTALL_PROVIDERS_FROM_SOURCES') == 'true':
# Only specify this if we need this option, otherwise let default from
# setup.cfg control this (kwargs in setup() call take priority)
setup_kwargs['packages'] = find_namespace_packages(include=['airflow*'])
else:
for key, value in EXTRAS_PROVIDERS_PACKAGES.items():
EXTRAS_REQUIREMENTS[key].extend(
[get_provider_package_from_package_id(package_name) for package_name in value]
)
write_version()
setup(
distclass=AirflowDistribtuion,
# Most values come from setup.cfg -- see
# https://setuptools.readthedocs.io/en/latest/userguide/declarative_config.html
version=version,
extras_require=EXTRAS_REQUIREMENTS,
download_url=('https://archive.apache.org/dist/airflow/' + version),
cmdclass={
'extra_clean': CleanCommand,
'compile_assets': CompileAssets,
'list_extras': ListExtras,
},
test_suite='setup.airflow_test_suite',
**setup_kwargs,
)
if __name__ == "__main__":
do_setup()
| apache-2.0 |
CUFCTL/DLBD | Fall2017/simple_linear_net.py | 1 | 1864 | import torch
import numpy as np
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import matplotlib.pyplot as plt
"""
This script generates and classifies a spiral dataset using a two layer
fully connected neural network in PyTorch.
"""
N = 100 # Number of elements per class
D = 2 # Dimensionality of the data
K = 3 # number of classes
def gen_spiral_dataset(N=100, D=2, K=3):
X = np.zeros((N*K, D))
y = np.zeros(N*K, dtype=np.uint8)
for j in range(K):
ind = range(N*j, N*(j+1))
r = np.linspace(0,0.1,N)
t = np.linspace(j*4, (j+1)*4, N) + np.random.randn(N)*0.2
X[ind] = np.c_[r*np.sin(t), r*np.cos(t)]
y[ind] = j
return X, y
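# X has shape (N*K, D) with one 2-D point per row; y holds the integer class label (0..K-1) for each row.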
class Net(nn.Module):
"""
Net: A simple two-layer fully connected network (two Linear layers followed by LogSoftmax).
"""
def __init__(self):
super(Net, self).__init__()
self.layer1 = nn.Linear(D, 64) # Linear layer: y = Wx + b
self.layer2 = nn.Linear(64, K)
self.softmax = nn.LogSoftmax()
def forward(self, x):
x1 = self.layer1(x)
x2 = self.layer2(x1)
x3 = self.softmax(x2)
return x3
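# Note that no nonlinearity (e.g. ReLU) is applied between the two Linear layers, so the model
# is effectively a single linear classifier followed by a log-softmax.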
net = Net()
print(net)
X,y = gen_spiral_dataset()
plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.Spectral)
plt.show()
lossfn = nn.NLLLoss()
optimz = optim.SGD(net.parameters(), lr=1e-3, momentum=0.9)
var_y = Variable(torch.from_numpy(y).type(torch.LongTensor))
var_x = Variable(torch.from_numpy(X).type(torch.FloatTensor), requires_grad=True)
def train(net):
net.train()
for ep in range(30):
ov_loss = 0.0
for i in range(N*K):
optimz.zero_grad()
op = net(var_x)
loss = lossfn(op, var_y)
loss.backward()
optimz.step()
if i%100 == 0:
print("Epoch:%d, Iteration:%d, Loss: %.3f"%(ep, i, loss.data[0]))
print("Finished training\n")
def eval(net):
net.eval()
op = net(var_x[0, :])
max_val = op.data.max(0)[1]
if __name__ == '__main__':
train(net)
eval(net)
| mit |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/matplotlib/backends/backend_ps.py | 2 | 63263 | """
A PostScript backend, which can produce both PostScript .ps and .eps
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import StringIO
import glob, os, shutil, sys, time, datetime
import io
from tempfile import mkstemp
from matplotlib import verbose, __version__, rcParams, checkdep_ghostscript
from matplotlib.afm import AFM
from matplotlib.backend_bases import (
_Backend, FigureCanvasBase, FigureManagerBase, GraphicsContextBase,
RendererBase)
from matplotlib.cbook import (get_realpath_and_stat, is_writable_file_like,
maxdict, file_requires_unicode)
from matplotlib.compat.subprocess import subprocess
from matplotlib.figure import Figure
from matplotlib.font_manager import findfont, is_opentype_cff_font, get_font
from matplotlib.ft2font import KERNING_DEFAULT, LOAD_NO_HINTING
from matplotlib.ttconv import convert_ttf_to_ps
from matplotlib.mathtext import MathTextParser
from matplotlib._mathtext_data import uni2type1
from matplotlib.path import Path
from matplotlib import _path
from matplotlib.transforms import Affine2D
from matplotlib.backends.backend_mixed import MixedModeRenderer
import numpy as np
import binascii
import re
if sys.platform.startswith('win'): cmd_split = '&'
else: cmd_split = ';'
backend_version = 'Level II'
debugPS = 0
class PsBackendHelper(object):
def __init__(self):
self._cached = {}
@property
def gs_exe(self):
"""
Executable name of ghostscript.
"""
try:
return self._cached["gs_exe"]
except KeyError:
pass
gs_exe, gs_version = checkdep_ghostscript()
if gs_exe is None:
gs_exe = 'gs'
self._cached["gs_exe"] = str(gs_exe)
return str(gs_exe)
@property
def gs_version(self):
"""
version of ghostscript.
"""
try:
return self._cached["gs_version"]
except KeyError:
pass
from matplotlib.compat.subprocess import Popen, PIPE
s = Popen([self.gs_exe, "--version"], stdout=PIPE)
pipe, stderr = s.communicate()
if six.PY3:
ver = pipe.decode('ascii')
else:
ver = pipe
try:
gs_version = tuple(map(int, ver.strip().split(".")))
except ValueError:
# if something went wrong parsing return null version number
gs_version = (0, 0)
self._cached["gs_version"] = gs_version
return gs_version
@property
def supports_ps2write(self):
"""
True if the installed ghostscript supports ps2write device.
"""
return self.gs_version[0] >= 9
ps_backend_helper = PsBackendHelper()
papersize = {'letter': (8.5,11),
'legal': (8.5,14),
'ledger': (11,17),
'a0': (33.11,46.81),
'a1': (23.39,33.11),
'a2': (16.54,23.39),
'a3': (11.69,16.54),
'a4': (8.27,11.69),
'a5': (5.83,8.27),
'a6': (4.13,5.83),
'a7': (2.91,4.13),
'a8': (2.07,2.91),
'a9': (1.457,2.05),
'a10': (1.02,1.457),
'b0': (40.55,57.32),
'b1': (28.66,40.55),
'b2': (20.27,28.66),
'b3': (14.33,20.27),
'b4': (10.11,14.33),
'b5': (7.16,10.11),
'b6': (5.04,7.16),
'b7': (3.58,5.04),
'b8': (2.51,3.58),
'b9': (1.76,2.51),
'b10': (1.26,1.76)}
def _get_papertype(w, h):
keys = list(six.iterkeys(papersize))
keys.sort()
keys.reverse()
for key in keys:
if key.startswith('l'): continue
pw, ph = papersize[key]
if (w < pw) and (h < ph): return key
return 'a0'
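# Note: keys beginning with 'l' ('letter', 'legal', 'ledger') are skipped above, and 'a0' is
# returned as a fallback when the figure does not fit any of the remaining sizes.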
def _num_to_str(val):
if isinstance(val, six.string_types): return val
ival = int(val)
if val == ival: return str(ival)
s = "%1.3f"%val
s = s.rstrip("0")
s = s.rstrip(".")
return s
def _nums_to_str(*args):
return ' '.join(map(_num_to_str,args))
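# e.g. _nums_to_str(1.0, 2.5, 3) -> '1 2.5 3' (trailing zeros and decimal points are stripped)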
def quote_ps_string(s):
"Quote dangerous characters of S for use in a PostScript string constant."
s = s.replace(b"\\", b"\\\\")
s = s.replace(b"(", b"\\(")
s = s.replace(b")", b"\\)")
s = s.replace(b"'", b"\\251")
s = s.replace(b"`", b"\\301")
s = re.sub(br"[^ -~\n]", lambda x: br"\%03o" % ord(x.group()), s)
return s.decode('ascii')
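# e.g. quote_ps_string(b"(hi)") -> r'\(hi\)'; backslashes, parentheses and non-printable or
# non-ASCII bytes are escaped so the result can be embedded in a PostScript string literal.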
class RendererPS(RendererBase):
"""
The renderer handles all the drawing primitives using a graphics
context instance that controls the colors/styles.
"""
afmfontd = maxdict(50)
def __init__(self, width, height, pswriter, imagedpi=72):
"""
Although PostScript itself is dpi independent, we need to
inform the image code about a requested dpi to generate
high-res images and then scale them before embedding them
"""
RendererBase.__init__(self)
self.width = width
self.height = height
self._pswriter = pswriter
if rcParams['text.usetex']:
self.textcnt = 0
self.psfrag = []
self.imagedpi = imagedpi
# current renderer state (None=uninitialised)
self.color = None
self.linewidth = None
self.linejoin = None
self.linecap = None
self.linedash = None
self.fontname = None
self.fontsize = None
self._hatches = {}
self.image_magnification = imagedpi/72.0
self._clip_paths = {}
self._path_collection_id = 0
self.used_characters = {}
self.mathtext_parser = MathTextParser("PS")
self._afm_font_dir = os.path.join(
rcParams['datapath'], 'fonts', 'afm')
def track_characters(self, font, s):
"""Keeps track of which characters are required from
each font."""
realpath, stat_key = get_realpath_and_stat(font.fname)
used_characters = self.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].update([ord(x) for x in s])
def merge_used_characters(self, other):
for stat_key, (realpath, charset) in six.iteritems(other):
used_characters = self.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].update(charset)
def set_color(self, r, g, b, store=1):
if (r,g,b) != self.color:
if r==g and r==b:
self._pswriter.write("%1.3f setgray\n"%r)
else:
self._pswriter.write("%1.3f %1.3f %1.3f setrgbcolor\n"%(r,g,b))
if store: self.color = (r,g,b)
def set_linewidth(self, linewidth, store=1):
linewidth = float(linewidth)
if linewidth != self.linewidth:
self._pswriter.write("%1.3f setlinewidth\n"%linewidth)
if store: self.linewidth = linewidth
def set_linejoin(self, linejoin, store=1):
if linejoin != self.linejoin:
self._pswriter.write("%d setlinejoin\n"%linejoin)
if store: self.linejoin = linejoin
def set_linecap(self, linecap, store=1):
if linecap != self.linecap:
self._pswriter.write("%d setlinecap\n"%linecap)
if store: self.linecap = linecap
def set_linedash(self, offset, seq, store=1):
if self.linedash is not None:
oldo, oldseq = self.linedash
if np.array_equal(seq, oldseq) and oldo == offset:
return
if seq is not None and len(seq):
s="[%s] %d setdash\n"%(_nums_to_str(*seq), offset)
self._pswriter.write(s)
else:
self._pswriter.write("[] 0 setdash\n")
if store:
self.linedash = (offset, seq)
def set_font(self, fontname, fontsize, store=1):
if rcParams['ps.useafm']: return
if (fontname,fontsize) != (self.fontname,self.fontsize):
out = ("/%s findfont\n"
"%1.3f scalefont\n"
"setfont\n" % (fontname, fontsize))
self._pswriter.write(out)
if store: self.fontname = fontname
if store: self.fontsize = fontsize
def create_hatch(self, hatch):
sidelen = 72
if hatch in self._hatches:
return self._hatches[hatch]
name = 'H%d' % len(self._hatches)
linewidth = rcParams['hatch.linewidth']
pageheight = self.height * 72
self._pswriter.write("""\
<< /PatternType 1
/PaintType 2
/TilingType 2
/BBox[0 0 %(sidelen)d %(sidelen)d]
/XStep %(sidelen)d
/YStep %(sidelen)d
/PaintProc {
pop
%(linewidth)f setlinewidth
""" % locals())
self._pswriter.write(
self._convert_path(Path.hatch(hatch), Affine2D().scale(sidelen),
simplify=False))
self._pswriter.write("""\
fill
stroke
} bind
>>
matrix
0.0 %(pageheight)f translate
makepattern
/%(name)s exch def
""" % locals())
self._hatches[hatch] = name
return name
def get_canvas_width_height(self):
'return the canvas width and height in display coords'
return self.width * 72.0, self.height * 72.0
def get_text_width_height_descent(self, s, prop, ismath):
"""
get the width and height in display coords of the string s
with FontPropertry prop
"""
if rcParams['text.usetex']:
texmanager = self.get_texmanager()
fontsize = prop.get_size_in_points()
w, h, d = texmanager.get_text_width_height_descent(s, fontsize,
renderer=self)
return w, h, d
if ismath:
width, height, descent, pswriter, used_characters = \
self.mathtext_parser.parse(s, 72, prop)
return width, height, descent
if rcParams['ps.useafm']:
if ismath: s = s[1:-1]
font = self._get_font_afm(prop)
l,b,w,h,d = font.get_str_bbox_and_descent(s)
fontsize = prop.get_size_in_points()
scale = 0.001*fontsize
w *= scale
h *= scale
d *= scale
return w, h, d
font = self._get_font_ttf(prop)
font.set_text(s, 0.0, flags=LOAD_NO_HINTING)
w, h = font.get_width_height()
w /= 64.0 # convert from subpixels
h /= 64.0
d = font.get_descent()
d /= 64.0
#print s, w, h
return w, h, d
def flipy(self):
'return true if small y numbers are top for renderer'
return False
def _get_font_afm(self, prop):
key = hash(prop)
font = self.afmfontd.get(key)
if font is None:
fname = findfont(prop, fontext='afm', directory=self._afm_font_dir)
if fname is None:
fname = findfont(
"Helvetica", fontext='afm', directory=self._afm_font_dir)
font = self.afmfontd.get(fname)
if font is None:
with io.open(fname, 'rb') as fh:
font = AFM(fh)
self.afmfontd[fname] = font
self.afmfontd[key] = font
return font
def _get_font_ttf(self, prop):
fname = findfont(prop)
font = get_font(fname)
font.clear()
size = prop.get_size_in_points()
font.set_size(size, 72.0)
return font
def _rgb(self, rgba):
h, w = rgba.shape[:2]
rgb = rgba[::-1, :, :3]
return h, w, rgb.tostring()
def _hex_lines(self, s, chars_per_line=128):
s = binascii.b2a_hex(s)
nhex = len(s)
lines = []
for i in range(0,nhex,chars_per_line):
limit = min(i+chars_per_line, nhex)
lines.append(s[i:limit])
return lines
def get_image_magnification(self):
"""
Get the factor by which to magnify images passed to draw_image.
Allows a backend to have images at a different resolution to other
artists.
"""
return self.image_magnification
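# Illustrative example (hypothetical value, not from the source): constructing
# RendererPS with imagedpi=300 gives image_magnification = 300/72 ~ 4.17, so
# images are handed to draw_image at roughly 4.17x resolution and draw_image
# divides the drawn width and height by this factor to restore the intended
# on-page size.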
def option_scale_image(self):
"""
The PS backend supports arbitrary scaling of images.
"""
return True
def option_image_nocomposite(self):
"""
return whether to generate a composite image from multiple images on
a set of axes
"""
return not rcParams['image.composite_image']
def _get_image_h_w_bits_command(self, im):
h, w, bits = self._rgb(im)
imagecmd = "false 3 colorimage"
return h, w, bits, imagecmd
def draw_image(self, gc, x, y, im, transform=None):
"""
Draw the Image instance into the current axes; x is the
distance in pixels from the left hand side of the canvas and y
is the distance from the bottom
"""
h, w, bits, imagecmd = self._get_image_h_w_bits_command(im)
hexlines = b'\n'.join(self._hex_lines(bits)).decode('ascii')
if transform is None:
matrix = "1 0 0 1 0 0"
xscale = w / self.image_magnification
yscale = h / self.image_magnification
else:
matrix = " ".join(map(str, transform.frozen().to_values()))
xscale = 1.0
yscale = 1.0
figh = self.height * 72
#print 'values', origin, flipud, figh, h, y
bbox = gc.get_clip_rectangle()
clippath, clippath_trans = gc.get_clip_path()
clip = []
if bbox is not None:
clipx,clipy,clipw,cliph = bbox.bounds
clip.append('%s clipbox' % _nums_to_str(clipw, cliph, clipx, clipy))
if clippath is not None:
id = self._get_clip_path(clippath, clippath_trans)
clip.append('%s' % id)
clip = '\n'.join(clip)
#y = figh-(y+h)
ps = """gsave
%(clip)s
%(x)s %(y)s translate
[%(matrix)s] concat
%(xscale)s %(yscale)s scale
/DataString %(w)s string def
%(w)s %(h)s 8 [ %(w)s 0 0 -%(h)s 0 %(h)s ]
{
currentfile DataString readhexstring pop
} bind %(imagecmd)s
%(hexlines)s
grestore
""" % locals()
self._pswriter.write(ps)
def _convert_path(self, path, transform, clip=False, simplify=None):
if clip:
clip = (0.0, 0.0, self.width * 72.0,
self.height * 72.0)
else:
clip = None
return _path.convert_to_string(
path, transform, clip, simplify, None,
6, [b'm', b'l', b'', b'c', b'cl'], True).decode('ascii')
def _get_clip_path(self, clippath, clippath_transform):
key = (clippath, id(clippath_transform))
pid = self._clip_paths.get(key)
if pid is None:
pid = 'c%x' % len(self._clip_paths)
ps_cmd = ['/%s {' % pid]
ps_cmd.append(self._convert_path(clippath, clippath_transform,
simplify=False))
ps_cmd.extend(['clip', 'newpath', '} bind def\n'])
self._pswriter.write('\n'.join(ps_cmd))
self._clip_paths[key] = pid
return pid
def draw_path(self, gc, path, transform, rgbFace=None):
"""
Draws a Path instance using the given affine transform.
"""
clip = (rgbFace is None and gc.get_hatch_path() is None)
simplify = path.should_simplify and clip
ps = self._convert_path(
path, transform, clip=clip, simplify=simplify)
self._draw_ps(ps, gc, rgbFace)
def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
"""
Draw the markers defined by marker_path (transformed by marker_trans)
at each vertex of path. Marker path coordinates are in points; the
vertices of path are transformed to display space by trans.
"""
if debugPS: self._pswriter.write('% draw_markers \n')
if rgbFace:
if rgbFace[0]==rgbFace[1] and rgbFace[0]==rgbFace[2]:
ps_color = '%1.3f setgray' % rgbFace[0]
else:
ps_color = '%1.3f %1.3f %1.3f setrgbcolor' % rgbFace[:3]
# construct the generic marker command:
ps_cmd = ['/o {', 'gsave', 'newpath', 'translate'] # don't want the translate to be global
lw = gc.get_linewidth()
stroke = lw != 0.0
if stroke:
ps_cmd.append('%.1f setlinewidth' % lw)
jint = gc.get_joinstyle()
ps_cmd.append('%d setlinejoin' % jint)
cint = gc.get_capstyle()
ps_cmd.append('%d setlinecap' % cint)
ps_cmd.append(self._convert_path(marker_path, marker_trans,
simplify=False))
if rgbFace:
if stroke:
ps_cmd.append('gsave')
ps_cmd.extend([ps_color, 'fill'])
if stroke:
ps_cmd.append('grestore')
if stroke:
ps_cmd.append('stroke')
ps_cmd.extend(['grestore', '} bind def'])
for vertices, code in path.iter_segments(
trans,
clip=(0, 0, self.width*72, self.height*72),
simplify=False):
if len(vertices):
x, y = vertices[-2:]
ps_cmd.append("%g %g o" % (x, y))
ps = '\n'.join(ps_cmd)
self._draw_ps(ps, gc, rgbFace, fill=False, stroke=False)
def draw_path_collection(self, gc, master_transform, paths, all_transforms,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls,
offset_position):
# Is the optimization worth it? Rough calculation:
# cost of emitting a path in-line is
# (len_path + 2) * uses_per_path
# cost of definition+use is
# (len_path + 3) + 3 * uses_per_path
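# A quick worked example with illustrative numbers (not from any real figure):
# for len_path = 10 and uses_per_path = 5, emitting each path in-line costs
# (10 + 2) * 5 = 60 lines, while defining it once and reusing it costs
# (10 + 3) + 3 * 5 = 28 lines, so the definition+use branch below wins; with
# uses_per_path = 1 the inequality fails and we fall back to
# RendererBase.draw_path_collection.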
len_path = len(paths[0].vertices) if len(paths) > 0 else 0
uses_per_path = self._iter_collection_uses_per_path(
paths, all_transforms, offsets, facecolors, edgecolors)
should_do_optimization = \
len_path + 3 * uses_per_path + 3 < (len_path + 2) * uses_per_path
if not should_do_optimization:
return RendererBase.draw_path_collection(
self, gc, master_transform, paths, all_transforms,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls,
offset_position)
write = self._pswriter.write
path_codes = []
for i, (path, transform) in enumerate(self._iter_collection_raw_paths(
master_transform, paths, all_transforms)):
name = 'p%x_%x' % (self._path_collection_id, i)
ps_cmd = ['/%s {' % name,
'newpath', 'translate']
ps_cmd.append(self._convert_path(path, transform, simplify=False))
ps_cmd.extend(['} bind def\n'])
write('\n'.join(ps_cmd))
path_codes.append(name)
for xo, yo, path_id, gc0, rgbFace in self._iter_collection(
gc, master_transform, all_transforms, path_codes, offsets,
offsetTrans, facecolors, edgecolors, linewidths, linestyles,
antialiaseds, urls, offset_position):
ps = "%g %g %s" % (xo, yo, path_id)
self._draw_ps(ps, gc0, rgbFace)
self._path_collection_id += 1
def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!', mtext=None):
"""
draw a Text instance
"""
w, h, bl = self.get_text_width_height_descent(s, prop, ismath)
fontsize = prop.get_size_in_points()
thetext = 'psmarker%d' % self.textcnt
color = '%1.3f,%1.3f,%1.3f'% gc.get_rgb()[:3]
fontcmd = {'sans-serif' : r'{\sffamily %s}',
'monospace' : r'{\ttfamily %s}'}.get(
rcParams['font.family'][0], r'{\rmfamily %s}')
s = fontcmd % s
tex = r'\color[rgb]{%s} %s' % (color, s)
corr = 0#w/2*(fontsize-10)/10
if rcParams['text.latex.preview']:
# use baseline alignment!
pos = _nums_to_str(x-corr, y)
self.psfrag.append(r'\psfrag{%s}[Bl][Bl][1][%f]{\fontsize{%f}{%f}%s}'%(thetext, angle, fontsize, fontsize*1.25, tex))
else:
# stick to the bottom alignment, but this may give an incorrect baseline sometimes.
pos = _nums_to_str(x-corr, y-bl)
self.psfrag.append(r'\psfrag{%s}[bl][bl][1][%f]{\fontsize{%f}{%f}%s}'%(thetext, angle, fontsize, fontsize*1.25, tex))
ps = """\
gsave
%(pos)s moveto
(%(thetext)s)
show
grestore
""" % locals()
self._pswriter.write(ps)
self.textcnt += 1
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
"""
draw a Text instance
"""
# local to avoid repeated attribute lookups
write = self._pswriter.write
if debugPS:
write("% text\n")
if ismath=='TeX':
return self.draw_tex(gc, x, y, s, prop, angle)
elif ismath:
return self.draw_mathtext(gc, x, y, s, prop, angle)
elif rcParams['ps.useafm']:
self.set_color(*gc.get_rgb())
font = self._get_font_afm(prop)
fontname = font.get_fontname()
fontsize = prop.get_size_in_points()
scale = 0.001*fontsize
thisx = 0
thisy = font.get_str_bbox_and_descent(s)[4] * scale
last_name = None
lines = []
for c in s:
name = uni2type1.get(ord(c), 'question')
try:
width = font.get_width_from_char_name(name)
except KeyError:
name = 'question'
width = font.get_width_char('?')
if last_name is not None:
kern = font.get_kern_dist_from_name(last_name, name)
else:
kern = 0
last_name = name
thisx += kern * scale
lines.append('%f %f m /%s glyphshow'%(thisx, thisy, name))
thisx += width * scale
thetext = "\n".join(lines)
ps = """\
gsave
/%(fontname)s findfont
%(fontsize)s scalefont
setfont
%(x)f %(y)f translate
%(angle)f rotate
%(thetext)s
grestore
""" % locals()
self._pswriter.write(ps)
else:
font = self._get_font_ttf(prop)
font.set_text(s, 0, flags=LOAD_NO_HINTING)
self.track_characters(font, s)
self.set_color(*gc.get_rgb())
sfnt = font.get_sfnt()
try:
ps_name = sfnt[(1,0,0,6)].decode('macroman')
except KeyError:
ps_name = sfnt[(3,1,0x0409,6)].decode(
'utf-16be')
ps_name = ps_name.encode('ascii', 'replace').decode('ascii')
self.set_font(ps_name, prop.get_size_in_points())
lastgind = None
#print 'text', s
lines = []
thisx = 0
thisy = 0
for c in s:
ccode = ord(c)
gind = font.get_char_index(ccode)
if gind is None:
ccode = ord('?')
name = '.notdef'
gind = 0
else:
name = font.get_glyph_name(gind)
glyph = font.load_char(ccode, flags=LOAD_NO_HINTING)
if lastgind is not None:
kern = font.get_kerning(lastgind, gind, KERNING_DEFAULT)
else:
kern = 0
lastgind = gind
thisx += kern/64.0
lines.append('%f %f m /%s glyphshow'%(thisx, thisy, name))
thisx += glyph.linearHoriAdvance/65536.0
thetext = '\n'.join(lines)
ps = """gsave
%(x)f %(y)f translate
%(angle)f rotate
%(thetext)s
grestore
""" % locals()
self._pswriter.write(ps)
def new_gc(self):
return GraphicsContextPS()
def draw_mathtext(self, gc,
x, y, s, prop, angle):
"""
Draw the math text using matplotlib.mathtext
"""
if debugPS:
self._pswriter.write("% mathtext\n")
width, height, descent, pswriter, used_characters = \
self.mathtext_parser.parse(s, 72, prop)
self.merge_used_characters(used_characters)
self.set_color(*gc.get_rgb())
thetext = pswriter.getvalue()
ps = """gsave
%(x)f %(y)f translate
%(angle)f rotate
%(thetext)s
grestore
""" % locals()
self._pswriter.write(ps)
def draw_gouraud_triangle(self, gc, points, colors, trans):
self.draw_gouraud_triangles(gc, points.reshape((1, 3, 2)),
colors.reshape((1, 3, 4)), trans)
def draw_gouraud_triangles(self, gc, points, colors, trans):
assert len(points) == len(colors)
assert points.ndim == 3
assert points.shape[1] == 3
assert points.shape[2] == 2
assert colors.ndim == 3
assert colors.shape[1] == 3
assert colors.shape[2] == 4
shape = points.shape
flat_points = points.reshape((shape[0] * shape[1], 2))
flat_points = trans.transform(flat_points)
flat_colors = colors.reshape((shape[0] * shape[1], 4))
points_min = np.min(flat_points, axis=0) - (1 << 12)
points_max = np.max(flat_points, axis=0) + (1 << 12)
factor = np.ceil(float(2 ** 32 - 1) / (points_max - points_min))
xmin, ymin = points_min
xmax, ymax = points_max
streamarr = np.empty(
(shape[0] * shape[1],),
dtype=[('flags', 'u1'),
('points', '>u4', (2,)),
('colors', 'u1', (3,))])
streamarr['flags'] = 0
streamarr['points'] = (flat_points - points_min) * factor
streamarr['colors'] = flat_colors[:, :3] * 255.0
stream = quote_ps_string(streamarr.tostring())
self._pswriter.write("""
gsave
<< /ShadingType 4
/ColorSpace [/DeviceRGB]
/BitsPerCoordinate 32
/BitsPerComponent 8
/BitsPerFlag 8
/AntiAlias true
/Decode [ %(xmin)f %(xmax)f %(ymin)f %(ymax)f 0 1 0 1 0 1 ]
/DataSource (%(stream)s)
>>
shfill
grestore
""" % locals())
def _draw_ps(self, ps, gc, rgbFace, fill=True, stroke=True, command=None):
"""
Emit the PostScript snippet 'ps' with all the attributes from 'gc'
applied. 'ps' must consist of PostScript commands to construct a path.
The fill and/or stroke kwargs can be set to False if the
'ps' string already includes filling and/or stroking, in
which case _draw_ps is just supplying properties and
clipping.
"""
# local variable eliminates all repeated attribute lookups
write = self._pswriter.write
if debugPS and command:
write("% "+command+"\n")
mightstroke = gc.shouldstroke()
stroke = stroke and mightstroke
fill = (fill and rgbFace is not None and
(len(rgbFace) <= 3 or rgbFace[3] != 0.0))
hatch = gc.get_hatch()
if mightstroke:
self.set_linewidth(gc.get_linewidth())
jint = gc.get_joinstyle()
self.set_linejoin(jint)
cint = gc.get_capstyle()
self.set_linecap(cint)
self.set_linedash(*gc.get_dashes())
self.set_color(*gc.get_rgb()[:3])
write('gsave\n')
cliprect = gc.get_clip_rectangle()
if cliprect:
x,y,w,h=cliprect.bounds
write('%1.4g %1.4g %1.4g %1.4g clipbox\n' % (w,h,x,y))
clippath, clippath_trans = gc.get_clip_path()
if clippath:
id = self._get_clip_path(clippath, clippath_trans)
write('%s\n' % id)
# Jochen, is the strip necessary? - this could be a honking big string
write(ps.strip())
write("\n")
if fill:
if stroke or hatch:
write("gsave\n")
self.set_color(store=0, *rgbFace[:3])
write("fill\n")
if stroke or hatch:
write("grestore\n")
if hatch:
hatch_name = self.create_hatch(hatch)
write("gsave\n")
write("%f %f %f " % gc.get_hatch_color()[:3])
write("%s setpattern fill grestore\n" % hatch_name)
if stroke:
write("stroke\n")
write("grestore\n")
class GraphicsContextPS(GraphicsContextBase):
def get_capstyle(self):
return {'butt':0,
'round':1,
'projecting':2}[GraphicsContextBase.get_capstyle(self)]
def get_joinstyle(self):
return {'miter':0,
'round':1,
'bevel':2}[GraphicsContextBase.get_joinstyle(self)]
def shouldstroke(self):
return (self.get_linewidth() > 0.0 and
(len(self.get_rgb()) <= 3 or self.get_rgb()[3] != 0.0))
class FigureCanvasPS(FigureCanvasBase):
_renderer_class = RendererPS
fixed_dpi = 72
def draw(self):
pass
filetypes = {'ps' : 'Postscript',
'eps' : 'Encapsulated Postscript'}
def get_default_filetype(self):
return 'ps'
def print_ps(self, outfile, *args, **kwargs):
return self._print_ps(outfile, 'ps', *args, **kwargs)
def print_eps(self, outfile, *args, **kwargs):
return self._print_ps(outfile, 'eps', *args, **kwargs)
def _print_ps(self, outfile, format, *args, **kwargs):
papertype = kwargs.pop("papertype", rcParams['ps.papersize'])
papertype = papertype.lower()
if papertype == 'auto':
pass
elif papertype not in papersize:
raise RuntimeError('%s is not a valid papertype. Use one of %s' %
(papertype, ', '.join(papersize)))
orientation = kwargs.pop("orientation", "portrait").lower()
if orientation == 'landscape': isLandscape = True
elif orientation == 'portrait': isLandscape = False
else: raise RuntimeError('Orientation must be "portrait" or "landscape"')
self.figure.set_dpi(72) # Override the dpi kwarg
imagedpi = kwargs.pop("dpi", 72)
facecolor = kwargs.pop("facecolor", "w")
edgecolor = kwargs.pop("edgecolor", "w")
if rcParams['text.usetex']:
self._print_figure_tex(outfile, format, imagedpi, facecolor, edgecolor,
orientation, isLandscape, papertype,
**kwargs)
else:
self._print_figure(outfile, format, imagedpi, facecolor, edgecolor,
orientation, isLandscape, papertype,
**kwargs)
def _print_figure(self, outfile, format, dpi=72, facecolor='w', edgecolor='w',
orientation='portrait', isLandscape=False, papertype=None,
metadata=None, **kwargs):
"""
Render the figure to hardcopy. Set the figure patch face and
edge colors. This is useful because some of the GUIs have a
gray figure face color background and you'll probably want to
override this on hardcopy.
If outfile is a string, it is interpreted as a file name.
If the extension matches .ep* write encapsulated postscript,
otherwise write a stand-alone PostScript file.
If outfile is a file object, a stand-alone PostScript file is
written into this file object.
metadata must be a dictionary. Currently, only the value for
the key 'Creator' is used.
"""
isEPSF = format == 'eps'
passed_in_file_object = False
if isinstance(outfile, six.string_types):
title = outfile
elif is_writable_file_like(outfile):
title = None
passed_in_file_object = True
else:
raise ValueError("outfile must be a path or a file-like object")
# find the appropriate papertype
width, height = self.figure.get_size_inches()
if papertype == 'auto':
if isLandscape: papertype = _get_papertype(height, width)
else: papertype = _get_papertype(width, height)
if isLandscape: paperHeight, paperWidth = papersize[papertype]
else: paperWidth, paperHeight = papersize[papertype]
if rcParams['ps.usedistiller'] and not papertype == 'auto':
# distillers will improperly clip eps files if the pagesize is
# too small
if width>paperWidth or height>paperHeight:
if isLandscape:
papertype = _get_papertype(height, width)
paperHeight, paperWidth = papersize[papertype]
else:
papertype = _get_papertype(width, height)
paperWidth, paperHeight = papersize[papertype]
# center the figure on the paper
xo = 72*0.5*(paperWidth - width)
yo = 72*0.5*(paperHeight - height)
l, b, w, h = self.figure.bbox.bounds
llx = xo
lly = yo
urx = llx + w
ury = lly + h
rotation = 0
if isLandscape:
llx, lly, urx, ury = lly, llx, ury, urx
xo, yo = 72*paperHeight - yo, xo
rotation = 90
bbox = (llx, lly, urx, ury)
# generate PostScript code for the figure and store it in a string
origfacecolor = self.figure.get_facecolor()
origedgecolor = self.figure.get_edgecolor()
self.figure.set_facecolor(facecolor)
self.figure.set_edgecolor(edgecolor)
dryrun = kwargs.get("dryrun", False)
if dryrun:
class NullWriter(object):
def write(self, *kl, **kwargs):
pass
self._pswriter = NullWriter()
else:
self._pswriter = io.StringIO()
# mixed mode rendering
_bbox_inches_restore = kwargs.pop("bbox_inches_restore", None)
ps_renderer = self._renderer_class(width, height, self._pswriter,
imagedpi=dpi)
renderer = MixedModeRenderer(self.figure,
width, height, dpi, ps_renderer,
bbox_inches_restore=_bbox_inches_restore)
self.figure.draw(renderer)
if dryrun: # return immediately if dryrun (tightbbox=True)
return
self.figure.set_facecolor(origfacecolor)
self.figure.set_edgecolor(origedgecolor)
# check for custom metadata
if metadata is not None and 'Creator' in metadata:
creator_str = metadata['Creator']
else:
creator_str = "matplotlib version " + __version__ + \
", http://matplotlib.org/"
def print_figure_impl(fh):
# write the PostScript headers
if isEPSF:
print("%!PS-Adobe-3.0 EPSF-3.0", file=fh)
else:
print("%!PS-Adobe-3.0", file=fh)
if title:
print("%%Title: "+title, file=fh)
print("%%Creator: " + creator_str, file=fh)
# get source date from SOURCE_DATE_EPOCH, if set
# See https://reproducible-builds.org/specs/source-date-epoch/
source_date_epoch = os.getenv("SOURCE_DATE_EPOCH")
if source_date_epoch:
source_date = datetime.datetime.utcfromtimestamp(
int(source_date_epoch)).strftime("%a %b %d %H:%M:%S %Y")
else:
source_date = time.ctime()
print("%%CreationDate: "+source_date, file=fh)
print("%%Orientation: " + orientation, file=fh)
if not isEPSF:
print("%%DocumentPaperSizes: "+papertype, file=fh)
print("%%%%BoundingBox: %d %d %d %d" % bbox, file=fh)
if not isEPSF:
print("%%Pages: 1", file=fh)
print("%%EndComments", file=fh)
Ndict = len(psDefs)
print("%%BeginProlog", file=fh)
if not rcParams['ps.useafm']:
Ndict += len(ps_renderer.used_characters)
print("/mpldict %d dict def" % Ndict, file=fh)
print("mpldict begin", file=fh)
for d in psDefs:
d = d.strip()
for l in d.split('\n'):
print(l.strip(), file=fh)
if not rcParams['ps.useafm']:
for font_filename, chars in six.itervalues(
ps_renderer.used_characters):
if len(chars):
font = get_font(font_filename)
glyph_ids = []
for c in chars:
gind = font.get_char_index(c)
glyph_ids.append(gind)
fonttype = rcParams['ps.fonttype']
# Can not use more than 255 characters from a
# single font for Type 3
if len(glyph_ids) > 255:
fonttype = 42
# The ttf to ps (subsetting) support doesn't work for
# OpenType fonts that are Postscript inside (like the
# STIX fonts). This will simply turn that off to avoid
# errors.
if is_opentype_cff_font(font_filename):
msg = ("OpenType CFF fonts can not be saved "
"using the internal Postscript backend "
"at this time.\nConsider using the "
"Cairo backend.")
raise RuntimeError(msg)
else:
fh.flush()
convert_ttf_to_ps(
font_filename.encode(
sys.getfilesystemencoding()),
fh, fonttype, glyph_ids)
print("end", file=fh)
print("%%EndProlog", file=fh)
if not isEPSF:
print("%%Page: 1 1", file=fh)
print("mpldict begin", file=fh)
print("%s translate" % _nums_to_str(xo, yo), file=fh)
if rotation:
print("%d rotate" % rotation, file=fh)
print("%s clipbox" % _nums_to_str(width*72, height*72, 0, 0),
file=fh)
# write the figure
content = self._pswriter.getvalue()
if not isinstance(content, six.text_type):
content = content.decode('ascii')
print(content, file=fh)
# write the trailer
print("end", file=fh)
print("showpage", file=fh)
if not isEPSF:
print("%%EOF", file=fh)
fh.flush()
if rcParams['ps.usedistiller']:
# We are going to use an external program to process the output.
# Write to a temporary file.
fd, tmpfile = mkstemp()
try:
with io.open(fd, 'w', encoding='latin-1') as fh:
print_figure_impl(fh)
if rcParams['ps.usedistiller'] == 'ghostscript':
gs_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox)
elif rcParams['ps.usedistiller'] == 'xpdf':
xpdf_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox)
if passed_in_file_object:
if file_requires_unicode(outfile):
with io.open(tmpfile, 'rb') as fh:
outfile.write(fh.read().decode('latin-1'))
else:
with io.open(tmpfile, 'rb') as fh:
outfile.write(fh.read())
else:
with io.open(outfile, 'w') as fh:
pass
mode = os.stat(outfile).st_mode
shutil.move(tmpfile, outfile)
os.chmod(outfile, mode)
finally:
if os.path.isfile(tmpfile):
os.unlink(tmpfile)
else:
# Write directly to outfile.
if passed_in_file_object:
requires_unicode = file_requires_unicode(outfile)
if (not requires_unicode and
(six.PY3 or not isinstance(outfile, StringIO))):
fh = io.TextIOWrapper(outfile, encoding="latin-1")
# Prevent the io.TextIOWrapper from closing the
# underlying file
def do_nothing():
pass
fh.close = do_nothing
else:
fh = outfile
print_figure_impl(fh)
else:
with io.open(outfile, 'w', encoding='latin-1') as fh:
print_figure_impl(fh)
def _print_figure_tex(self, outfile, format, dpi, facecolor, edgecolor,
orientation, isLandscape, papertype, metadata=None,
**kwargs):
"""
If text.usetex is True in rc, a temporary pair of tex/eps files
is created to allow tex to manage the text layout via the psfrag
package. These files are processed to yield the final ps or eps file.
metadata must be a dictionary. Currently, only the value for
the key 'Creator' is used.
"""
isEPSF = format == 'eps'
if isinstance(outfile, six.string_types):
title = outfile
elif is_writable_file_like(outfile):
title = None
else:
raise ValueError("outfile must be a path or a file-like object")
self.figure.dpi = 72 # ignore the dpi kwarg
width, height = self.figure.get_size_inches()
xo = 0
yo = 0
l, b, w, h = self.figure.bbox.bounds
llx = xo
lly = yo
urx = llx + w
ury = lly + h
bbox = (llx, lly, urx, ury)
# generate PostScript code for the figure and store it in a string
origfacecolor = self.figure.get_facecolor()
origedgecolor = self.figure.get_edgecolor()
self.figure.set_facecolor(facecolor)
self.figure.set_edgecolor(edgecolor)
dryrun = kwargs.get("dryrun", False)
if dryrun:
class NullWriter(object):
def write(self, *kl, **kwargs):
pass
self._pswriter = NullWriter()
else:
self._pswriter = io.StringIO()
# mixed mode rendering
_bbox_inches_restore = kwargs.pop("bbox_inches_restore", None)
ps_renderer = self._renderer_class(width, height,
self._pswriter, imagedpi=dpi)
renderer = MixedModeRenderer(self.figure,
width, height, dpi, ps_renderer,
bbox_inches_restore=_bbox_inches_restore)
self.figure.draw(renderer)
if dryrun: # return immediately if dryrun (tightbbox=True)
return
self.figure.set_facecolor(origfacecolor)
self.figure.set_edgecolor(origedgecolor)
# check for custom metadata
if metadata is not None and 'Creator' in metadata:
creator_str = metadata['Creator']
else:
creator_str = "matplotlib version " + __version__ + \
", http://matplotlib.org/"
# write to a temp file, we'll move it to outfile when done
fd, tmpfile = mkstemp()
try:
with io.open(fd, 'w', encoding='latin-1') as fh:
# write the Encapsulated PostScript headers
print("%!PS-Adobe-3.0 EPSF-3.0", file=fh)
if title:
print("%%Title: "+title, file=fh)
print("%%Creator: " + creator_str, file=fh)
# get source date from SOURCE_DATE_EPOCH, if set
# See https://reproducible-builds.org/specs/source-date-epoch/
source_date_epoch = os.getenv("SOURCE_DATE_EPOCH")
if source_date_epoch:
source_date = datetime.datetime.utcfromtimestamp(
int(source_date_epoch)).strftime(
"%a %b %d %H:%M:%S %Y")
else:
source_date = time.ctime()
print("%%CreationDate: "+source_date, file=fh)
print("%%%%BoundingBox: %d %d %d %d" % bbox, file=fh)
print("%%EndComments", file=fh)
Ndict = len(psDefs)
print("%%BeginProlog", file=fh)
print("/mpldict %d dict def" % Ndict, file=fh)
print("mpldict begin", file=fh)
for d in psDefs:
d = d.strip()
for l in d.split('\n'):
print(l.strip(), file=fh)
print("end", file=fh)
print("%%EndProlog", file=fh)
print("mpldict begin", file=fh)
print("%s translate" % _nums_to_str(xo, yo), file=fh)
print("%s clipbox" % _nums_to_str(width*72, height*72, 0, 0),
file=fh)
# write the figure
print(self._pswriter.getvalue(), file=fh)
# write the trailer
print("end", file=fh)
print("showpage", file=fh)
fh.flush()
if isLandscape: # now we are ready to rotate
isLandscape = True
width, height = height, width
bbox = (lly, llx, ury, urx)
# set the paper size to the figure size if isEPSF. The
# resulting ps file has the given size with correct bounding
# box so that there is no need to call 'pstoeps'
if isEPSF:
paperWidth, paperHeight = self.figure.get_size_inches()
if isLandscape:
paperWidth, paperHeight = paperHeight, paperWidth
else:
temp_papertype = _get_papertype(width, height)
if papertype == 'auto':
papertype = temp_papertype
paperWidth, paperHeight = papersize[temp_papertype]
else:
paperWidth, paperHeight = papersize[papertype]
if (width > paperWidth or height > paperHeight) and isEPSF:
paperWidth, paperHeight = papersize[temp_papertype]
verbose.report(
('Your figure is too big to fit on %s paper. %s '
'paper will be used to prevent clipping.'
) % (papertype, temp_papertype), 'helpful')
texmanager = ps_renderer.get_texmanager()
font_preamble = texmanager.get_font_preamble()
custom_preamble = texmanager.get_custom_preamble()
psfrag_rotated = convert_psfrags(tmpfile, ps_renderer.psfrag,
font_preamble,
custom_preamble, paperWidth,
paperHeight,
orientation)
if rcParams['ps.usedistiller'] == 'ghostscript':
gs_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox,
rotated=psfrag_rotated)
elif rcParams['ps.usedistiller'] == 'xpdf':
xpdf_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox,
rotated=psfrag_rotated)
elif rcParams['text.usetex']:
if False:
pass # for debugging
else:
gs_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox,
rotated=psfrag_rotated)
if is_writable_file_like(outfile):
if file_requires_unicode(outfile):
with io.open(tmpfile, 'rb') as fh:
outfile.write(fh.read().decode('latin-1'))
else:
with io.open(tmpfile, 'rb') as fh:
outfile.write(fh.read())
else:
with io.open(outfile, 'wb') as fh:
pass
mode = os.stat(outfile).st_mode
shutil.move(tmpfile, outfile)
os.chmod(outfile, mode)
finally:
if os.path.isfile(tmpfile):
os.unlink(tmpfile)
def convert_psfrags(tmpfile, psfrags, font_preamble, custom_preamble,
paperWidth, paperHeight, orientation):
"""
When we want to use the LaTeX backend with postscript, we write PSFrag tags
to a temporary postscript file, each one marking a position for LaTeX to
render some text. convert_psfrags generates a LaTeX document containing the
commands to convert those tags to text. LaTeX/dvips produces the postscript
file that includes the actual text.
"""
tmpdir = os.path.split(tmpfile)[0]
epsfile = tmpfile+'.eps'
shutil.move(tmpfile, epsfile)
latexfile = tmpfile+'.tex'
dvifile = tmpfile+'.dvi'
psfile = tmpfile+'.ps'
if orientation == 'landscape':
angle = 90
else:
angle = 0
if rcParams['text.latex.unicode']:
unicode_preamble = """\\usepackage{ucs}
\\usepackage[utf8x]{inputenc}"""
else:
unicode_preamble = ''
s = """\\documentclass{article}
%s
%s
%s
\\usepackage[dvips, papersize={%sin,%sin}, body={%sin,%sin}, margin={0in,0in}]{geometry}
\\usepackage{psfrag}
\\usepackage[dvips]{graphicx}
\\usepackage{color}
\\pagestyle{empty}
\\begin{document}
\\begin{figure}
\\centering
\\leavevmode
%s
\\includegraphics*[angle=%s]{%s}
\\end{figure}
\\end{document}
"""% (font_preamble, unicode_preamble, custom_preamble, paperWidth, paperHeight,
paperWidth, paperHeight,
'\n'.join(psfrags), angle, os.path.split(epsfile)[-1])
with io.open(latexfile, 'wb') as latexh:
if rcParams['text.latex.unicode']:
latexh.write(s.encode('utf8'))
else:
try:
latexh.write(s.encode('ascii'))
except UnicodeEncodeError:
verbose.report("You are using unicode and latex, but have "
"not enabled the matplotlib 'text.latex.unicode' "
"rcParam.", 'helpful')
raise
# Replace \\ with / so LaTeX does not think there is a function call
latexfile = latexfile.replace("\\", "/")
# Replace ~ so LaTeX does not think it is a line break
latexfile = latexfile.replace("~", "\\string~")
command = [str("latex"), "-interaction=nonstopmode",
'"%s"' % latexfile]
verbose.report(command, 'debug')
try:
report = subprocess.check_output(command, cwd=tmpdir,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as exc:
raise RuntimeError(
('LaTeX was not able to process the following '
'file:\n%s\n\n'
'Here is the full report generated by LaTeX:\n%s '
'\n\n' % (latexfile,
exc.output.decode("utf-8"))))
verbose.report(report, 'debug')
command = [str('dvips'), '-q', '-R0', '-o', os.path.basename(psfile),
os.path.basename(dvifile)]
verbose.report(command, 'debug')
try:
report = subprocess.check_output(command, cwd=tmpdir,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as exc:
raise RuntimeError(
('dvips was not able to process the following '
'file:\n%s\n\n'
'Here is the full report generated by dvips:\n%s '
'\n\n' % (dvifile,
exc.output.decode("utf-8"))))
verbose.report(report, 'debug')
os.remove(epsfile)
shutil.move(psfile, tmpfile)
# Check whether dvips created a ps file in landscape orientation. For
# certain figure sizes (e.g., 8.3in x 5.8in, which is a5) the
# latex+dvips step above produces a landscape ps file and the bounding
# box of the final output gets messed up. We check whether the
# generated ps file is in landscape mode and return this information.
# The return value is used in the pstoeps step to recover the correct
# bounding box. 2010-06-05 JJL
with io.open(tmpfile) as fh:
if "Landscape" in fh.read(1000):
psfrag_rotated = True
else:
psfrag_rotated = False
if not debugPS:
for fname in glob.glob(tmpfile+'.*'):
os.remove(fname)
return psfrag_rotated
def gs_distill(tmpfile, eps=False, ptype='letter', bbox=None, rotated=False):
"""
Use ghostscript's pswrite or epswrite device to distill a file.
This yields smaller files without illegal encapsulated postscript
operators. The output is low-level, converting text to outlines.
"""
if eps:
paper_option = "-dEPSCrop"
else:
paper_option = "-sPAPERSIZE=%s" % ptype
psfile = tmpfile + '.ps'
dpi = rcParams['ps.distiller.res']
gs_exe = ps_backend_helper.gs_exe
if ps_backend_helper.supports_ps2write: # gs version >= 9
device_name = "ps2write"
else:
device_name = "pswrite"
command = [str(gs_exe), "-dBATCH", "-dNOPAUSE", "-r%d" % dpi,
"-sDEVICE=%s" % device_name, paper_option,
"-sOutputFile=%s" % psfile, tmpfile]
verbose.report(command, 'debug')
try:
report = subprocess.check_output(command, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as exc:
raise RuntimeError(
('ghostscript was not able to process your image.\n'
'Here is the full report generated by ghostscript:\n%s '
'\n\n' % exc.output.decode("utf-8")))
verbose.report(report, 'debug')
os.remove(tmpfile)
shutil.move(psfile, tmpfile)
# While it is best if the above steps preserve the original bounding
# box, there seem to be cases where they do not. For those cases,
# the original bbox can be restored during the pstoeps step.
if eps:
# For some versions of gs, the above steps result in a ps file
# where the original bbox is no longer correct. Do not adjust the
# bbox for now.
if ps_backend_helper.supports_ps2write:
# for gs version >= 9 w/ ps2write device
pstoeps(tmpfile, bbox, rotated=rotated)
else:
pstoeps(tmpfile)
def xpdf_distill(tmpfile, eps=False, ptype='letter', bbox=None, rotated=False):
"""
Use ghostscript's ps2pdf and xpdf's/poppler's pdftops to distill a file.
This yields smaller files without illegal encapsulated postscript
operators. This distiller is preferred, generating high-level postscript
output that treats text as text.
"""
pdffile = tmpfile + '.pdf'
psfile = tmpfile + '.ps'
if eps:
paper_option = "-dEPSCrop"
else:
if sys.platform == "win32":
paper_option = "-sPAPERSIZE#%s" % ptype
else:
paper_option = "-sPAPERSIZE=%s" % ptype
if sys.platform == "win32":
command = [str("ps2pdf"), "-dAutoFilterColorImages#false",
"-dAutoFilterGrayImages#false",
"-sGrayImageFilter#FlateEncode",
"-sColorImageFilter#FlateEncode", paper_option, tmpfile,
pdffile]
else:
command = [str("ps2pdf"), "-dAutoFilterColorImages=false",
"-dAutoFilterGrayImages=false",
"-sGrayImageFilter=FlateEncode",
"-sColorImageFilter=FlateEncode", paper_option, tmpfile,
pdffile]
verbose.report(command, 'debug')
try:
report = subprocess.check_output(command, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as exc:
raise RuntimeError(
('ps2pdf was not able to process your image.\n'
'Here is the full report generated by ps2pdf:\n%s '
'\n\n' % exc.output.decode("utf-8")))
verbose.report(report, 'debug')
command = [str("pdftops"), "-paper", "match", "-level2", pdffile, psfile]
verbose.report(command, 'debug')
try:
report = subprocess.check_output(command, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as exc:
raise RuntimeError(
('pdftops was not able to process your image.\n'
'Here is the full report generated by pdftops:\n%s '
'\n\n' % exc.output.decode("utf-8")))
verbose.report(report, 'debug')
os.remove(tmpfile)
shutil.move(psfile, tmpfile)
if eps:
pstoeps(tmpfile)
for fname in glob.glob(tmpfile+'.*'):
os.remove(fname)
def get_bbox_header(lbrt, rotated=False):
"""
Return a postscript header string for the given bbox lbrt=(l, b, r, t).
Optionally, return the rotate command.
"""
l, b, r, t = lbrt
if rotated:
rotate = "%.2f %.2f translate\n90 rotate" % (l+r, 0)
else:
rotate = ""
bbox_info = '%%%%BoundingBox: %d %d %d %d' % (l, b, np.ceil(r), np.ceil(t))
hires_bbox_info = '%%%%HiResBoundingBox: %.6f %.6f %.6f %.6f' % (l, b, r, t)
return '\n'.join([bbox_info, hires_bbox_info]), rotate
# get_bbox is deprecated. I don't see any reason to use ghostscript to
# find the bounding box, as the required bounding box is already known.
def get_bbox(tmpfile, bbox):
"""
Use ghostscript's bbox device to find the center of the bounding box.
Return an appropriately sized bbox centered around that point. A bit of a
hack.
"""
gs_exe = ps_backend_helper.gs_exe
command = [gs_exe, "-dBATCH", "-dNOPAUSE", "-sDEVICE=bbox", "%s" % tmpfile]
verbose.report(command, 'debug')
p = subprocess.Popen(command, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
close_fds=True)
(stdout, stderr) = (p.stdout, p.stderr)
verbose.report(stdout.read(), 'debug-annoying')
bbox_info = stderr.read()
verbose.report(bbox_info, 'helpful')
bbox_found = re.search('%%HiResBoundingBox: .*', bbox_info)
if bbox_found:
bbox_info = bbox_found.group()
else:
raise RuntimeError('Ghostscript was not able to extract a bounding box.\
Here is the Ghostscript output:\n\n%s' % bbox_info)
l, b, r, t = [float(i) for i in bbox_info.split()[-4:]]
# this is a hack to deal with the fact that ghostscript does not return the
# intended bbox, but a tight bbox. For now, we just center the ink in the
# intended bbox. This is not ideal, users may intend the ink to not be
# centered.
if bbox is None:
l, b, r, t = (l-1, b-1, r+1, t+1)
else:
x = (l+r)/2
y = (b+t)/2
dx = (bbox[2]-bbox[0])/2
dy = (bbox[3]-bbox[1])/2
l,b,r,t = (x-dx, y-dy, x+dx, y+dy)
bbox_info = '%%%%BoundingBox: %d %d %d %d' % (l, b, np.ceil(r), np.ceil(t))
hires_bbox_info = '%%%%HiResBoundingBox: %.6f %.6f %.6f %.6f' % (l, b, r, t)
return '\n'.join([bbox_info, hires_bbox_info])
def pstoeps(tmpfile, bbox=None, rotated=False):
"""
Convert the postscript to encapsulated postscript. The bbox of
the eps file will be replaced with the given *bbox* argument. If
None, original bbox will be used.
"""
# if rotated==True, the output eps file needs to be rotated
if bbox:
bbox_info, rotate = get_bbox_header(bbox, rotated=rotated)
else:
bbox_info, rotate = None, None
epsfile = tmpfile + '.eps'
with io.open(epsfile, 'wb') as epsh, io.open(tmpfile, 'rb') as tmph:
write = epsh.write
# Modify the header:
for line in tmph:
if line.startswith(b'%!PS'):
write(b"%!PS-Adobe-3.0 EPSF-3.0\n")
if bbox:
write(bbox_info.encode('ascii') + b'\n')
elif line.startswith(b'%%EndComments'):
write(line)
write(b'%%BeginProlog\n'
b'save\n'
b'countdictstack\n'
b'mark\n'
b'newpath\n'
b'/showpage {} def\n'
b'/setpagedevice {pop} def\n'
b'%%EndProlog\n'
b'%%Page 1 1\n')
if rotate:
write(rotate.encode('ascii') + b'\n')
break
elif bbox and line.startswith((b'%%Bound', b'%%HiResBound',
b'%%DocumentMedia', b'%%Pages')):
pass
else:
write(line)
# Now rewrite the rest of the file, and modify the trailer.
# This is done in a second loop such that the header of the embedded
# eps file is not modified.
for line in tmph:
if line.startswith(b'%%EOF'):
write(b'cleartomark\n'
b'countdictstack\n'
b'exch sub { end } repeat\n'
b'restore\n'
b'showpage\n'
b'%%EOF\n')
elif line.startswith(b'%%PageBoundingBox'):
pass
else:
write(line)
os.remove(tmpfile)
shutil.move(epsfile, tmpfile)
class FigureManagerPS(FigureManagerBase):
pass
# The following Python dictionary psDefs contains the entries for the
# PostScript dictionary mpldict. This dictionary implements most of
# the matplotlib primitives and some abbreviations.
#
# References:
# http://www.adobe.com/products/postscript/pdfs/PLRM.pdf
# http://www.mactech.com/articles/mactech/Vol.09/09.04/PostscriptTutorial/
# http://www.math.ubc.ca/people/faculty/cass/graphics/text/www/
#
# The usage comments use the notation of the operator summary
# in the PostScript Language reference manual.
psDefs = [
# x y *m* -
"/m { moveto } bind def",
# x y *l* -
"/l { lineto } bind def",
# x y *r* -
"/r { rlineto } bind def",
# x1 y1 x2 y2 x y *c* -
"/c { curveto } bind def",
# *closepath* -
"/cl { closepath } bind def",
# w h x y *box* -
"""/box {
m
1 index 0 r
0 exch r
neg 0 r
cl
} bind def""",
# w h x y *clipbox* -
"""/clipbox {
box
clip
newpath
} bind def""",
]
@_Backend.export
class _BackendPS(_Backend):
FigureCanvas = FigureCanvasPS
FigureManager = FigureManagerPS
| mit |
ngoix/OCRF | sklearn/utils/tests/test_random.py | 85 | 7349 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from scipy.misc import comb as combinations
from numpy.testing import assert_array_almost_equal
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.random import random_choice_csc
from sklearn.utils.testing import (
assert_raises,
assert_equal,
assert_true)
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown")
def test_sample_without_replacement_algorithms():
methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")
for m in methods:
def sample_without_replacement_method(n_population, n_samples,
random_state=None):
return sample_without_replacement(n_population, n_samples,
method=m,
random_state=random_state)
check_edge_case_of_sample_int(sample_without_replacement_method)
check_sample_int(sample_without_replacement_method)
check_sample_int_distribution(sample_without_replacement_method)
def check_edge_case_of_sample_int(sample_without_replacement):
# n_population < n_sample
assert_raises(ValueError, sample_without_replacement, 0, 1)
assert_raises(ValueError, sample_without_replacement, 1, 2)
# n_population == n_samples
assert_equal(sample_without_replacement(0, 0).shape, (0, ))
assert_equal(sample_without_replacement(1, 1).shape, (1, ))
# n_population >= n_samples
assert_equal(sample_without_replacement(5, 0).shape, (0, ))
assert_equal(sample_without_replacement(5, 1).shape, (1, ))
# n_population < 0 or n_samples < 0
assert_raises(ValueError, sample_without_replacement, -1, 5)
assert_raises(ValueError, sample_without_replacement, 5, -1)
def check_sample_int(sample_without_replacement):
# This test is heavily inspired by test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# the sample is of the correct length and contains only unique items
n_population = 100
for n_samples in range(n_population + 1):
s = sample_without_replacement(n_population, n_samples)
assert_equal(len(s), n_samples)
unique = np.unique(s)
assert_equal(np.size(unique), n_samples)
assert_true(np.all(unique < n_population))
# test edge case n_population == n_samples == 0
assert_equal(np.size(sample_without_replacement(0, 0)), 0)
def check_sample_int_distribution(sample_without_replacement):
# This test is heavily inspired by test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# sample generates all possible permutations
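# (Illustrative count: with n_population = 10 and n_samples = 3 there are
# C(10, 3) = 120 possible combinations, so n_expected below would be 120
# for that iteration of the loop.)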
n_population = 10
# a large number of trials prevents false negatives without slowing normal
# case
n_trials = 10000
for n_samples in range(n_population):
# Counting the number of combinations is not as good as counting the
# number of permutations. However, it works with sampling algorithms
# that do not provide a random permutation of the subset of integers.
n_expected = combinations(n_population, n_samples, exact=True)
output = {}
for i in range(n_trials):
output[frozenset(sample_without_replacement(n_population,
n_samples))] = None
if len(output) == n_expected:
break
else:
raise AssertionError(
"number of combinations != number of expected (%s != %s)" %
(len(output), n_expected))
def test_random_choice_csc(n_samples=10000, random_state=24):
# Explicit class probabilities
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Implicit class probabilities
classes = [[0, 1], [1, 2]] # test for array-like support
class_probabilites = [np.array([0.5, 0.5]), np.array([0, 1/2, 1/2])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Edge case probabilities 1.0 and 0.0
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel(),
minlength=len(class_probabilites[k])) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# One class target data
classes = [[1], [0]] # test for array-like support
class_probabilites = [np.array([0.0, 1.0]), np.array([1.0])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
def test_random_choice_csc_errors():
# the length of an array in classes and class_probabilites is mismatched
classes = [np.array([0, 1]), np.array([0, 1, 2, 3])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array(["a", "1"]), np.array(["z", "1", "2"])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# Given probabilities don't sum to 1
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
| bsd-3-clause |
darshanthaker/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/font_manager.py | 69 | 42655 | """
A module for finding, managing, and using fonts across platforms.
This module provides a single :class:`FontManager` instance that can
be shared across backends and platforms. The :func:`findfont`
function returns the best TrueType (TTF) font file in the local or
system font path that matches the specified :class:`FontProperties`
instance. The :class:`FontManager` also handles Adobe Font Metrics
(AFM) font files for use by the PostScript backend.
The design is based on the `W3C Cascading Style Sheet, Level 1 (CSS1)
font specification <http://www.w3.org/TR/1998/REC-CSS2-19980512/>`_.
Future versions may implement the Level 2 or 2.1 specifications.
Experimental support is included for using `fontconfig
<http://www.fontconfig.org>`_ on Unix variant platforms (Linux, OS X,
Solaris). To enable it, set the constant ``USE_FONTCONFIG`` in this
file to ``True``. Fontconfig has the advantage that it is the
standard way to look up fonts on X11 platforms, so if a font is
installed, it is much more likely to be found.
"""
"""
KNOWN ISSUES
- documentation
- font variant is untested
- font stretch is incomplete
- font size is incomplete
- font size_adjust is incomplete
- default font algorithm needs improvement and testing
- setWeights function needs improvement
- 'light' is an invalid weight value, remove it.
- update_fonts not implemented
Authors : John Hunter <[email protected]>
Paul Barrett <[email protected]>
Michael Droettboom <[email protected]>
Copyright : John Hunter (2004,2005), Paul Barrett (2004,2005)
License : matplotlib license (PSF compatible)
The font directory code is from ttfquery,
see license/LICENSE_TTFQUERY.
"""
import os, sys, glob
try:
set
except NameError:
from sets import Set as set
import matplotlib
from matplotlib import afm
from matplotlib import ft2font
from matplotlib import rcParams, get_configdir
from matplotlib.cbook import is_string_like
from matplotlib.fontconfig_pattern import \
parse_fontconfig_pattern, generate_fontconfig_pattern
try:
import cPickle as pickle
except ImportError:
import pickle
USE_FONTCONFIG = False
verbose = matplotlib.verbose
font_scalings = {
'xx-small' : 0.579,
'x-small' : 0.694,
'small' : 0.833,
'medium' : 1.0,
'large' : 1.200,
'x-large' : 1.440,
'xx-large' : 1.728,
'larger' : 1.2,
'smaller' : 0.833,
None : 1.0}
stretch_dict = {
'ultra-condensed' : 100,
'extra-condensed' : 200,
'condensed' : 300,
'semi-condensed' : 400,
'normal' : 500,
'semi-expanded' : 600,
'expanded' : 700,
'extra-expanded' : 800,
'ultra-expanded' : 900}
weight_dict = {
'ultralight' : 100,
'light' : 200,
'normal' : 400,
'regular' : 400,
'book' : 400,
'medium' : 500,
'roman' : 500,
'semibold' : 600,
'demibold' : 600,
'demi' : 600,
'bold' : 700,
'heavy' : 800,
'extra bold' : 800,
'black' : 900}
font_family_aliases = set([
'serif',
'sans-serif',
'cursive',
'fantasy',
'monospace',
'sans'])
# OS Font paths
MSFolders = \
r'Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders'
MSFontDirectories = [
r'SOFTWARE\Microsoft\Windows NT\CurrentVersion\Fonts',
r'SOFTWARE\Microsoft\Windows\CurrentVersion\Fonts']
X11FontDirectories = [
# an old standard installation point
"/usr/X11R6/lib/X11/fonts/TTF/",
# here is the new standard location for fonts
"/usr/share/fonts/",
# documented as a good place to install new fonts
"/usr/local/share/fonts/",
# common application, not really useful
"/usr/lib/openoffice/share/fonts/truetype/",
]
OSXFontDirectories = [
"/Library/Fonts/",
"/Network/Library/Fonts/",
"/System/Library/Fonts/"
]
if not USE_FONTCONFIG:
home = os.environ.get('HOME')
if home is not None:
# user fonts on OSX
path = os.path.join(home, 'Library', 'Fonts')
OSXFontDirectories.append(path)
path = os.path.join(home, '.fonts')
X11FontDirectories.append(path)
def get_fontext_synonyms(fontext):
"""
Return a list of file extensions that are synonyms for
the given file extension *fontext*.
"""
return {'ttf': ('ttf', 'otf'),
'otf': ('ttf', 'otf'),
'afm': ('afm',)}[fontext]
def win32FontDirectory():
"""
Return the user-specified font directory for Win32. This is
looked up from the registry key::
\\HKEY_CURRENT_USER\Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders\Fonts
If the key is not found, $WINDIR/Fonts will be returned.
"""
try:
import _winreg
except ImportError:
pass # Fall through to default
else:
try:
user = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, MSFolders)
try:
try:
return _winreg.QueryValueEx(user, 'Fonts')[0]
except OSError:
pass # Fall through to default
finally:
_winreg.CloseKey(user)
except OSError:
pass # Fall through to default
return os.path.join(os.environ['WINDIR'], 'Fonts')
def win32InstalledFonts(directory=None, fontext='ttf'):
"""
Search for fonts in the specified font directory, or use the
system directories if none given. A list of TrueType font
filenames is returned by default, or AFM fonts if *fontext* ==
'afm'.
"""
import _winreg
if directory is None:
directory = win32FontDirectory()
fontext = get_fontext_synonyms(fontext)
key, items = None, {}
for fontdir in MSFontDirectories:
try:
local = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, fontdir)
except OSError:
continue
if not local:
files = []
for ext in fontext:
files.extend(glob.glob(os.path.join(directory, '*.'+ext)))
return files
try:
for j in range(_winreg.QueryInfoKey(local)[1]):
try:
key, direc, any = _winreg.EnumValue( local, j)
if not os.path.dirname(direc):
direc = os.path.join(directory, direc)
direc = os.path.abspath(direc).lower()
if os.path.splitext(direc)[1][1:] in fontext:
items[direc] = 1
except EnvironmentError:
continue
except WindowsError:
continue
return items.keys()
finally:
_winreg.CloseKey(local)
return None
def OSXFontDirectory():
"""
Return the system font directories for OS X. This is done by
starting at the list of hardcoded paths in
:attr:`OSXFontDirectories` and returning all nested directories
within them.
"""
fontpaths = []
def add(arg,directory,files):
fontpaths.append(directory)
for fontdir in OSXFontDirectories:
try:
if os.path.isdir(fontdir):
os.path.walk(fontdir, add, None)
except (IOError, OSError, TypeError, ValueError):
pass
return fontpaths
def OSXInstalledFonts(directory=None, fontext='ttf'):
"""
Get list of font files on OS X - ignores font suffix by default.
"""
if directory is None:
directory = OSXFontDirectory()
fontext = get_fontext_synonyms(fontext)
files = []
for path in directory:
if fontext is None:
files.extend(glob.glob(os.path.join(path,'*')))
else:
for ext in fontext:
files.extend(glob.glob(os.path.join(path, '*.'+ext)))
files.extend(glob.glob(os.path.join(path, '*.'+ext.upper())))
return files
def x11FontDirectory():
"""
Return the system font directories for X11. This is done by
starting at the list of hardcoded paths in
:attr:`X11FontDirectories` and returning all nested directories
within them.
"""
fontpaths = []
def add(arg,directory,files):
fontpaths.append(directory)
for fontdir in X11FontDirectories:
try:
if os.path.isdir(fontdir):
os.path.walk(fontdir, add, None)
except (IOError, OSError, TypeError, ValueError):
pass
return fontpaths
def get_fontconfig_fonts(fontext='ttf'):
"""
Grab a list of all the fonts that are being tracked by fontconfig
by making a system call to ``fc-list``. This is an easy way to
grab all of the fonts the user wants to be made available to
applications, without needing to know where all of them reside.
"""
try:
import commands
except ImportError:
return {}
fontext = get_fontext_synonyms(fontext)
fontfiles = {}
status, output = commands.getstatusoutput("fc-list file")
if status == 0:
for line in output.split('\n'):
fname = line.split(':')[0]
if (os.path.splitext(fname)[1][1:] in fontext and
os.path.exists(fname)):
fontfiles[fname] = 1
return fontfiles
def findSystemFonts(fontpaths=None, fontext='ttf'):
"""
Search for fonts in the specified font paths. If no paths are
given, a standard set of system paths is used, as well as the
list of fonts tracked by fontconfig if fontconfig is installed and
available. A list of TrueType fonts is returned by default, with
AFM fonts as an option.
"""
fontfiles = {}
fontexts = get_fontext_synonyms(fontext)
if fontpaths is None:
if sys.platform == 'win32':
fontdir = win32FontDirectory()
fontpaths = [fontdir]
# now get all installed fonts directly...
for f in win32InstalledFonts(fontdir):
base, ext = os.path.splitext(f)
if len(ext)>1 and ext[1:].lower() in fontexts:
fontfiles[f] = 1
else:
fontpaths = x11FontDirectory()
# check for OS X & load its fonts if present
if sys.platform == 'darwin':
for f in OSXInstalledFonts(fontext=fontext):
fontfiles[f] = 1
for f in get_fontconfig_fonts(fontext):
fontfiles[f] = 1
elif isinstance(fontpaths, (str, unicode)):
fontpaths = [fontpaths]
for path in fontpaths:
files = []
for ext in fontexts:
files.extend(glob.glob(os.path.join(path, '*.'+ext)))
files.extend(glob.glob(os.path.join(path, '*.'+ext.upper())))
for fname in files:
fontfiles[os.path.abspath(fname)] = 1
return [fname for fname in fontfiles.keys() if os.path.exists(fname)]
def weight_as_number(weight):
"""
Return the weight property as a numeric value. String values
are converted to their corresponding numeric value.
"""
if isinstance(weight, str):
try:
weight = weight_dict[weight.lower()]
except KeyError:
weight = 400
elif weight in range(100, 1000, 100):
pass
else:
raise ValueError, 'weight not a valid integer'
return weight
class FontEntry(object):
"""
A class for storing Font properties. It is used when populating
the font lookup dictionary.
"""
def __init__(self,
fname ='',
name ='',
style ='normal',
variant='normal',
weight ='normal',
stretch='normal',
size ='medium',
):
self.fname = fname
self.name = name
self.style = style
self.variant = variant
self.weight = weight
self.stretch = stretch
try:
self.size = str(float(size))
except ValueError:
self.size = size
def ttfFontProperty(font):
"""
A function for populating the :class:`FontKey` by extracting
information from the TrueType font file.
*font* is a :class:`FT2Font` instance.
"""
name = font.family_name
# Styles are: italic, oblique, and normal (default)
sfnt = font.get_sfnt()
sfnt2 = sfnt.get((1,0,0,2))
sfnt4 = sfnt.get((1,0,0,4))
if sfnt2:
sfnt2 = sfnt2.lower()
else:
sfnt2 = ''
if sfnt4:
sfnt4 = sfnt4.lower()
else:
sfnt4 = ''
if sfnt4.find('oblique') >= 0:
style = 'oblique'
elif sfnt4.find('italic') >= 0:
style = 'italic'
elif sfnt2.find('regular') >= 0:
style = 'normal'
elif font.style_flags & ft2font.ITALIC:
style = 'italic'
else:
style = 'normal'
# Variants are: small-caps and normal (default)
# !!!! Untested
if name.lower() in ['capitals', 'small-caps']:
variant = 'small-caps'
else:
variant = 'normal'
# Weights are: 100, 200, 300, 400 (normal: default), 500 (medium),
# 600 (semibold, demibold), 700 (bold), 800 (heavy), 900 (black)
# lighter and bolder are also allowed.
weight = None
for w in weight_dict.keys():
if sfnt4.find(w) >= 0:
weight = w
break
if not weight:
if font.style_flags & ft2font.BOLD:
weight = 700
else:
weight = 400
weight = weight_as_number(weight)
# Stretch can be absolute and relative
# Absolute stretches are: ultra-condensed, extra-condensed, condensed,
# semi-condensed, normal, semi-expanded, expanded, extra-expanded,
# and ultra-expanded.
# Relative stretches are: wider, narrower
# Child value is: inherit
# !!!! Incomplete
if sfnt4.find('narrow') >= 0 or sfnt4.find('condensed') >= 0 or \
sfnt4.find('cond') >= 0:
stretch = 'condensed'
elif sfnt4.find('demi cond') >= 0:
stretch = 'semi-condensed'
elif sfnt4.find('wide') >= 0 or sfnt4.find('expanded') >= 0:
stretch = 'expanded'
else:
stretch = 'normal'
# Sizes can be absolute and relative.
# Absolute sizes are: xx-small, x-small, small, medium, large, x-large,
# and xx-large.
# Relative sizes are: larger, smaller
# Length value is an absolute font size, e.g. 12pt
# Percentage values are in 'em's. Most robust specification.
# !!!! Incomplete
if font.scalable:
size = 'scalable'
else:
size = str(float(font.get_fontsize()))
# !!!! Incomplete
size_adjust = None
return FontEntry(font.fname, name, style, variant, weight, stretch, size)
def afmFontProperty(fontpath, font):
"""
    A function for populating a :class:`FontEntry` instance by
    extracting information from the AFM font file.
    *font* is a :class:`AFM` instance.
"""
name = font.get_familyname()
# Styles are: italic, oblique, and normal (default)
if font.get_angle() != 0 or name.lower().find('italic') >= 0:
style = 'italic'
elif name.lower().find('oblique') >= 0:
style = 'oblique'
else:
style = 'normal'
# Variants are: small-caps and normal (default)
# !!!! Untested
if name.lower() in ['capitals', 'small-caps']:
variant = 'small-caps'
else:
variant = 'normal'
# Weights are: 100, 200, 300, 400 (normal: default), 500 (medium),
# 600 (semibold, demibold), 700 (bold), 800 (heavy), 900 (black)
# lighter and bolder are also allowed.
weight = weight_as_number(font.get_weight().lower())
# Stretch can be absolute and relative
# Absolute stretches are: ultra-condensed, extra-condensed, condensed,
# semi-condensed, normal, semi-expanded, expanded, extra-expanded,
# and ultra-expanded.
# Relative stretches are: wider, narrower
# Child value is: inherit
# !!!! Incomplete
stretch = 'normal'
# Sizes can be absolute and relative.
# Absolute sizes are: xx-small, x-small, small, medium, large, x-large,
# and xx-large.
# Relative sizes are: larger, smaller
# Length value is an absolute font size, e.g. 12pt
# Percentage values are in 'em's. Most robust specification.
# All AFM fonts are apparently scalable.
size = 'scalable'
# !!!! Incomplete
size_adjust = None
return FontEntry(fontpath, name, style, variant, weight, stretch, size)
def createFontList(fontfiles, fontext='ttf'):
"""
A function to create a font lookup list. The default is to create
a list of TrueType fonts. An AFM font list can optionally be
created.
"""
fontlist = []
# Add fonts from list of known font files.
seen = {}
for fpath in fontfiles:
verbose.report('createFontDict: %s' % (fpath), 'debug')
fname = os.path.split(fpath)[1]
if fname in seen: continue
else: seen[fname] = 1
if fontext == 'afm':
try:
fh = open(fpath, 'r')
except:
verbose.report("Could not open font file %s" % fpath)
continue
try:
try:
font = afm.AFM(fh)
finally:
fh.close()
except RuntimeError:
verbose.report("Could not parse font file %s"%fpath)
continue
prop = afmFontProperty(fpath, font)
else:
try:
font = ft2font.FT2Font(str(fpath))
except RuntimeError:
verbose.report("Could not open font file %s"%fpath)
continue
except UnicodeError:
verbose.report("Cannot handle unicode filenames")
#print >> sys.stderr, 'Bad file is', fpath
continue
try: prop = ttfFontProperty(font)
except: continue
fontlist.append(prop)
return fontlist
class FontProperties(object):
"""
A class for storing and manipulating font properties.
The font properties are those described in the `W3C Cascading
Style Sheet, Level 1
<http://www.w3.org/TR/1998/REC-CSS2-19980512/>`_ font
specification. The six properties are:
- family: A list of font names in decreasing order of priority.
The items may include a generic font family name, either
'serif', 'sans-serif', 'cursive', 'fantasy', or 'monospace'.
In that case, the actual font to be used will be looked up
from the associated rcParam in :file:`matplotlibrc`.
- style: Either 'normal', 'italic' or 'oblique'.
- variant: Either 'normal' or 'small-caps'.
- stretch: A numeric value in the range 0-1000 or one of
'ultra-condensed', 'extra-condensed', 'condensed',
'semi-condensed', 'normal', 'semi-expanded', 'expanded',
'extra-expanded' or 'ultra-expanded'
- weight: A numeric value in the range 0-1000 or one of
'ultralight', 'light', 'normal', 'regular', 'book', 'medium',
'roman', 'semibold', 'demibold', 'demi', 'bold', 'heavy',
'extra bold', 'black'
    - size: Either a relative value of 'xx-small', 'x-small',
'small', 'medium', 'large', 'x-large', 'xx-large' or an
absolute font size, e.g. 12
The default font property for TrueType fonts (as specified in the
default :file:`matplotlibrc` file) is::
sans-serif, normal, normal, normal, normal, scalable.
Alternatively, a font may be specified using an absolute path to a
.ttf file, by using the *fname* kwarg.
The preferred usage of font sizes is to use the relative values,
e.g. 'large', instead of absolute font sizes, e.g. 12. This
approach allows all text sizes to be made larger or smaller based
on the font manager's default font size, i.e. by using the
:meth:`FontManager.set_default_size` method.
This class will also accept a `fontconfig
<http://www.fontconfig.org/>`_ pattern, if it is the only argument
provided. See the documentation on `fontconfig patterns
<http://www.fontconfig.org/fontconfig-user.html>`_. This support
does not require fontconfig to be installed. We are merely
borrowing its pattern syntax for use here.
Note that matplotlib's internal font manager and fontconfig use a
different algorithm to lookup fonts, so the results of the same pattern
may be different in matplotlib than in other applications that use
fontconfig.
"""
def __init__(self,
family = None,
style = None,
variant= None,
weight = None,
stretch= None,
size = None,
fname = None, # if this is set, it's a hardcoded filename to use
_init = None # used only by copy()
):
self._family = None
self._slant = None
self._variant = None
self._weight = None
self._stretch = None
self._size = None
self._file = None
# This is used only by copy()
if _init is not None:
self.__dict__.update(_init.__dict__)
return
if is_string_like(family):
# Treat family as a fontconfig pattern if it is the only
# parameter provided.
if (style is None and
variant is None and
weight is None and
stretch is None and
size is None and
fname is None):
self.set_fontconfig_pattern(family)
return
self.set_family(family)
self.set_style(style)
self.set_variant(variant)
self.set_weight(weight)
self.set_stretch(stretch)
self.set_file(fname)
self.set_size(size)
def _parse_fontconfig_pattern(self, pattern):
return parse_fontconfig_pattern(pattern)
def __hash__(self):
l = self.__dict__.items()
l.sort()
return hash(repr(l))
def __str__(self):
return self.get_fontconfig_pattern()
def get_family(self):
"""
Return a list of font names that comprise the font family.
"""
if self._family is None:
family = rcParams['font.family']
if is_string_like(family):
return [family]
return family
return self._family
def get_name(self):
"""
Return the name of the font that best matches the font
properties.
"""
return ft2font.FT2Font(str(findfont(self))).family_name
def get_style(self):
"""
Return the font style. Values are: 'normal', 'italic' or
'oblique'.
"""
if self._slant is None:
return rcParams['font.style']
return self._slant
get_slant = get_style
def get_variant(self):
"""
Return the font variant. Values are: 'normal' or
'small-caps'.
"""
if self._variant is None:
return rcParams['font.variant']
return self._variant
def get_weight(self):
"""
        Return the font weight.  Options are: a numeric value in the
range 0-1000 or one of 'light', 'normal', 'regular', 'book',
'medium', 'roman', 'semibold', 'demibold', 'demi', 'bold',
'heavy', 'extra bold', 'black'
"""
if self._weight is None:
return rcParams['font.weight']
return self._weight
def get_stretch(self):
"""
Return the font stretch or width. Options are: 'ultra-condensed',
'extra-condensed', 'condensed', 'semi-condensed', 'normal',
'semi-expanded', 'expanded', 'extra-expanded', 'ultra-expanded'.
"""
if self._stretch is None:
return rcParams['font.stretch']
return self._stretch
def get_size(self):
"""
Return the font size.
"""
if self._size is None:
return rcParams['font.size']
return self._size
def get_size_in_points(self):
if self._size is not None:
try:
return float(self._size)
except ValueError:
pass
default_size = fontManager.get_default_size()
return default_size * font_scalings.get(self._size)
def get_file(self):
"""
Return the filename of the associated font.
"""
return self._file
def get_fontconfig_pattern(self):
"""
Get a fontconfig pattern suitable for looking up the font as
specified with fontconfig's ``fc-match`` utility.
See the documentation on `fontconfig patterns
<http://www.fontconfig.org/fontconfig-user.html>`_.
This support does not require fontconfig to be installed or
support for it to be enabled. We are merely borrowing its
pattern syntax for use here.
"""
return generate_fontconfig_pattern(self)
def set_family(self, family):
"""
Change the font family. May be either an alias (generic name
        in CSS parlance), such as: 'serif', 'sans-serif', 'cursive',
'fantasy', or 'monospace', or a real font name.
"""
if family is None:
self._family = None
else:
if is_string_like(family):
family = [family]
self._family = family
set_name = set_family
def set_style(self, style):
"""
Set the font style. Values are: 'normal', 'italic' or
'oblique'.
"""
if style not in ('normal', 'italic', 'oblique', None):
raise ValueError("style must be normal, italic or oblique")
self._slant = style
set_slant = set_style
def set_variant(self, variant):
"""
Set the font variant. Values are: 'normal' or 'small-caps'.
"""
if variant not in ('normal', 'small-caps', None):
raise ValueError("variant must be normal or small-caps")
self._variant = variant
def set_weight(self, weight):
"""
Set the font weight. May be either a numeric value in the
range 0-1000 or one of 'ultralight', 'light', 'normal',
'regular', 'book', 'medium', 'roman', 'semibold', 'demibold',
'demi', 'bold', 'heavy', 'extra bold', 'black'
"""
if weight is not None:
try:
weight = int(weight)
if weight < 0 or weight > 1000:
raise ValueError()
except ValueError:
if weight not in weight_dict:
raise ValueError("weight is invalid")
self._weight = weight
def set_stretch(self, stretch):
"""
Set the font stretch or width. Options are: 'ultra-condensed',
'extra-condensed', 'condensed', 'semi-condensed', 'normal',
'semi-expanded', 'expanded', 'extra-expanded' or
'ultra-expanded', or a numeric value in the range 0-1000.
"""
if stretch is not None:
try:
stretch = int(stretch)
if stretch < 0 or stretch > 1000:
raise ValueError()
except ValueError:
if stretch not in stretch_dict:
raise ValueError("stretch is invalid")
self._stretch = stretch
def set_size(self, size):
"""
        Set the font size.  Either a relative value of 'xx-small',
'x-small', 'small', 'medium', 'large', 'x-large', 'xx-large'
or an absolute font size, e.g. 12.
"""
if size is not None:
try:
size = float(size)
except ValueError:
if size is not None and size not in font_scalings:
raise ValueError("size is invalid")
self._size = size
def set_file(self, file):
"""
Set the filename of the fontfile to use. In this case, all
other properties will be ignored.
"""
self._file = file
def set_fontconfig_pattern(self, pattern):
"""
Set the properties by parsing a fontconfig *pattern*.
See the documentation on `fontconfig patterns
<http://www.fontconfig.org/fontconfig-user.html>`_.
This support does not require fontconfig to be installed or
support for it to be enabled. We are merely borrowing its
pattern syntax for use here.
"""
for key, val in self._parse_fontconfig_pattern(pattern).items():
if type(val) == list:
getattr(self, "set_" + key)(val[0])
else:
getattr(self, "set_" + key)(val)
def copy(self):
"""Return a deep copy of self"""
return FontProperties(_init = self)
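# Illustrative usage sketch (not part of the original module), showing the two
# ways FontProperties is typically constructed: from keyword arguments, or by
# round-tripping through a fontconfig pattern string via the single-argument
# constructor described in the class docstring.
def _example_fontproperties_usage():
    prop = FontProperties(family='sans-serif', style='italic', weight='bold')
    # Unset properties (e.g. size) fall back to the rcParams defaults.
    pattern = prop.get_fontconfig_pattern()
    # The generated pattern can be fed back to the single-argument constructor.
    prop_from_pattern = FontProperties(pattern)
    return prop.get_style(), prop_from_pattern.get_weight()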
def ttfdict_to_fnames(d):
"""
flatten a ttfdict to all the filenames it contains
"""
fnames = []
for named in d.values():
for styled in named.values():
for variantd in styled.values():
for weightd in variantd.values():
for stretchd in weightd.values():
for fname in stretchd.values():
fnames.append(fname)
return fnames
def pickle_dump(data, filename):
"""
Equivalent to pickle.dump(data, open(filename, 'w'))
but closes the file to prevent filehandle leakage.
"""
fh = open(filename, 'w')
try:
pickle.dump(data, fh)
finally:
fh.close()
def pickle_load(filename):
"""
Equivalent to pickle.load(open(filename, 'r'))
but closes the file to prevent filehandle leakage.
"""
fh = open(filename, 'r')
try:
data = pickle.load(fh)
finally:
fh.close()
return data
class FontManager:
"""
On import, the :class:`FontManager` singleton instance creates a
list of TrueType fonts based on the font properties: name, style,
variant, weight, stretch, and size. The :meth:`findfont` method
does a nearest neighbor search to find the font that most closely
matches the specification. If no good enough match is found, a
default font is returned.
"""
def __init__(self, size=None, weight='normal'):
self.__default_weight = weight
self.default_size = size
paths = [os.path.join(rcParams['datapath'], 'fonts', 'ttf'),
os.path.join(rcParams['datapath'], 'fonts', 'afm')]
# Create list of font paths
for pathname in ['TTFPATH', 'AFMPATH']:
if pathname in os.environ:
ttfpath = os.environ[pathname]
if ttfpath.find(';') >= 0: #win32 style
paths.extend(ttfpath.split(';'))
elif ttfpath.find(':') >= 0: # unix style
paths.extend(ttfpath.split(':'))
else:
paths.append(ttfpath)
verbose.report('font search path %s'%(str(paths)))
# Load TrueType fonts and create font dictionary.
self.ttffiles = findSystemFonts(paths) + findSystemFonts()
for fname in self.ttffiles:
verbose.report('trying fontname %s' % fname, 'debug')
if fname.lower().find('vera.ttf')>=0:
self.defaultFont = fname
break
else:
# use anything
self.defaultFont = self.ttffiles[0]
self.ttflist = createFontList(self.ttffiles)
if rcParams['pdf.use14corefonts']:
# Load only the 14 PDF core fonts. These fonts do not need to be
# embedded; every PDF viewing application is required to have them:
# Helvetica, Helvetica-Bold, Helvetica-Oblique, Helvetica-BoldOblique,
# Courier, Courier-Bold, Courier-Oblique, Courier-BoldOblique,
# Times-Roman, Times-Bold, Times-Italic, Times-BoldItalic, Symbol,
# ZapfDingbats.
afmpath = os.path.join(rcParams['datapath'],'fonts','pdfcorefonts')
afmfiles = findSystemFonts(afmpath, fontext='afm')
self.afmlist = createFontList(afmfiles, fontext='afm')
else:
self.afmfiles = findSystemFonts(paths, fontext='afm') + \
findSystemFonts(fontext='afm')
self.afmlist = createFontList(self.afmfiles, fontext='afm')
self.ttf_lookup_cache = {}
self.afm_lookup_cache = {}
def get_default_weight(self):
"""
Return the default font weight.
"""
return self.__default_weight
def get_default_size(self):
"""
Return the default font size.
"""
if self.default_size is None:
return rcParams['font.size']
return self.default_size
def set_default_weight(self, weight):
"""
Set the default font weight. The initial value is 'normal'.
"""
self.__default_weight = weight
def set_default_size(self, size):
"""
Set the default font size in points. The initial value is set
by ``font.size`` in rc.
"""
self.default_size = size
def update_fonts(self, filenames):
"""
Update the font dictionary with new font files.
Currently not implemented.
"""
# !!!! Needs implementing
raise NotImplementedError
# Each of the scoring functions below should return a value between
# 0.0 (perfect match) and 1.0 (terrible match)
def score_family(self, families, family2):
"""
Returns a match score between the list of font families in
*families* and the font family name *family2*.
An exact match anywhere in the list returns 0.0.
        A match by a generic font name returns a value between 0.0 and 0.1,
        depending on the position of *family2* in the corresponding rcParams list.
No match will return 1.0.
"""
for i, family1 in enumerate(families):
if family1.lower() in font_family_aliases:
if family1 == 'sans':
                    family1 = 'sans-serif'  # normalize the 'sans' alias
options = rcParams['font.' + family1]
if family2 in options:
idx = options.index(family2)
return 0.1 * (float(idx) / len(options))
elif family1.lower() == family2.lower():
return 0.0
return 1.0
def score_style(self, style1, style2):
"""
Returns a match score between *style1* and *style2*.
An exact match returns 0.0.
A match between 'italic' and 'oblique' returns 0.1.
No match returns 1.0.
"""
if style1 == style2:
return 0.0
elif style1 in ('italic', 'oblique') and \
style2 in ('italic', 'oblique'):
return 0.1
return 1.0
def score_variant(self, variant1, variant2):
"""
Returns a match score between *variant1* and *variant2*.
An exact match returns 0.0, otherwise 1.0.
"""
if variant1 == variant2:
return 0.0
else:
return 1.0
def score_stretch(self, stretch1, stretch2):
"""
Returns a match score between *stretch1* and *stretch2*.
The result is the absolute value of the difference between the
CSS numeric values of *stretch1* and *stretch2*, normalized
between 0.0 and 1.0.
"""
try:
stretchval1 = int(stretch1)
except ValueError:
stretchval1 = stretch_dict.get(stretch1, 500)
try:
stretchval2 = int(stretch2)
except ValueError:
stretchval2 = stretch_dict.get(stretch2, 500)
return abs(stretchval1 - stretchval2) / 1000.0
def score_weight(self, weight1, weight2):
"""
Returns a match score between *weight1* and *weight2*.
The result is the absolute value of the difference between the
CSS numeric values of *weight1* and *weight2*, normalized
between 0.0 and 1.0.
"""
try:
weightval1 = int(weight1)
except ValueError:
weightval1 = weight_dict.get(weight1, 500)
try:
weightval2 = int(weight2)
except ValueError:
weightval2 = weight_dict.get(weight2, 500)
return abs(weightval1 - weightval2) / 1000.0
def score_size(self, size1, size2):
"""
Returns a match score between *size1* and *size2*.
If *size2* (the size specified in the font file) is 'scalable', this
function always returns 0.0, since any font size can be generated.
Otherwise, the result is the absolute distance between *size1* and
*size2*, normalized so that the usual range of font sizes (6pt -
72pt) will lie between 0.0 and 1.0.
"""
if size2 == 'scalable':
return 0.0
        # Size values should already have been resolved to points where possible.
        try:
            sizeval1 = float(size1)
        except ValueError:
            sizeval1 = self.get_default_size() * font_scalings.get(size1, 1.0)
try:
sizeval2 = float(size2)
except ValueError:
return 1.0
return abs(sizeval1 - sizeval2) / 72.0
def findfont(self, prop, fontext='ttf'):
"""
Search the font list for the font that most closely matches
the :class:`FontProperties` *prop*.
:meth:`findfont` performs a nearest neighbor search. Each
font is given a similarity score to the target font
properties. The first font with the highest score is
returned. If no matches below a certain threshold are found,
the default font (usually Vera Sans) is returned.
The result is cached, so subsequent lookups don't have to
perform the O(n) nearest neighbor search.
See the `W3C Cascading Style Sheet, Level 1
<http://www.w3.org/TR/1998/REC-CSS2-19980512/>`_ documentation
for a description of the font finding algorithm.
"""
debug = False
if prop is None:
return self.defaultFont
if is_string_like(prop):
prop = FontProperties(prop)
fname = prop.get_file()
if fname is not None:
verbose.report('findfont returning %s'%fname, 'debug')
return fname
if fontext == 'afm':
font_cache = self.afm_lookup_cache
fontlist = self.afmlist
else:
font_cache = self.ttf_lookup_cache
fontlist = self.ttflist
cached = font_cache.get(hash(prop))
if cached:
return cached
best_score = 1e64
best_font = None
for font in fontlist:
# Matching family should have highest priority, so it is multiplied
# by 10.0
score = \
self.score_family(prop.get_family(), font.name) * 10.0 + \
self.score_style(prop.get_style(), font.style) + \
self.score_variant(prop.get_variant(), font.variant) + \
self.score_weight(prop.get_weight(), font.weight) + \
self.score_stretch(prop.get_stretch(), font.stretch) + \
self.score_size(prop.get_size(), font.size)
if score < best_score:
best_score = score
best_font = font
if score == 0:
break
if best_font is None or best_score >= 10.0:
verbose.report('findfont: Could not match %s. Returning %s' %
(prop, self.defaultFont))
result = self.defaultFont
else:
verbose.report('findfont: Matching %s to %s (%s) with score of %f' %
(prop, best_font.name, best_font.fname, best_score))
result = best_font.fname
font_cache[hash(prop)] = result
return result
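# Illustrative sketch (not part of the original module): the composite score
# findfont computes for a single candidate.  'manager' is a FontManager,
# 'prop' a FontProperties instance and 'entry' a FontEntry from the font list;
# these names are assumptions for the example.  Family matching dominates
# because it is weighted ten times more heavily than the other properties.
def _example_findfont_score(manager, prop, entry):
    return (manager.score_family(prop.get_family(), entry.name) * 10.0
            + manager.score_style(prop.get_style(), entry.style)
            + manager.score_variant(prop.get_variant(), entry.variant)
            + manager.score_weight(prop.get_weight(), entry.weight)
            + manager.score_stretch(prop.get_stretch(), entry.stretch)
            + manager.score_size(prop.get_size(), entry.size))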
_is_opentype_cff_font_cache = {}
def is_opentype_cff_font(filename):
"""
Returns True if the given font is a Postscript Compact Font Format
Font embedded in an OpenType wrapper. Used by the PostScript and
    PDF backends that cannot subset these fonts.
"""
if os.path.splitext(filename)[1].lower() == '.otf':
result = _is_opentype_cff_font_cache.get(filename)
if result is None:
fd = open(filename, 'rb')
tag = fd.read(4)
fd.close()
result = (tag == 'OTTO')
_is_opentype_cff_font_cache[filename] = result
return result
return False
# The experimental fontconfig-based backend.
if USE_FONTCONFIG and sys.platform != 'win32':
import re
def fc_match(pattern, fontext):
import commands
fontexts = get_fontext_synonyms(fontext)
ext = "." + fontext
status, output = commands.getstatusoutput('fc-match -sv "%s"' % pattern)
if status == 0:
for match in _fc_match_regex.finditer(output):
file = match.group(1)
if os.path.splitext(file)[1][1:] in fontexts:
return file
return None
_fc_match_regex = re.compile(r'\sfile:\s+"([^"]*)"')
_fc_match_cache = {}
def findfont(prop, fontext='ttf'):
if not is_string_like(prop):
prop = prop.get_fontconfig_pattern()
cached = _fc_match_cache.get(prop)
if cached is not None:
return cached
result = fc_match(prop, fontext)
if result is None:
result = fc_match(':', fontext)
_fc_match_cache[prop] = result
return result
else:
_fmcache = os.path.join(get_configdir(), 'fontList.cache')
fontManager = None
def _rebuild():
global fontManager
fontManager = FontManager()
pickle_dump(fontManager, _fmcache)
verbose.report("generated new fontManager")
try:
fontManager = pickle_load(_fmcache)
fontManager.default_size = None
verbose.report("Using fontManager instance from %s" % _fmcache)
except:
_rebuild()
def findfont(prop, **kw):
global fontManager
font = fontManager.findfont(prop, **kw)
if not os.path.exists(font):
verbose.report("%s returned by pickled fontManager does not exist" % font)
_rebuild()
font = fontManager.findfont(prop, **kw)
return font
| agpl-3.0 |
appapantula/scikit-learn | examples/calibration/plot_calibration.py | 225 | 4795 | """
======================================
Probability calibration of classifiers
======================================
When performing classification you often want to predict not only
the class label, but also the associated probability. This probability
gives you some kind of confidence on the prediction. However, not all
classifiers provide well-calibrated probabilities, some being over-confident
while others being under-confident. Thus, a separate calibration of predicted
probabilities is often desirable as a postprocessing step. This example illustrates
two different methods for this calibration and evaluates the quality of the
returned probabilities using Brier's score
(see http://en.wikipedia.org/wiki/Brier_score).
Compared are the estimated probability using a Gaussian naive Bayes classifier
without calibration, with a sigmoid calibration, and with a non-parametric
isotonic calibration. One can observe that only the non-parametric model is able
to provide a probability calibration that returns probabilities close to the
expected 0.5 for most of the samples belonging to the middle cluster with
heterogeneous labels. This results in a significantly improved Brier score.
"""
print(__doc__)
# Author: Mathieu Blondel <[email protected]>
# Alexandre Gramfort <[email protected]>
# Balazs Kegl <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from sklearn.datasets import make_blobs
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import brier_score_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.cross_validation import train_test_split
n_samples = 50000
n_bins = 3 # use 3 bins for calibration_curve as we have 3 clusters here
# Generate 3 blobs with 2 classes where the second blob contains
# half positive samples and half negative samples. Probability in this
# blob is therefore 0.5.
centers = [(-5, -5), (0, 0), (5, 5)]
X, y = make_blobs(n_samples=n_samples, n_features=2, cluster_std=1.0,
centers=centers, shuffle=False, random_state=42)
y[:n_samples // 2] = 0
y[n_samples // 2:] = 1
sample_weight = np.random.RandomState(42).rand(y.shape[0])
# split train, test for calibration
X_train, X_test, y_train, y_test, sw_train, sw_test = \
train_test_split(X, y, sample_weight, test_size=0.9, random_state=42)
# Gaussian Naive-Bayes with no calibration
clf = GaussianNB()
clf.fit(X_train, y_train) # GaussianNB itself does not support sample-weights
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with isotonic calibration
clf_isotonic = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_isotonic.fit(X_train, y_train, sw_train)
prob_pos_isotonic = clf_isotonic.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with sigmoid calibration
clf_sigmoid = CalibratedClassifierCV(clf, cv=2, method='sigmoid')
clf_sigmoid.fit(X_train, y_train, sw_train)
prob_pos_sigmoid = clf_sigmoid.predict_proba(X_test)[:, 1]
print("Brier scores: (the smaller the better)")
clf_score = brier_score_loss(y_test, prob_pos_clf, sw_test)
print("No calibration: %1.3f" % clf_score)
clf_isotonic_score = brier_score_loss(y_test, prob_pos_isotonic, sw_test)
print("With isotonic calibration: %1.3f" % clf_isotonic_score)
clf_sigmoid_score = brier_score_loss(y_test, prob_pos_sigmoid, sw_test)
print("With sigmoid calibration: %1.3f" % clf_sigmoid_score)
###############################################################################
# Plot the data and the predicted probabilities
plt.figure()
y_unique = np.unique(y)
colors = cm.rainbow(np.linspace(0.0, 1.0, y_unique.size))
for this_y, color in zip(y_unique, colors):
this_X = X_train[y_train == this_y]
this_sw = sw_train[y_train == this_y]
plt.scatter(this_X[:, 0], this_X[:, 1], s=this_sw * 50, c=color, alpha=0.5,
label="Class %s" % this_y)
plt.legend(loc="best")
plt.title("Data")
plt.figure()
order = np.lexsort((prob_pos_clf, ))
plt.plot(prob_pos_clf[order], 'r', label='No calibration (%1.3f)' % clf_score)
plt.plot(prob_pos_isotonic[order], 'g', linewidth=3,
label='Isotonic calibration (%1.3f)' % clf_isotonic_score)
plt.plot(prob_pos_sigmoid[order], 'b', linewidth=3,
label='Sigmoid calibration (%1.3f)' % clf_sigmoid_score)
plt.plot(np.linspace(0, y_test.size, 51)[1::2],
y_test[order].reshape(25, -1).mean(1),
'k', linewidth=3, label=r'Empirical')
plt.ylim([-0.05, 1.05])
plt.xlabel("Instances sorted according to predicted probability "
"(uncalibrated GNB)")
plt.ylabel("P(y=1)")
plt.legend(loc="upper left")
plt.title("Gaussian naive Bayes probabilities")
plt.show()
| bsd-3-clause |
bensondaled/pyfluo | pyfluo/sandbox/groups.py | 1 | 14755 | import re, os, shutil, h5py, warnings
import matplotlib.pyplot as pl
import pandas as pd
from .config import *
from .motion import compute_motion, apply_motion_correction
from .movies import Movie
from .fluorescence import compute_dff, detect_transients
from .images import Tiff
from .roi import select_roi, ROI
from .series import Series
class Group():
#TODO: needs MAJOR overhaul, since I adjusted Tiff and Movie. This will be a far less involved class, just handling manipulations and analysis of already-processed groups
# Thinking I should rename it to Data, and its role will be to handle all data related to some images, as opposed to Movie which is specifically for in-memory movies
"""
Stores a set of tif-derived data with associated metadata and computations.
"""
def __init__(self, name, data_path='.'):
# user inputs
self.name = name
self.data_path = data_path
self.grp_path = os.path.join(self.data_path, self.name)
if not os.path.exists(self.grp_path):
# group files into directory
self._build_structure(self.name, in_path=self.data_path, out_path=self.data_path)
# default group files - these paths exist for all groups, whether or not the given file is created yet
self.tifdata_path = os.path.join(self.grp_path, '{}_tifdata.h5'.format(self.name))
self.otherdata_path = os.path.join(self.grp_path, '{}_otherdata.h5'.format(self.name))
self.mc_path = os.path.join(self.grp_path, '{}_mc.h5'.format(self.name))
self.mov_path = os.path.join(self.grp_path, '{}_mov.h5'.format(self.name))
# determine tif data
self._extract_tifdata()
# inferred properties based on tif data
# tif file details
self.tif_names = self.shapes.filename.values
self.tif_files = np.array([tn+'.tif' for tn in self.tif_names])
self.tif_paths = np.array([os.path.join(self.grp_path, tf) for tf in self.tif_files])
# tif file shape details
shp = self.shapes.iloc[0]
assert (self.shapes.x==shp.x).all()
assert (self.shapes.y==shp.y).all()
self.y,self.x = shp[['y','x']]
# movie format availability details
self.tifs_available = all([os.path.exists(f) for f in self.tif_paths])
self.merge_mov_available = os.path.exists(self.mov_path)
self._loaded_example = None
@property
def example(self):
"""
Retrieves an example movie and stores it in memory for repeated use
"""
if self._loaded_example is None:
self._loaded_example = self.get_mov(0)
return self._loaded_example
def _get_mov_idxs(self, idx):
idxs = self.shapes.z.cumsum().values
idxs = np.append(0, idxs)
return [idxs[idx],idxs[idx+1]]
def get_mov(self, idx, crop=False):
if self.merge_mov_available:
with h5py.File(self.mov_path) as f:
                filename = f['mov'].attrs['source_names'].split('\n')[idx]
idxs = self._get_mov_idxs(idx)
data = np.asarray(f['mov'][idxs[0]:idxs[1]])
mov = Movie(data, Ts=self.Ts, filename=filename)
elif self.tifs_available:
mov = Movie(self.tif_paths[idx])
mov = apply_motion_correction(mov, self.mc_path, crop=crop)
else:
raise Exception('No available source for movie loading.')
return mov
def merge_movs(self, remove_tifs=False):
"""
If tifs are available, copies all data into a single hdf5, removing tifs if desired
"""
if os.path.exists(self.mov_path):
ans = input('Path to merged movie exists. Overwrite? (y/n)')
if ans == 'y':
os.remove(self.mov_path)
else:
return
assert self.tifs_available, 'Tif files are not present in group directory; merge cannot be made.'
with h5py.File(self.mov_path) as movfile:
ds = movfile.create_dataset('mov', (self.shapes.z.sum(),self.y,self.x), compression='gzip', compression_opts=2)
idx = 0
indices = []
for i,tn in enumerate(self.tif_names):
print(tn)
mov = self.get_mov(i, crop=False)
indices.append([idx,idx+len(mov)])
ds[idx:idx+len(mov)] = np.asarray(mov)
idx += len(mov)
if remove_tifs:
os.remove(self.tif_paths[i])
self.tifs_available = False
ds.attrs['source_names'] = '\n'.join(self.tif_names)
def _extract_tifdata(self, recache=False):
"""Extracts features of tifs from caches if present, or from files if not yet cached
Fields of interest:
-tif file names
-dimensions of tif files
-i2c data from tif files
-sampling interval (Ts) of tifs
"""
if (not os.path.exists(self.tifdata_path)) or recache:
print('Extracting tif data for group...')
self.tif_files = sorted([o for o in os.listdir(self.grp_path) if o.endswith('.tif')])
self.tif_names = [os.path.splitext(o)[0] for o in self.tif_files]
self.tif_paths = [os.path.join(self.grp_path, fn) for fn in self.tif_files]
if len(self.tif_paths) == 0:
warnings.warn('No tif files detected in group.')
return
i2cs, Tss, shapes = [],[],[]
_i2c_ix = 0
for tp in self.tif_paths:
mov = Tiff(tp, load_data=False)
expg = mov.tf_obj.pages[0].asarray()
shapes.append(dict(filename=mov.filename, z=len(mov), y=expg.shape[0], x=expg.shape[1]))
i2c = mov.i2c
if len(i2c):
i2c.ix[:,'filename'] = mov.filename
i2c.index += _i2c_ix
i2cs.append(i2c)
Tss.append(mov.Ts)
_i2c_ix += len(mov)
# i2c
i2c = pd.concat(i2cs).dropna()
# shapes
shapes = pd.DataFrame(shapes)
# Tss
if not all([t==Tss[0] for t in Tss]):
warnings.warn('Ts\'s do not all align in group. Using mean.')
Ts = float(np.mean(Tss))
with pd.HDFStore(self.tifdata_path) as md:
md.put('Ts', pd.Series(Ts))
md.put('i2c', i2c)
md.put('shapes', shapes)
# in all cases:
with pd.HDFStore(self.tifdata_path) as md:
if any([i not in md for i in ['Ts','i2c','shapes']]):
return self._extract_tifdata(recache=True)
self.Ts = float(md['Ts'])
self.i2c = md['i2c']
self.shapes = md['shapes']
def _build_structure(self, name, in_path='.', out_path='.', regex=None, move_files=True):
"""Creates and returns a new group based on the input data
If regex, uses that to include files
If not, uses "name in filename" to include files
"""
# create group
grp_path = os.path.join(out_path, name)
if os.path.exists(grp_path):
raise Exception('Group \'{}\' already exists.'.format(grp_path))
os.mkdir(grp_path)
# load files in specified directory
filenames = [os.path.join(in_path, p) for p in os.listdir(in_path)]
# filter files
if regex is not None:
filenames = [fn for fn in filenames if re.search(regex, fn)]
else:
filenames = [fn for fn in filenames if name in fn]
# move/copy files
if move_files:
mv_func = shutil.move
else:
mv_func = shutil.copy2
for fn in filenames:
mv_func(fn, grp_path)
def motion_correct(self, max_shift=25):
if not self.tifs_available:
raise Exception('Tifs not available to motion correct. This must be done with tifs, since merge movie stores motion-corrected data.')
out_file = h5py.File(self.mc_path)
did_any = False
for fp,fn in zip(self.tif_paths,self.tif_names):
if fn in out_file:
continue
did_any = True
print(fp)
mov = Movie(fp)
templ,vals = compute_motion(mov, max_shift=max_shift)
gr = out_file.create_group(fn)
gr.create_dataset('shifts', data=vals)
gr.create_dataset('template', data=templ)
if 'max_shift' not in out_file.attrs:
out_file.attrs['max_shift'] = max_shift
if did_any:
template_mov = np.asarray([np.asarray(out_file[k]['template']) for k in out_file if 'global' not in k])
glob_template,glob_vals = compute_motion(template_mov, max_shift=max_shift)
if 'global_shifts' in out_file:
del out_file['global_shifts']
shifts_dataset = out_file.create_dataset('global_shifts', data=glob_vals)
shifts_dataset.attrs['filenames'] = np.asarray(self.tif_names).astype('|S150')
if 'global_template' in out_file:
del out_file['global_template']
out_file.create_dataset('global_template', data=glob_template)
out_file.close()
def get_motion(self):
result = []
with h5py.File(self.mc_path) as f:
for tn in self.tif_names:
gr = f[tn]
ds = gr['shifts']
result.append(np.asarray(ds))
return np.concatenate(result)
def get_roi(self, i=None, reselect=False):
if i is None:
i = ''
roiname = 'roi{}'.format(i)
with h5py.File(self.otherdata_path) as od:
if roiname in od and not reselect:
roi = ROI(od[roiname])
else:
if i != '': #specific roi was requested but not present
return None
with h5py.File(self.mc_path) as mc_file:
gt = np.asarray(mc_file['global_template'])
roi = select_roi(img=gt)
self.set_roi(roi)
return roi
def set_roi(self, roi):
with h5py.File(self.otherdata_path) as od:
if 'roi' in od:
i = 0
while 'roi{}'.format(i) in od:
i+=1
od.move('roi', 'roi{}'.format(i))
if 'raw' in od:
od.move('raw', 'raw{}'.format(i))
if 'dff' in od:
od.move('dff', 'dff{}'.format(i))
od.create_dataset('roi', data=np.asarray(roi))
def get_dff(self, i=None, redo_raw=False, redo_dff=False, redo_transients=False, dff_kwargs={}, transient_kwargs={}):
if i is None:
i = ''
dffname = 'dff{}'.format(i)
transname = 'transients{}'.format(i)
rawname = 'raw{}'.format(i)
roiname = 'roi{}'.format(i)
with h5py.File(self.otherdata_path) as od:
if not roiname in od:
raise Exception('No roi specified for trace extraction.')
if rawname in od:
raw_grp = od[rawname]
else:
raw_grp = od.create_group(rawname)
roi = ROI(np.asarray(od[roiname]))
for fileidx,filename in enumerate(self.tif_names):
if (not redo_raw) and filename in raw_grp:
continue
print('Extracting raw from {}'.format(filename))
# clear existing if necessary
if filename in raw_grp and redo_raw:
del raw_grp[filename]
if filename not in raw_grp:
mov = self.get_mov(fileidx)
tr = mov.extract(roi)
raw_grp.create_dataset(filename, data=np.asarray(tr))
raw = [np.asarray(raw_grp[k]) for k in raw_grp]
raw = Series(np.concatenate(raw), Ts=self.Ts)
if (dffname not in od) or redo_dff:
# clear if necessary:
print ('Computing DFF...')
dff = np.asarray(compute_dff(raw, **dff_kwargs))
if dffname in od:
del od[dffname]
od.create_dataset(dffname, data=dff)
dff = Series(np.asarray(od[dffname]), Ts=self.Ts)
if (transname not in od) or redo_transients:
# clear if necessary:
print ('Computing transients...')
trans = np.asarray(detect_transients(dff, **transient_kwargs))
if transname in od:
del od[transname]
od.create_dataset(transname, data=trans)
trans = Series(np.asarray(od[transname]), Ts=self.Ts)
return trans, dff, raw
def extract_roi_mov(self, roi, frame_idxs, mean=False, pad=3):
# frame_idxs: either 2-tuples, or slices
if not os.path.exists(self.mov_path):
raise Exception('Run merge_movs first, this is too slow if you don\'t have hdf5 version stored.')
for idx,fi in enumerate(frame_idxs):
if not isinstance(fi,slice):
frame_idxs[idx] = slice(fi[0], fi[1])
# build bounding box
if roi is None:
roi = np.ones([self.y,self.x])
args = np.argwhere(roi)
(ymin,xmin),(ymax,xmax) = args.min(axis=0),args.max(axis=0)
with h5py.File(self.mov_path) as movfile:
mov = movfile['mov']
yslice = slice(max(0,ymin-pad), min(ymax+pad,mov.shape[1]))
xslice = slice(max(0,xmin-pad), min(xmax+pad,mov.shape[2]))
if mean:
chunk_size = frame_idxs[0].stop-frame_idxs[0].start
assert [(fi.stop-fi.start)==(chunk_size) for fi in frame_idxs]
chunks = np.empty([chunk_size,ymax-ymin,xmax-xmin])
else:
chunks = []
for idx,fi in enumerate(frame_idxs):
ch = mov[fi,yslice,xslice]
if mean:
chunks += ch/len(frame_idxs)
else:
chunks.append(ch)
return np.squeeze(chunks)
def project(self, roi=True, ax=None, show=True):
if ax is None:
ax = pl.gca()
with h5py.File(self.mc_path) as mc_file:
gt = np.asarray(mc_file['global_template'])
if show:
ax.imshow(gt, cmap=pl.cm.Greys_r)
if roi and show:
roi = self.get_roi()
roi.show(labels=True, ax=ax)
return gt
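# Illustrative usage sketch (not part of the original module): a typical
# processing pass over a Group.  The group name and data path are hypothetical
# values chosen for the example; the ROI is selected interactively the first
# time get_roi() is called and cached in the group's HDF5 store afterwards.
def _example_group_workflow():
    grp = Group('example_session', data_path='/path/to/tifs')  # hypothetical paths
    grp.motion_correct(max_shift=25)
    grp.merge_movs(remove_tifs=False)
    roi = grp.get_roi()
    transients, dff, raw = grp.get_dff()
    return roi, transients, dff, raw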
| bsd-2-clause |
wathen/PhD | MHD/FEniCS/ConvergenceTests/Driver.py | 1 | 1126 | from Maxwell import *
from dolfin import *
from numpy import *
import scipy as Sci
import scipy.linalg
from math import pi,sin,cos,sqrt
import scipy.sparse as sps
import scipy.io as save
import scipy
import pdb
from matrix2latex import *
from matplotlib.pylab import *
Mcycle = 8
n= 2
time = zeros((Mcycle,1))
N = zeros((Mcycle,1))
DoF = zeros((Mcycle,1))
table = zeros((Mcycle,4))
parameters["form_compiler"]["optimize"] = True
parameters["form_compiler"]["cpp_optimize"] = True
for i in xrange(1,Mcycle+1):
print "Cycle # = ",i,"\n"
n = 2*n
N[i-1,0] = n
mesh = MeshGenerator(n)
V,u,v = CreateTrialTestFuncs(mesh)
u0 = Expression(('0','0'))
f=Expression(("2+x[1]*(1-x[1])","2+x[0]*(1-x[0])"))
tic()
A,b = AssembleSystem(V,u,v,f,u0,1,"PETSc")
time[i-1,0] = toc()
DoF[i-1,0] = b.size()
# u = SolveSystem(A,b,V,"cg","amg",1e-6,1e-6,1)
# ue = Expression(("x[1]*(1-x[1])","x[0]*(1-x[0])"))
# error = Error(u,ue)
table[i-1,0] = i-1
table[i-1,1] = n
table[i-1,2] = b.size()
table[i-1,3] = time[i-1,0]
print matrix2latex(table)
loglog(DoF,time)
show() | mit |
anntzer/scikit-learn | examples/release_highlights/plot_release_highlights_0_22_0.py | 10 | 10186 | """
========================================
Release Highlights for scikit-learn 0.22
========================================
.. currentmodule:: sklearn
We are pleased to announce the release of scikit-learn 0.22, which comes
with many bug fixes and new features! We detail below a few of the major
features of this release. For an exhaustive list of all the changes, please
refer to the :ref:`release notes <changes_0_22>`.
To install the latest version (with pip)::
pip install --upgrade scikit-learn
or with conda::
conda install -c conda-forge scikit-learn
"""
# %%
# New plotting API
# ----------------
#
# A new plotting API is available for creating visualizations. This new API
# allows for quickly adjusting the visuals of a plot without involving any
# recomputation. It is also possible to add different plots to the same
# figure. The following example illustrates :class:`~metrics.plot_roc_curve`,
# but other plots utilities are supported like
# :class:`~inspection.plot_partial_dependence`,
# :class:`~metrics.plot_precision_recall_curve`, and
# :class:`~metrics.plot_confusion_matrix`. Read more about this new API in the
# :ref:`User Guide <visualizations>`.
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import plot_roc_curve
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import make_classification
import matplotlib.pyplot as plt
X, y = make_classification(random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
svc = SVC(random_state=42)
svc.fit(X_train, y_train)
rfc = RandomForestClassifier(random_state=42)
rfc.fit(X_train, y_train)
svc_disp = plot_roc_curve(svc, X_test, y_test)
rfc_disp = plot_roc_curve(rfc, X_test, y_test, ax=svc_disp.ax_)
rfc_disp.figure_.suptitle("ROC curve comparison")
plt.show()
# %%
# Stacking Classifier and Regressor
# ---------------------------------
# :class:`~ensemble.StackingClassifier` and
# :class:`~ensemble.StackingRegressor`
# allow you to have a stack of estimators with a final classifier or
# a regressor.
# Stacked generalization consists in stacking the output of individual
# estimators and using a classifier to compute the final prediction. Stacking
# makes it possible to use the strength of each individual estimator by using
# their output as the input of a final estimator.
# Base estimators are fitted on the full ``X`` while
# the final estimator is trained using cross-validated predictions of the
# base estimators using ``cross_val_predict``.
#
# Read more in the :ref:`User Guide <stacking>`.
from sklearn.datasets import load_iris
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import StackingClassifier
from sklearn.model_selection import train_test_split
X, y = load_iris(return_X_y=True)
estimators = [
('rf', RandomForestClassifier(n_estimators=10, random_state=42)),
('svr', make_pipeline(StandardScaler(),
LinearSVC(random_state=42)))
]
clf = StackingClassifier(
estimators=estimators, final_estimator=LogisticRegression()
)
X_train, X_test, y_train, y_test = train_test_split(
X, y, stratify=y, random_state=42
)
clf.fit(X_train, y_train).score(X_test, y_test)
# %%
# Permutation-based feature importance
# ------------------------------------
#
# The :func:`inspection.permutation_importance` can be used to get an
# estimate of the importance of each feature, for any fitted estimator:
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.inspection import permutation_importance
X, y = make_classification(random_state=0, n_features=5, n_informative=3)
feature_names = np.array([f'x_{i}' for i in range(X.shape[1])])
rf = RandomForestClassifier(random_state=0).fit(X, y)
result = permutation_importance(rf, X, y, n_repeats=10, random_state=0,
n_jobs=-1)
fig, ax = plt.subplots()
sorted_idx = result.importances_mean.argsort()
ax.boxplot(result.importances[sorted_idx].T,
vert=False, labels=feature_names[sorted_idx])
ax.set_title("Permutation Importance of each feature")
ax.set_ylabel("Features")
fig.tight_layout()
plt.show()
# %%
# Native support for missing values for gradient boosting
# -------------------------------------------------------
#
# The :class:`ensemble.HistGradientBoostingClassifier`
# and :class:`ensemble.HistGradientBoostingRegressor` now have native
# support for missing values (NaNs). This means that there is no need for
# imputing data when training or predicting.
from sklearn.experimental import enable_hist_gradient_boosting # noqa
from sklearn.ensemble import HistGradientBoostingClassifier
X = np.array([0, 1, 2, np.nan]).reshape(-1, 1)
y = [0, 0, 1, 1]
gbdt = HistGradientBoostingClassifier(min_samples_leaf=1).fit(X, y)
print(gbdt.predict(X))
# %%
# Precomputed sparse nearest neighbors graph
# ------------------------------------------
# Most estimators based on nearest neighbors graphs now accept precomputed
# sparse graphs as input, to reuse the same graph for multiple estimator fits.
# To use this feature in a pipeline, one can use the `memory` parameter, along
# with one of the two new transformers,
# :class:`neighbors.KNeighborsTransformer` and
# :class:`neighbors.RadiusNeighborsTransformer`. The precomputation
# can also be performed by custom estimators to use alternative
# implementations, such as approximate nearest neighbors methods.
# See more details in the :ref:`User Guide <neighbors_transformer>`.
from tempfile import TemporaryDirectory
from sklearn.neighbors import KNeighborsTransformer
from sklearn.manifold import Isomap
from sklearn.pipeline import make_pipeline
X, y = make_classification(random_state=0)
with TemporaryDirectory(prefix="sklearn_cache_") as tmpdir:
estimator = make_pipeline(
KNeighborsTransformer(n_neighbors=10, mode='distance'),
Isomap(n_neighbors=10, metric='precomputed'),
memory=tmpdir)
estimator.fit(X)
# We can decrease the number of neighbors and the graph will not be
# recomputed.
estimator.set_params(isomap__n_neighbors=5)
estimator.fit(X)
# %%
# KNN Based Imputation
# ------------------------------------
# We now support imputation for completing missing values using k-Nearest
# Neighbors.
#
# Each sample's missing values are imputed using the mean value from
# ``n_neighbors`` nearest neighbors found in the training set. Two samples are
# close if the features that neither is missing are close.
# By default, a euclidean distance metric
# that supports missing values,
# :func:`~metrics.nan_euclidean_distances`, is used to find the nearest
# neighbors.
#
# Read more in the :ref:`User Guide <knnimpute>`.
from sklearn.impute import KNNImputer
X = [[1, 2, np.nan], [3, 4, 3], [np.nan, 6, 5], [8, 8, 7]]
imputer = KNNImputer(n_neighbors=2)
print(imputer.fit_transform(X))
# %%
# Tree pruning
# ------------
#
# It is now possible to prune most tree-based estimators once the trees are
# built. The pruning is based on minimal cost-complexity. Read more in the
# :ref:`User Guide <minimal_cost_complexity_pruning>` for details.
X, y = make_classification(random_state=0)
rf = RandomForestClassifier(random_state=0, ccp_alpha=0).fit(X, y)
print("Average number of nodes without pruning {:.1f}".format(
np.mean([e.tree_.node_count for e in rf.estimators_])))
rf = RandomForestClassifier(random_state=0, ccp_alpha=0.05).fit(X, y)
print("Average number of nodes with pruning {:.1f}".format(
np.mean([e.tree_.node_count for e in rf.estimators_])))
# %%
# Retrieve dataframes from OpenML
# -------------------------------
# :func:`datasets.fetch_openml` can now return pandas dataframe and thus
# properly handle datasets with heterogeneous data:
from sklearn.datasets import fetch_openml
titanic = fetch_openml('titanic', version=1, as_frame=True)
print(titanic.data.head()[['pclass', 'embarked']])
# %%
# Checking scikit-learn compatibility of an estimator
# ---------------------------------------------------
# Developers can check the compatibility of their scikit-learn compatible
# estimators using :func:`~utils.estimator_checks.check_estimator`. For
# instance, the ``check_estimator(LinearSVC())`` passes.
#
# We now provide a ``pytest`` specific decorator which allows ``pytest``
# to run all checks independently and report the checks that are failing.
#
# .. note::
# This entry was slightly updated in version 0.24, where passing classes
# isn't supported anymore: pass instances instead.
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.utils.estimator_checks import parametrize_with_checks
@parametrize_with_checks([LogisticRegression(), DecisionTreeRegressor()])
def test_sklearn_compatible_estimator(estimator, check):
check(estimator)
# %%
# ROC AUC now supports multiclass classification
# ----------------------------------------------
# The :func:`roc_auc_score` function can also be used in multi-class
# classification. Two averaging strategies are currently supported: the
# one-vs-one algorithm computes the average of the pairwise ROC AUC scores, and
# the one-vs-rest algorithm computes the average of the ROC AUC scores for each
# class against all other classes. In both cases, the multiclass ROC AUC scores
# are computed from the probability estimates that a sample belongs to a
# particular class according to the model. The OvO and OvR algorithms support
# weighting uniformly (``average='macro'``) and weighting by the prevalence
# (``average='weighted'``).
#
# Read more in the :ref:`User Guide <roc_metrics>`.
from sklearn.datasets import make_classification
from sklearn.svm import SVC
from sklearn.metrics import roc_auc_score
X, y = make_classification(n_classes=4, n_informative=16)
clf = SVC(decision_function_shape='ovo', probability=True).fit(X, y)
print(roc_auc_score(y, clf.predict_proba(X), multi_class='ovo'))
| bsd-3-clause |
mantidproject/mantid | scripts/test/directtools/DirectToolsTest.py | 3 | 21716 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# Set matplotlib backend to AGG before anything else. Otherwise some build servers
# need to have extra packages (tkinter) installed.
import directtools
from mantid.api import mtd
from mantid.simpleapi import (AddSampleLog, CloneWorkspace, ComputeIncoherentDOS, ConvertSpectrumAxis,
CreateSampleWorkspace,
CreateWorkspace, DirectILLCollectData, DeleteWorkspace, DirectILLReduction, LoadILLTOF,
MoveInstrumentComponent, SetInstrumentParameter, Transpose)
import numpy
import numpy.testing
import testhelpers
import unittest
import matplotlib
matplotlib.use('AGG')
class DirectToolsTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
workspace = DirectILLCollectData('ILL/IN4/084446.nxs', EPPCreationMethod='Calculate EPP',
IncidentEnergyCalibration='Energy Calibration OFF',
FlatBkg='Flat Bkg OFF', Normalisation='Normalisation OFF',
StoreInADS=False)
cls._sqw = DirectILLReduction(workspace, OutputWorkspace='unused', StoreInADS=False)
def tearDown(self):
mtd.clear()
def _box2DSetup(self):
xs = numpy.tile(numpy.array([-1, 0, 2, 4, 5]), (3, 1))
vertAxis = numpy.array([-2, -1, 0, 1])
return xs, vertAxis
def test_box2D_defaults(self):
xs, vertAxis = self._box2DSetup()
vertAxis = numpy.array([-2, -1, 0, 1])
box = directtools.box2D(xs, vertAxis)
numpy.testing.assert_equal(xs[box], xs)
def test_box2D_horMin(self):
xs, vertAxis = self._box2DSetup()
box = directtools.box2D(xs, vertAxis, horMin=0)
expected = numpy.tile(numpy.array([0, 2, 4, 5]), (3, 1))
numpy.testing.assert_equal(xs[box], expected)
def test_box2D_horMax(self):
xs, vertAxis = self._box2DSetup()
box = directtools.box2D(xs, vertAxis, horMax=5)
expected = numpy.tile(numpy.array([-1, 0, 2, 4]), (3, 1))
numpy.testing.assert_equal(xs[box], expected)
def test_box2D_vertMin(self):
xs, vertAxis = self._box2DSetup()
box = directtools.box2D(xs, vertAxis, vertMin=-1)
expected = numpy.tile(numpy.array([-1, 0, 2, 4, 5]), (2, 1))
numpy.testing.assert_equal(xs[box], expected)
def test_box2D_vertMax(self):
xs, vertAxis = self._box2DSetup()
box = directtools.box2D(xs, vertAxis, vertMax=-1)
expected = numpy.tile(numpy.array([-1, 0, 2, 4, 5]), (1, 1))
numpy.testing.assert_equal(xs[box], expected)
def test_configurematplotlib(self):
defaultParams = directtools.defaultrcparams()
directtools._configurematplotlib(defaultParams)
for key in defaultParams:
self.assertTrue(key in matplotlib.rcParams)
self.assertEqual(matplotlib.rcParams[key], defaultParams[key])
def test_defaultrcParams(self):
result = directtools.defaultrcparams()
self.assertEqual(result, {'legend.numpoints': 1})
def test_dynamicsusceptibility(self):
xs = numpy.array([-1, 0, 1])
ys = numpy.array([1, 1])
vertX = numpy.array([-1, 1])
ws = CreateWorkspace(DataX=xs, DataY=ys, NSpec=1, UnitX='DeltaE', VerticalAxisUnit='MomentumTransfer',
VerticalAxisValues=vertX,
StoreInADS=False)
wsOut = directtools.dynamicsusceptibility(ws, 100.)
self.assertEqual(wsOut.YUnitLabel(), 'Dynamic susceptibility')
xs = numpy.array([0, 1, 0, 1])
ys = numpy.array([1, 1])
ws = CreateWorkspace(DataX=xs, DataY=ys, NSpec=2, UnitX='MomentumTransfer', VerticalAxisUnit='DeltaE',
VerticalAxisValues=vertX,
StoreInADS=False)
wsOut = directtools.dynamicsusceptibility(ws, 100.)
self.assertEqual(wsOut.YUnitLabel(), 'Dynamic susceptibility')
def test_dynamicsusceptibility_removesingularity(self):
xs = numpy.array([-0.7, -0.4, -0.1, 0.2, 0.5])
ys = numpy.array([2, 2, 2, 2])
es = numpy.sqrt(ys)
vertX = numpy.array([-1, 1])
ws = CreateWorkspace(DataX=xs, DataY=ys, DataE=es, NSpec=1, UnitX='DeltaE', VerticalAxisUnit='MomentumTransfer',
VerticalAxisValues=vertX, StoreInADS=False)
wsOut = directtools.dynamicsusceptibility(ws, 100., zeroEnergyEpsilon=0.13)
numpy.testing.assert_equal(wsOut.readX(0), xs)
outYs = wsOut.readY(0)
outEs = wsOut.readE(0)
self.assertEqual(outYs[2], 0.)
self.assertEqual(outEs[2], 0.)
def test_mantidsubplotsetup(self):
result = directtools._mantidsubplotsetup()
self.assertEqual(result, {'projection': 'mantid'})
def _nanminmaxSetup(self):
xs = numpy.tile(numpy.array([-1, 0, 2, 4, 5]), 3)
ys = numpy.linspace(-5, 3, 4 * 3)
vertAxis = numpy.array([-3, -1, 2, 4])
ws = CreateWorkspace(DataX=xs, DataY=ys, NSpec=3, VerticalAxisUnit='Degrees', VerticalAxisValues=vertAxis,
StoreInADS=False)
return ws
def test_nanminmax_defaults(self):
ws = self._nanminmaxSetup()
ys = ws.extractY()
cMin, cMax = directtools.nanminmax(ws)
self.assertEqual(cMin, ys[0, 0])
self.assertEqual(cMax, ys[2, -1])
def test_nanminmax_horMin(self):
ws = self._nanminmaxSetup()
ys = ws.extractY()
cMin, cMax = directtools.nanminmax(ws, horMin=0)
self.assertEqual(cMin, ys[0, 1])
self.assertEqual(cMax, ys[2, -1])
def test_nanminmax_horMax(self):
ws = self._nanminmaxSetup()
ys = ws.extractY()
cMin, cMax = directtools.nanminmax(ws, horMax=4)
self.assertEqual(cMin, ys[0, 0])
self.assertEqual(cMax, ys[2, -2])
def test_nanminmax_vertMin(self):
ws = self._nanminmaxSetup()
ys = ws.extractY()
cMin, cMax = directtools.nanminmax(ws, vertMin=-1)
self.assertEqual(cMin, ys[1, 0])
self.assertEqual(cMax, ys[2, -1])
def test_nanminmax_vertMax(self):
ws = self._nanminmaxSetup()
ys = ws.extractY()
cMin, cMax = directtools.nanminmax(ws, vertMax=2)
self.assertEqual(cMin, ys[0, 0])
self.assertEqual(cMax, ys[1, -1])
def test_plotconstE_nonListArgsExecutes(self):
kwargs = {
'workspaces': self._sqw,
'E': -1.,
'dE': 1.5
}
testhelpers.assertRaisesNothing(self, directtools.plotconstE, **kwargs)
def test_plotconstE_wsListExecutes(self):
kwargs = {
'workspaces': [self._sqw, self._sqw],
'E': -2.,
'dE': 1.5,
'style': 'l'
}
testhelpers.assertRaisesNothing(self, directtools.plotconstE, **kwargs)
def test_plotconstE_EListExecutes(self):
kwargs = {
'workspaces': self._sqw,
'E': [-3., 4.],
'dE': 1.5,
'style': 'm'
}
testhelpers.assertRaisesNothing(self, directtools.plotconstE, **kwargs)
def test_plotconstE_dEListExecutes(self):
kwargs = {
'workspaces': self._sqw,
'E': 3.,
'dE': [1.5, 15.],
'style': 'lm'
}
testhelpers.assertRaisesNothing(self, directtools.plotconstE, **kwargs)
def test_plotconstE_loglog(self):
kwargs = {
'workspaces': self._sqw,
'E': -10.,
'dE': 1.5,
'xscale': 'log',
'yscale': 'log'
}
figure, axes, cuts = testhelpers.assertRaisesNothing(self, directtools.plotconstE, **kwargs)
self.assertEqual(axes.get_xscale(), 'log')
self.assertEqual(axes.get_yscale(), 'log')
def test_plotconstE_legendLabels(self):
kwargs = {
'workspaces': self._sqw,
'E': -1.,
'dE': [0.5, 1.0],
}
figure, axes, cuts = testhelpers.assertRaisesNothing(self, directtools.plotconstE, **kwargs)
        handles, labels = axes.get_legend_handles_labels()
self.assertEqual(labels, [r' $E$ = -1.00 $\pm$ 0.57 meV', r' $E$ = -1.01 $\pm$ 1.02 meV'])
def test_plotconstQ_nonListArgsExecutes(self):
kwargs = {
'workspaces': self._sqw,
'Q': 2.3,
'dQ': 0.3
}
testhelpers.assertRaisesNothing(self, directtools.plotconstQ, **kwargs)
def test_plotconstQ_wsListExecutes(self):
kwargs = {
'workspaces': [self._sqw, self._sqw],
'Q': 2.4,
'dQ': 0.42,
'style': 'l'
}
testhelpers.assertRaisesNothing(self, directtools.plotconstQ, **kwargs)
def test_plotconstQ_QListExecutes(self):
kwargs = {
'workspaces': self._sqw,
'Q': [1.8, 3.1],
'dQ': 0.32,
'style': 'm'
}
testhelpers.assertRaisesNothing(self, directtools.plotconstQ, **kwargs)
def test_plotconstQ_dQListExecutes(self):
kwargs = {
'workspaces': self._sqw,
'Q': 1.9,
'dQ': [0.2, 0.4],
'style': 'ml'
}
testhelpers.assertRaisesNothing(self, directtools.plotconstQ, **kwargs)
def test_plotconstQ_loglog(self):
kwargs = {
'workspaces': self._sqw,
'Q': 2.6,
'dQ': 0.1,
'xscale': 'log',
'yscale': 'log'
}
figure, axes, cuts = testhelpers.assertRaisesNothing(self, directtools.plotconstQ, **kwargs)
self.assertEqual(axes.get_xscale(), 'log')
self.assertEqual(axes.get_yscale(), 'log')
def test_plotconstQ_legendLabels(self):
kwargs = {
'workspaces': self._sqw,
'Q': 1.9,
'dQ': [0.2, 0.4],
}
figure, axes, cuts = testhelpers.assertRaisesNothing(self, directtools.plotconstQ, **kwargs)
handles, labels = axes.get_legend_handles_labels()
self.assertEqual(labels, [r' $Q$ = 1.91 $\pm$ 0.21 $\mathrm{\AA}^{-1}$',
r' $Q$ = 1.91 $\pm$ 0.41 $\mathrm{\AA}^{-1}$'])
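# The title is expected to shrink from four lines to one as workspace and Q lists are passed, presumably because per-curve details move into the legend instead.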
def test_plotconstQ_titles(self):
kwargs = {
'workspaces': self._sqw,
'Q': 1.9,
'dQ': 0.2,
}
figure, axes, cuts = testhelpers.assertRaisesNothing(self, directtools.plotconstQ, **kwargs)
titleLines = axes.get_title().split('\n')
self.assertEqual(len(titleLines), 4)
kwargs = {
'workspaces': self._sqw,
'Q': [0.9, 1.9],
'dQ': 0.2,
}
figure, axes, cuts = testhelpers.assertRaisesNothing(self, directtools.plotconstQ, **kwargs)
titleLines = axes.get_title().split('\n')
self.assertEqual(len(titleLines), 3)
kwargs = {
'workspaces': [self._sqw, self._sqw],
'Q': 0.9,
'dQ': 0.2,
}
figure, axes, cuts = testhelpers.assertRaisesNothing(self, directtools.plotconstQ, **kwargs)
titleLines = axes.get_title().split('\n')
self.assertEqual(len(titleLines), 2)
kwargs = {
'workspaces': [self._sqw, self._sqw],
'Q': [0.9, 1.9],
'dQ': 0.2,
}
figure, axes, cuts = testhelpers.assertRaisesNothing(self, directtools.plotconstQ, **kwargs)
titleLines = axes.get_title().split('\n')
self.assertEqual(len(titleLines), 1)
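# Consistency check: a constant-Q cut read at energy E should give the same value as a constant-E cut read at momentum transfer Q.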
def test_plotconstE_and_plotconstQ_plot_equal_value_at_crossing(self):
Q = 2.512
figure, axes, cuts = directtools.plotconstQ(self._sqw, Q, 0.01)
lineDataQ = axes.get_lines()[0].get_data()
E = 2.2
figure, axes, cuts = directtools.plotconstE(self._sqw, E, 0.01)
lineDataE = axes.get_lines()[0].get_data()
indexE = numpy.argmin(numpy.abs(lineDataQ[0] - E))
indexQ = numpy.argmin(numpy.abs(lineDataE[0] - Q))
self.assertEqual(lineDataQ[1][indexE], lineDataE[1][indexQ])
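# plotcuts bookkeeping: with keepCutWorkspaces=True the cut workspace should remain in the ADS (mtd), otherwise mtd should stay empty.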
def test_plotcuts_keepCutWorkspaces(self):
kwargs = {
'direction': 'Vertical',
'workspaces': self._sqw,
'cuts': 1.9,
'widths': 0.8,
'quantity': 'TOF',
'unit': 'microseconds',
'keepCutWorkspaces': True
}
self.assertEqual(mtd.size(), 0)
figure, axes, cuts = testhelpers.assertRaisesNothing(self, directtools.plotcuts, **kwargs)
self.assertEqual(len(cuts), 1)
self.assertEqual(mtd.size(), 1)
def test_plotcuts_doNotKeepCutWorkspaces(self):
kwargs = {
'direction': 'Vertical',
'workspaces': self._sqw,
'cuts': 2.0,
'widths': 0.7,
'quantity': 'TOF',
'unit': 'microseconds',
'keepCutWorkspaces': False
}
self.assertEqual(mtd.size(), 0)
figure, axes, cuts = testhelpers.assertRaisesNothing(self, directtools.plotcuts, **kwargs)
self.assertEqual(len(cuts), 0)
self.assertEqual(mtd.size(), 0)
def test_plotcuts_loglog(self):
kwargs = {
'direction': 'Vertical',
'workspaces': self._sqw,
'cuts': 2.1,
'widths': 0.6,
'quantity': 'TOF',
'unit': 'microseconds',
'xscale': 'log',
'yscale': 'log'
}
self.assertEqual(mtd.size(), 0)
figure, axes, cuts = testhelpers.assertRaisesNothing(self, directtools.plotcuts, **kwargs)
self.assertEqual(axes.get_xscale(), 'log')
self.assertEqual(axes.get_yscale(), 'log')
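# plotprofiles: the x label should follow the workspace's X unit, and the plotted x data should be the bin centres (xs[1:] + xs[:-1]) / 2.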
def test_plotprofiles_noXUnitsExecutes(self):
xs = numpy.linspace(-3., 10., 12)
ys = numpy.tile(1., len(xs) - 1)
ws = CreateWorkspace(DataX=xs, DataY=ys, NSpec=1, StoreInADS=False)
kwargs = {'workspaces': ws}
figure, axes = testhelpers.assertRaisesNothing(self, directtools.plotprofiles, **kwargs)
self.assertEqual(axes.get_xlabel(), '')
self.assertEqual(axes.get_ylabel(), r'$S(Q,E)$')
numpy.testing.assert_equal(axes.get_lines()[0].get_data()[0], (xs[1:] + xs[:-1]) / 2)
numpy.testing.assert_equal(axes.get_lines()[0].get_data()[1], ys)
def test_plotprofiles_DeltaEXUnitsExecutes(self):
xs = numpy.linspace(-3., 10., 12)
ys = numpy.tile(1., len(xs) - 1)
ws = CreateWorkspace(DataX=xs, DataY=ys, NSpec=1, UnitX='DeltaE', StoreInADS=False)
kwargs = {'workspaces': ws}
figure, axes = testhelpers.assertRaisesNothing(self, directtools.plotprofiles, **kwargs)
self.assertEqual(axes.get_xlabel(), 'Energy (meV)')
self.assertEqual(axes.get_ylabel(), r'$S(Q,E)$')
numpy.testing.assert_equal(axes.get_lines()[0].get_data()[0], (xs[1:] + xs[:-1]) / 2)
numpy.testing.assert_equal(axes.get_lines()[0].get_data()[1], ys)
def test_plotprofiles_MomentumTransferXUnitsExecutes(self):
xs = numpy.linspace(-3., 10., 12)
ys = numpy.tile(1., len(xs) - 1)
ws = CreateWorkspace(DataX=xs, DataY=ys, NSpec=1, UnitX='MomentumTransfer', StoreInADS=False)
kwargs = {'workspaces': ws}
figure, axes = testhelpers.assertRaisesNothing(self, directtools.plotprofiles, **kwargs)
self.assertEqual(axes.get_xlabel(), r'$Q$ ($\mathrm{\AA}^{-1}$)')
self.assertEqual(axes.get_ylabel(), '$S(Q,E)$')
numpy.testing.assert_equal(axes.get_lines()[0].get_data()[0], (xs[1:] + xs[:-1]) / 2)
numpy.testing.assert_equal(axes.get_lines()[0].get_data()[1], ys)
def test_plotprofiles_loglog(self):
xs = numpy.linspace(-3., 10., 12)
ys = numpy.tile(1., len(xs) - 1)
ws = CreateWorkspace(DataX=xs, DataY=ys, NSpec=1, UnitX='MomentumTransfer', StoreInADS=False)
kwargs = {'workspaces': ws, 'xscale': 'log', 'yscale': 'log'}
figure, axes = testhelpers.assertRaisesNothing(self, directtools.plotprofiles, **kwargs)
self.assertEqual(axes.get_xscale(), 'log')
self.assertEqual(axes.get_yscale(), 'log')
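# The plotDOS input is synthesized here: a DeltaE sample workspace converted to a theta spectrum axis, tagged as a direct-geometry run with Ei, then reduced with ComputeIncoherentDOS.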
def test_plotDOS_PlotSingle(self):
ws = CreateSampleWorkspace(NumBanks=1, XUnit='DeltaE', XMin=-12., XMax=12., BinWidth=0.2, StoreInADS=False)
MoveInstrumentComponent(ws, 'bank1', X=-0.5, StoreInADS=False)
ws = ConvertSpectrumAxis(ws, 'Theta', 'Direct', 14., StoreInADS=False)
SetInstrumentParameter(ws, ParameterName='deltaE-mode', Value='direct', StoreInADS=False)
AddSampleLog(ws, LogName='Ei', LogText=str(14.), LogType='Number', LogUnit='meV', StoreInADS=False)
stw = ComputeIncoherentDOS(ws, StoreInADS=False)
kwargs = {'workspaces': stw}
figure, axes = testhelpers.assertRaisesNothing(self, directtools.plotDOS, **kwargs)
self.assertEqual(axes.get_xlabel(), 'Energy transfer ($meV$)')
self.assertEqual(axes.get_ylabel(), '$g(E)$')
def test_plotDOS_PlotMultiple(self):
ws = CreateSampleWorkspace(NumBanks=1, XUnit='DeltaE', XMin=-12., XMax=12., BinWidth=0.2, StoreInADS=False)
MoveInstrumentComponent(ws, 'bank1', X=-0.5, StoreInADS=False)
ws = ConvertSpectrumAxis(ws, 'Theta', 'Direct', 14., StoreInADS=False)
SetInstrumentParameter(ws, ParameterName='deltaE-mode', Value='direct', StoreInADS=False)
AddSampleLog(ws, LogName='Ei', LogText=str(14.), LogType='Number', LogUnit='meV', StoreInADS=False)
stw = ComputeIncoherentDOS(ws)
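# Pass both the workspace handle and a name string; ComputeIncoherentDOS was run without StoreInADS=False, so its output is presumably registered in the ADS under the name 'stw'.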
kwargs = {'workspaces': [stw, 'stw']}
figure, axes = testhelpers.assertRaisesNothing(self, directtools.plotDOS, **kwargs)
self.assertEqual(axes.get_xlabel(), 'Energy transfer ($meV$)')
self.assertEqual(axes.get_ylabel(), '$g(E)$')
def test_plotSofQW(self):
wsName = 'ws'
CloneWorkspace(self._sqw, OutputWorkspace=wsName)
kwargs = {'workspace': wsName}
testhelpers.assertRaisesNothing(self, directtools.plotSofQW, **kwargs)
DeleteWorkspace(wsName)
kwargs = {'workspace': self._sqw}
figure, axes = testhelpers.assertRaisesNothing(self, directtools.plotSofQW, **kwargs)
self.assertEqual(len(figure.axes), 2)
colorbar_axes = figure.axes[-1]
self.assertEqual(colorbar_axes.get_ylabel(), r'$S(Q,E)$ (arb. units)')
def test_plotSofQW_transposed(self):
wsName = 'ws'
CloneWorkspace(self._sqw, OutputWorkspace=wsName)
Transpose(wsName, OutputWorkspace=wsName)
kwargs = {'workspace': wsName}
testhelpers.assertRaisesNothing(self, directtools.plotSofQW, **kwargs)
DeleteWorkspace(wsName)
kwargs = {'workspace': self._sqw}
testhelpers.assertRaisesNothing(self, directtools.plotSofQW, **kwargs)
def test_plotSofQW_dynamicsusceptibility(self):
xs = numpy.array([-2, -1, 0, 1, 2])
ys = numpy.tile(numpy.array([1, 1, 1, 1, 1]), 10)
vertX = numpy.array([-4, -3, -2, -1, 0, 1, 2, 3, 4, 5])
ws = CreateWorkspace(DataX=xs, DataY=ys, NSpec=10, UnitX='MomentumTransfer', VerticalAxisUnit='DeltaE',
VerticalAxisValues=vertX,
StoreInADS=False)
wsOut = directtools.dynamicsusceptibility(ws, 100.)
kwargs = {'workspace': wsOut}
figure, axes = testhelpers.assertRaisesNothing(self, directtools.plotSofQW, **kwargs)
self.assertEqual(len(figure.axes), 2)
colorbar_axes = figure.axes[-1]
self.assertEqual(colorbar_axes.get_ylabel(), r"$\chi''(Q,E)$ (arb. units)")
self.assertEqual(axes.get_ylim()[0], 0.)
def test_subplots(self):
testhelpers.assertRaisesNothing(self, directtools.subplots)
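# NaNs are planted at the first and last points of the middle spectrum, so validQ at a vertical value of 0 (middle spectrum) should shrink the range to xs[1]..xs[-2], while -2.5 (first spectrum) keeps the full range.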
def test_validQ(self):
xs = numpy.tile(numpy.array([-1, 0, 2, 4, 5]), 3)
nPoints = 4
ys = numpy.tile(numpy.zeros(nPoints), 3)
ys[nPoints] = numpy.nan
ys[2 * nPoints - 1] = numpy.nan
vertAxis = numpy.array([-3, -1, 2, 4])
ws = CreateWorkspace(DataX=xs, DataY=ys, NSpec=3, UnitX='MomentumTransfer',
VerticalAxisUnit='Degrees', VerticalAxisValues=vertAxis, StoreInADS=False)
qMin, qMax = directtools.validQ(ws, -2.5)
self.assertEqual(qMin, xs[0])
self.assertEqual(qMax, xs[-1])
qMin, qMax = directtools.validQ(ws, 0)
self.assertEqual(qMin, xs[1])
self.assertEqual(qMax, xs[-2])
def test_wsreport(self):
testhelpers.assertRaisesNothing(self, directtools.wsreport, **{'workspace': self._sqw})
in5WS = LoadILLTOF('ILL/IN5/104007.nxs', StoreInADS=False)
testhelpers.assertRaisesNothing(self, directtools.wsreport, **{'workspace': in5WS})
in6WS = LoadILLTOF('ILL/IN6/164192.nxs', StoreInADS=False)
testhelpers.assertRaisesNothing(self, directtools.wsreport, **{'workspace': in6WS})
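# SampleLogs is expected to expose run properties as attributes; a dotted log name such as 'b.c' becomes the nested attribute logs.b.c.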
def test_SampleLogs(self):
ws = CreateSampleWorkspace(NumBanks=1, BankPixelWidth=1)
ws.mutableRun().addProperty('a', 7, True)
ws.mutableRun().addProperty('b.c', 13, True)
logs = directtools.SampleLogs(ws)
self.assertTrue(hasattr(logs, 'a'))
self.assertEqual(logs.a, 7)
self.assertTrue(hasattr(logs, 'b'))
self.assertTrue(hasattr(logs.b, 'c'))
self.assertEqual(logs.b.c, 13)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
Edu-Glez/Bank_sentiment_analysis | env/lib/python3.6/site-packages/pandas/tests/test_multilevel.py | 7 | 92692 | # -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101,W0141
import datetime
import itertools
import nose
from numpy.random import randn
import numpy as np
from pandas.core.index import Index, MultiIndex
from pandas import Panel, DataFrame, Series, notnull, isnull, Timestamp
from pandas.types.common import is_float_dtype, is_integer_dtype
from pandas.util.testing import (assert_almost_equal, assert_series_equal,
assert_frame_equal, assertRaisesRegexp)
import pandas.core.common as com
import pandas.util.testing as tm
from pandas.compat import (range, lrange, StringIO, lzip, u, product as
cart_product, zip)
import pandas as pd
import pandas.index as _index
class TestMultiLevel(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',
'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.frame = DataFrame(np.random.randn(10, 3), index=index,
columns=Index(['A', 'B', 'C'], name='exp'))
self.single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
# create test series object
arrays = [['bar', 'bar', 'baz', 'baz', 'qux', 'qux', 'foo', 'foo'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = lzip(*arrays)
index = MultiIndex.from_tuples(tuples)
s = Series(randn(8), index=index)
s[3] = np.NaN
self.series = s
tm.N = 100
self.tdf = tm.makeTimeDataFrame()
self.ymd = self.tdf.groupby([lambda x: x.year, lambda x: x.month,
lambda x: x.day]).sum()
# use Int64Index, to make sure things work
self.ymd.index.set_levels([lev.astype('i8')
for lev in self.ymd.index.levels],
inplace=True)
self.ymd.index.set_names(['year', 'month', 'day'], inplace=True)
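# Fixtures from setUp: self.frame has a two-level (first, second) row index, self.ymd is a time frame summed by (year, month, day), and self.series has a two-level index with one NaN.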
def test_append(self):
a, b = self.frame[:5], self.frame[5:]
result = a.append(b)
tm.assert_frame_equal(result, self.frame)
result = a['A'].append(b['A'])
tm.assert_series_equal(result, self.frame['A'])
def test_append_index(self):
tm._skip_if_no_pytz()
idx1 = Index([1.1, 1.2, 1.3])
idx2 = pd.date_range('2011-01-01', freq='D', periods=3,
tz='Asia/Tokyo')
idx3 = Index(['A', 'B', 'C'])
midx_lv2 = MultiIndex.from_arrays([idx1, idx2])
midx_lv3 = MultiIndex.from_arrays([idx1, idx2, idx3])
result = idx1.append(midx_lv2)
# GH 7112
import pytz
tz = pytz.timezone('Asia/Tokyo')
expected_tuples = [(1.1, datetime.datetime(2011, 1, 1, tzinfo=tz)),
(1.2, datetime.datetime(2011, 1, 2, tzinfo=tz)),
(1.3, datetime.datetime(2011, 1, 3, tzinfo=tz))]
expected = Index([1.1, 1.2, 1.3] + expected_tuples)
self.assert_index_equal(result, expected)
result = midx_lv2.append(idx1)
expected = Index(expected_tuples + [1.1, 1.2, 1.3])
self.assert_index_equal(result, expected)
result = midx_lv2.append(midx_lv2)
expected = MultiIndex.from_arrays([idx1.append(idx1),
idx2.append(idx2)])
self.assert_index_equal(result, expected)
result = midx_lv2.append(midx_lv3)
self.assert_index_equal(result, expected)
result = midx_lv3.append(midx_lv2)
expected = Index._simple_new(
np.array([(1.1, datetime.datetime(2011, 1, 1, tzinfo=tz), 'A'),
(1.2, datetime.datetime(2011, 1, 2, tzinfo=tz), 'B'),
(1.3, datetime.datetime(2011, 1, 3, tzinfo=tz), 'C')] +
expected_tuples), None)
self.assert_index_equal(result, expected)
def test_dataframe_constructor(self):
multi = DataFrame(np.random.randn(4, 4),
index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assertIsInstance(multi.index, MultiIndex)
self.assertNotIsInstance(multi.columns, MultiIndex)
multi = DataFrame(np.random.randn(4, 4),
columns=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assertIsInstance(multi.columns, MultiIndex)
def test_series_constructor(self):
multi = Series(1., index=[np.array(['a', 'a', 'b', 'b']), np.array(
['x', 'y', 'x', 'y'])])
tm.assertIsInstance(multi.index, MultiIndex)
multi = Series(1., index=[['a', 'a', 'b', 'b'], ['x', 'y', 'x', 'y']])
tm.assertIsInstance(multi.index, MultiIndex)
multi = Series(lrange(4), index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assertIsInstance(multi.index, MultiIndex)
def test_reindex_level(self):
# axis=0
month_sums = self.ymd.sum(level='month')
result = month_sums.reindex(self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum)
assert_frame_equal(result, expected)
# Series
result = month_sums['A'].reindex(self.ymd.index, level=1)
expected = self.ymd['A'].groupby(level='month').transform(np.sum)
assert_series_equal(result, expected, check_names=False)
# axis=1
month_sums = self.ymd.T.sum(axis=1, level='month')
result = month_sums.reindex(columns=self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum).T
assert_frame_equal(result, expected)
def test_binops_level(self):
def _check_op(opname):
op = getattr(DataFrame, opname)
month_sums = self.ymd.sum(level='month')
result = op(self.ymd, month_sums, level='month')
broadcasted = self.ymd.groupby(level='month').transform(np.sum)
expected = op(self.ymd, broadcasted)
assert_frame_equal(result, expected)
# Series
op = getattr(Series, opname)
result = op(self.ymd['A'], month_sums['A'], level='month')
broadcasted = self.ymd['A'].groupby(level='month').transform(
np.sum)
expected = op(self.ymd['A'], broadcasted)
expected.name = 'A'
assert_series_equal(result, expected)
_check_op('sub')
_check_op('add')
_check_op('mul')
_check_op('div')
def test_pickle(self):
def _test_roundtrip(frame):
unpickled = self.round_trip_pickle(frame)
assert_frame_equal(frame, unpickled)
_test_roundtrip(self.frame)
_test_roundtrip(self.frame.T)
_test_roundtrip(self.ymd)
_test_roundtrip(self.ymd.T)
def test_reindex(self):
reindexed = self.frame.ix[[('foo', 'one'), ('bar', 'one')]]
expected = self.frame.ix[[0, 3]]
assert_frame_equal(reindexed, expected)
def test_reindex_preserve_levels(self):
new_index = self.ymd.index[::10]
chunk = self.ymd.reindex(new_index)
self.assertIs(chunk.index, new_index)
chunk = self.ymd.ix[new_index]
self.assertIs(chunk.index, new_index)
ymdT = self.ymd.T
chunk = ymdT.reindex(columns=new_index)
self.assertIs(chunk.columns, new_index)
chunk = ymdT.ix[:, new_index]
self.assertIs(chunk.columns, new_index)
def test_sort_index_preserve_levels(self):
result = self.frame.sort_index()
self.assertEqual(result.index.names, self.frame.index.names)
def test_sorting_repr_8017(self):
np.random.seed(0)
data = np.random.randn(3, 4)
for gen, extra in [([1., 3., 2., 5.], 4.), ([1, 3, 2, 5], 4),
([Timestamp('20130101'), Timestamp('20130103'),
Timestamp('20130102'), Timestamp('20130105')],
Timestamp('20130104')),
(['1one', '3one', '2one', '5one'], '4one')]:
columns = MultiIndex.from_tuples([('red', i) for i in gen])
df = DataFrame(data, index=list('def'), columns=columns)
df2 = pd.concat([df,
DataFrame('world', index=list('def'),
columns=MultiIndex.from_tuples(
[('red', extra)]))], axis=1)
# check that the repr is good
# make sure that we have a correct sparsified repr
# e.g. only one 'red' appears in the header row
self.assertEqual(str(df2).splitlines()[0].split(), ['red'])
# GH 8017
# sorting fails after columns added
# construct single-dtype then sort
result = df.copy().sort_index(axis=1)
expected = df.iloc[:, [0, 2, 1, 3]]
assert_frame_equal(result, expected)
result = df2.sort_index(axis=1)
expected = df2.iloc[:, [0, 2, 1, 4, 3]]
assert_frame_equal(result, expected)
# setitem then sort
result = df.copy()
result[('red', extra)] = 'world'
result = result.sort_index(axis=1)
assert_frame_equal(result, expected)
def test_repr_to_string(self):
repr(self.frame)
repr(self.ymd)
repr(self.frame.T)
repr(self.ymd.T)
buf = StringIO()
self.frame.to_string(buf=buf)
self.ymd.to_string(buf=buf)
self.frame.T.to_string(buf=buf)
self.ymd.T.to_string(buf=buf)
def test_repr_name_coincide(self):
index = MultiIndex.from_tuples([('a', 0, 'foo'), ('b', 1, 'bar')],
names=['a', 'b', 'c'])
df = DataFrame({'value': [0, 1]}, index=index)
lines = repr(df).split('\n')
self.assertTrue(lines[2].startswith('a 0 foo'))
def test_getitem_simple(self):
df = self.frame.T
col = df['foo', 'one']
assert_almost_equal(col.values, df.values[:, 0])
self.assertRaises(KeyError, df.__getitem__, ('foo', 'four'))
self.assertRaises(KeyError, df.__getitem__, 'foobar')
def test_series_getitem(self):
s = self.ymd['A']
result = s[2000, 3]
# TODO(wesm): unused?
# result2 = s.ix[2000, 3]
expected = s.reindex(s.index[42:65])
expected.index = expected.index.droplevel(0).droplevel(0)
assert_series_equal(result, expected)
result = s[2000, 3, 10]
expected = s[49]
self.assertEqual(result, expected)
# fancy
result = s.ix[[(2000, 3, 10), (2000, 3, 13)]]
expected = s.reindex(s.index[49:51])
assert_series_equal(result, expected)
# key error
self.assertRaises(KeyError, s.__getitem__, (2000, 3, 4))
def test_series_getitem_corner(self):
s = self.ymd['A']
# don't segfault, GH #495
# out of bounds access
self.assertRaises(IndexError, s.__getitem__, len(self.ymd))
# generator
result = s[(x > 0 for x in s)]
expected = s[s > 0]
assert_series_equal(result, expected)
def test_series_setitem(self):
s = self.ymd['A']
s[2000, 3] = np.nan
self.assertTrue(isnull(s.values[42:65]).all())
self.assertTrue(notnull(s.values[:42]).all())
self.assertTrue(notnull(s.values[65:]).all())
s[2000, 3, 10] = np.nan
self.assertTrue(isnull(s[49]))
def test_series_slice_partial(self):
pass
def test_frame_getitem_setitem_boolean(self):
df = self.frame.T.copy()
values = df.values
result = df[df > 0]
expected = df.where(df > 0)
assert_frame_equal(result, expected)
df[df > 0] = 5
values[values > 0] = 5
assert_almost_equal(df.values, values)
df[df == 5] = 0
values[values == 5] = 0
assert_almost_equal(df.values, values)
# a df that needs alignment first
df[df[:-1] < 0] = 2
np.putmask(values[:-1], values[:-1] < 0, 2)
assert_almost_equal(df.values, values)
with assertRaisesRegexp(TypeError, 'boolean values only'):
df[df * 0] = 2
def test_frame_getitem_setitem_slice(self):
# getitem
result = self.frame.ix[:4]
expected = self.frame[:4]
assert_frame_equal(result, expected)
# setitem
cp = self.frame.copy()
cp.ix[:4] = 0
self.assertTrue((cp.values[:4] == 0).all())
self.assertTrue((cp.values[4:] != 0).all())
def test_frame_getitem_setitem_multislice(self):
levels = [['t1', 't2'], ['a', 'b', 'c']]
labels = [[0, 0, 0, 1, 1], [0, 1, 2, 0, 1]]
midx = MultiIndex(labels=labels, levels=levels, names=[None, 'id'])
df = DataFrame({'value': [1, 2, 3, 7, 8]}, index=midx)
result = df.ix[:, 'value']
assert_series_equal(df['value'], result)
result = df.ix[1:3, 'value']
assert_series_equal(df['value'][1:3], result)
result = df.ix[:, :]
assert_frame_equal(df, result)
result = df
df.ix[:, 'value'] = 10
result['value'] = 10
assert_frame_equal(df, result)
df.ix[:, :] = 10
assert_frame_equal(df, result)
def test_frame_getitem_multicolumn_empty_level(self):
f = DataFrame({'a': ['1', '2', '3'], 'b': ['2', '3', '4']})
f.columns = [['level1 item1', 'level1 item2'], ['', 'level2 item2'],
['level3 item1', 'level3 item2']]
result = f['level1 item1']
expected = DataFrame([['1'], ['2'], ['3']], index=f.index,
columns=['level3 item1'])
assert_frame_equal(result, expected)
def test_frame_setitem_multi_column(self):
df = DataFrame(randn(10, 4), columns=[['a', 'a', 'b', 'b'],
[0, 1, 0, 1]])
cp = df.copy()
cp['a'] = cp['b']
assert_frame_equal(cp['a'], cp['b'])
# set with ndarray
cp = df.copy()
cp['a'] = cp['b'].values
assert_frame_equal(cp['a'], cp['b'])
# ---------------------------------------
# #1803
columns = MultiIndex.from_tuples([('A', '1'), ('A', '2'), ('B', '1')])
df = DataFrame(index=[1, 3, 5], columns=columns)
# GH #1803: assigning a scalar to top-level 'A' should update both
# ('A', '1') and ('A', '2'); it used to add a new column instead
df['A'] = 0.0
self.assertTrue((df['A'].values == 0).all())
# it broadcasts
df['B', '1'] = [1, 2, 3]
df['A'] = df['B', '1']
sliced_a1 = df['A', '1']
sliced_a2 = df['A', '2']
sliced_b1 = df['B', '1']
assert_series_equal(sliced_a1, sliced_b1, check_names=False)
assert_series_equal(sliced_a2, sliced_b1, check_names=False)
self.assertEqual(sliced_a1.name, ('A', '1'))
self.assertEqual(sliced_a2.name, ('A', '2'))
self.assertEqual(sliced_b1.name, ('B', '1'))
def test_getitem_tuple_plus_slice(self):
# GH #671
df = DataFrame({'a': lrange(10),
'b': lrange(10),
'c': np.random.randn(10),
'd': np.random.randn(10)})
idf = df.set_index(['a', 'b'])
result = idf.ix[(0, 0), :]
expected = idf.ix[0, 0]
expected2 = idf.xs((0, 0))
assert_series_equal(result, expected)
assert_series_equal(result, expected2)
def test_getitem_setitem_tuple_plus_columns(self):
# GH #1013
df = self.ymd[:5]
result = df.ix[(2000, 1, 6), ['A', 'B', 'C']]
expected = df.ix[2000, 1, 6][['A', 'B', 'C']]
assert_series_equal(result, expected)
def test_getitem_multilevel_index_tuple_unsorted(self):
index_columns = list("abc")
df = DataFrame([[0, 1, 0, "x"], [0, 0, 1, "y"]],
columns=index_columns + ["data"])
df = df.set_index(index_columns)
query_index = df.index[:1]
rs = df.ix[query_index, "data"]
xp_idx = MultiIndex.from_tuples([(0, 1, 0)], names=['a', 'b', 'c'])
xp = Series(['x'], index=xp_idx, name='data')
assert_series_equal(rs, xp)
def test_xs(self):
xs = self.frame.xs(('bar', 'two'))
xs2 = self.frame.ix[('bar', 'two')]
assert_series_equal(xs, xs2)
assert_almost_equal(xs.values, self.frame.values[4])
# GH 6574
# missing values in returned index should be preserved
acc = [
('a', 'abcde', 1),
('b', 'bbcde', 2),
('y', 'yzcde', 25),
('z', 'xbcde', 24),
('z', None, 26),
('z', 'zbcde', 25),
('z', 'ybcde', 26),
]
df = DataFrame(acc,
columns=['a1', 'a2', 'cnt']).set_index(['a1', 'a2'])
expected = DataFrame({'cnt': [24, 26, 25, 26]}, index=Index(
['xbcde', np.nan, 'zbcde', 'ybcde'], name='a2'))
result = df.xs('z', level='a1')
assert_frame_equal(result, expected)
def test_xs_partial(self):
result = self.frame.xs('foo')
result2 = self.frame.ix['foo']
expected = self.frame.T['foo'].T
assert_frame_equal(result, expected)
assert_frame_equal(result, result2)
result = self.ymd.xs((2000, 4))
expected = self.ymd.ix[2000, 4]
assert_frame_equal(result, expected)
# ex from #1796
index = MultiIndex(levels=[['foo', 'bar'], ['one', 'two'], [-1, 1]],
labels=[[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1], [0, 1, 0, 1, 0, 1,
0, 1]])
df = DataFrame(np.random.randn(8, 4), index=index,
columns=list('abcd'))
result = df.xs(['foo', 'one'])
expected = df.ix['foo', 'one']
assert_frame_equal(result, expected)
def test_xs_level(self):
result = self.frame.xs('two', level='second')
expected = self.frame[self.frame.index.get_level_values(1) == 'two']
expected.index = expected.index.droplevel(1)
assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([('x', 'y', 'z'), ('a', 'b', 'c'), (
'p', 'q', 'r')])
df = DataFrame(np.random.randn(3, 5), index=index)
result = df.xs('c', level=2)
expected = df[1:2]
expected.index = expected.index.droplevel(2)
assert_frame_equal(result, expected)
# this is a copy in 0.14
result = self.frame.xs('two', level='second')
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
self.assertRaises(com.SettingWithCopyError, f, result)
def test_xs_level_multiple(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
df = read_table(StringIO(text), sep=r'\s+', engine='python')
result = df.xs(('a', 4), level=['one', 'four'])
expected = df.xs('a').xs(4, level='four')
assert_frame_equal(result, expected)
# this is a copy in 0.14
result = df.xs(('a', 4), level=['one', 'four'])
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
self.assertRaises(com.SettingWithCopyError, f, result)
# GH2107
dates = lrange(20111201, 20111205)
ids = 'abcde'
idx = MultiIndex.from_tuples([x for x in cart_product(dates, ids)])
idx.names = ['date', 'secid']
df = DataFrame(np.random.randn(len(idx), 3), idx, ['X', 'Y', 'Z'])
rs = df.xs(20111201, level='date')
xp = df.ix[20111201, :]
assert_frame_equal(rs, xp)
def test_xs_level0(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
df = read_table(StringIO(text), sep=r'\s+', engine='python')
result = df.xs('a', level=0)
expected = df.xs('a')
self.assertEqual(len(result), 2)
assert_frame_equal(result, expected)
def test_xs_level_series(self):
s = self.frame['A']
result = s[:, 'two']
expected = self.frame.xs('two', level=1)['A']
assert_series_equal(result, expected)
s = self.ymd['A']
result = s[2000, 5]
expected = self.ymd.ix[2000, 5]['A']
assert_series_equal(result, expected)
# not implementing this for now
self.assertRaises(TypeError, s.__getitem__, (2000, slice(3, 4)))
# result = s[2000, 3:4]
# lv =s.index.get_level_values(1)
# expected = s[(lv == 3) | (lv == 4)]
# expected.index = expected.index.droplevel(0)
# assert_series_equal(result, expected)
# can do this though
def test_get_loc_single_level(self):
s = Series(np.random.randn(len(self.single_level)),
index=self.single_level)
for k in self.single_level.values:
s[k]
def test_getitem_toplevel(self):
df = self.frame.T
result = df['foo']
expected = df.reindex(columns=df.columns[:3])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
result = df['bar']
result2 = df.ix[:, 'bar']
expected = df.reindex(columns=df.columns[3:5])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result, result2)
def test_getitem_setitem_slice_integers(self):
index = MultiIndex(levels=[[0, 1, 2], [0, 2]],
labels=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]])
frame = DataFrame(np.random.randn(len(index), 4), index=index,
columns=['a', 'b', 'c', 'd'])
res = frame.ix[1:2]
exp = frame.reindex(frame.index[2:])
assert_frame_equal(res, exp)
frame.ix[1:2] = 7
self.assertTrue((frame.ix[1:2] == 7).values.all())
series = Series(np.random.randn(len(index)), index=index)
res = series.ix[1:2]
exp = series.reindex(series.index[2:])
assert_series_equal(res, exp)
series.ix[1:2] = 7
self.assertTrue((series.ix[1:2] == 7).values.all())
def test_getitem_int(self):
levels = [[0, 1], [0, 1, 2]]
labels = [[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]]
index = MultiIndex(levels=levels, labels=labels)
frame = DataFrame(np.random.randn(6, 2), index=index)
result = frame.ix[1]
expected = frame[-3:]
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
# raises exception
self.assertRaises(KeyError, frame.ix.__getitem__, 3)
# however this will work
result = self.frame.ix[2]
expected = self.frame.xs(self.frame.index[2])
assert_series_equal(result, expected)
def test_getitem_partial(self):
ymd = self.ymd.T
result = ymd[2000, 2]
expected = ymd.reindex(columns=ymd.columns[ymd.columns.labels[1] == 1])
expected.columns = expected.columns.droplevel(0).droplevel(0)
assert_frame_equal(result, expected)
def test_getitem_slice_not_sorted(self):
df = self.frame.sortlevel(1).T
# buglet with int typechecking
result = df.ix[:, :np.int32(3)]
expected = df.reindex(columns=df.columns[:3])
assert_frame_equal(result, expected)
def test_setitem_change_dtype(self):
dft = self.frame.T
s = dft['foo', 'two']
dft['foo', 'two'] = s > s.median()
assert_series_equal(dft['foo', 'two'], s > s.median())
# tm.assertIsInstance(dft._data.blocks[1].items, MultiIndex)
reindexed = dft.reindex(columns=[('foo', 'two')])
assert_series_equal(reindexed['foo', 'two'], s > s.median())
def test_frame_setitem_ix(self):
self.frame.ix[('bar', 'two'), 'B'] = 5
self.assertEqual(self.frame.ix[('bar', 'two'), 'B'], 5)
# with integer labels
df = self.frame.copy()
df.columns = lrange(3)
df.ix[('bar', 'two'), 1] = 7
self.assertEqual(df.ix[('bar', 'two'), 1], 7)
def test_fancy_slice_partial(self):
result = self.frame.ix['bar':'baz']
expected = self.frame[3:7]
assert_frame_equal(result, expected)
result = self.ymd.ix[(2000, 2):(2000, 4)]
lev = self.ymd.index.labels[1]
expected = self.ymd[(lev >= 1) & (lev <= 3)]
assert_frame_equal(result, expected)
def test_getitem_partial_column_select(self):
idx = MultiIndex(labels=[[0, 0, 0], [0, 1, 1], [1, 0, 1]],
levels=[['a', 'b'], ['x', 'y'], ['p', 'q']])
df = DataFrame(np.random.rand(3, 2), index=idx)
result = df.ix[('a', 'y'), :]
expected = df.ix[('a', 'y')]
assert_frame_equal(result, expected)
result = df.ix[('a', 'y'), [1, 0]]
expected = df.ix[('a', 'y')][[1, 0]]
assert_frame_equal(result, expected)
self.assertRaises(KeyError, df.ix.__getitem__,
(('a', 'foo'), slice(None, None)))
def test_sortlevel(self):
df = self.frame.copy()
df.index = np.arange(len(df))
# axis=1
# series
a_sorted = self.frame['A'].sortlevel(0)
# preserve names
self.assertEqual(a_sorted.index.names, self.frame.index.names)
# inplace
rs = self.frame.copy()
rs.sortlevel(0, inplace=True)
assert_frame_equal(rs, self.frame.sortlevel(0))
def test_sortlevel_large_cardinality(self):
# #2684 (int64)
index = MultiIndex.from_arrays([np.arange(4000)] * 3)
df = DataFrame(np.random.randn(4000), index=index, dtype=np.int64)
# it works!
result = df.sortlevel(0)
self.assertTrue(result.index.lexsort_depth == 3)
# #2684 (int32)
index = MultiIndex.from_arrays([np.arange(4000)] * 3)
df = DataFrame(np.random.randn(4000), index=index, dtype=np.int32)
# it works!
result = df.sortlevel(0)
self.assertTrue((result.dtypes.values == df.dtypes.values).all())
self.assertTrue(result.index.lexsort_depth == 3)
def test_delevel_infer_dtype(self):
tuples = [tuple
for tuple in cart_product(
['foo', 'bar'], [10, 20], [1.0, 1.1])]
index = MultiIndex.from_tuples(tuples, names=['prm0', 'prm1', 'prm2'])
df = DataFrame(np.random.randn(8, 3), columns=['A', 'B', 'C'],
index=index)
deleveled = df.reset_index()
self.assertTrue(is_integer_dtype(deleveled['prm1']))
self.assertTrue(is_float_dtype(deleveled['prm2']))
def test_reset_index_with_drop(self):
deleveled = self.ymd.reset_index(drop=True)
self.assertEqual(len(deleveled.columns), len(self.ymd.columns))
deleveled = self.series.reset_index()
tm.assertIsInstance(deleveled, DataFrame)
self.assertEqual(len(deleveled.columns),
len(self.series.index.levels) + 1)
deleveled = self.series.reset_index(drop=True)
tm.assertIsInstance(deleveled, Series)
def test_sortlevel_by_name(self):
self.frame.index.names = ['first', 'second']
result = self.frame.sortlevel(level='second')
expected = self.frame.sortlevel(level=1)
assert_frame_equal(result, expected)
def test_sortlevel_mixed(self):
sorted_before = self.frame.sortlevel(1)
df = self.frame.copy()
df['foo'] = 'bar'
sorted_after = df.sortlevel(1)
assert_frame_equal(sorted_before, sorted_after.drop(['foo'], axis=1))
dft = self.frame.T
sorted_before = dft.sortlevel(1, axis=1)
dft['foo', 'three'] = 'bar'
sorted_after = dft.sortlevel(1, axis=1)
assert_frame_equal(sorted_before.drop([('foo', 'three')], axis=1),
sorted_after.drop([('foo', 'three')], axis=1))
def test_count_level(self):
def _check_counts(frame, axis=0):
index = frame._get_axis(axis)
for i in range(index.nlevels):
result = frame.count(axis=axis, level=i)
expected = frame.groupby(axis=axis, level=i).count()
expected = expected.reindex_like(result).astype('i8')
assert_frame_equal(result, expected)
self.frame.ix[1, [1, 2]] = np.nan
self.frame.ix[7, [0, 1]] = np.nan
self.ymd.ix[1, [1, 2]] = np.nan
self.ymd.ix[7, [0, 1]] = np.nan
_check_counts(self.frame)
_check_counts(self.ymd)
_check_counts(self.frame.T, axis=1)
_check_counts(self.ymd.T, axis=1)
# can't call with level on regular DataFrame
df = tm.makeTimeDataFrame()
assertRaisesRegexp(TypeError, 'hierarchical', df.count, level=0)
self.frame['D'] = 'foo'
result = self.frame.count(level=0, numeric_only=True)
tm.assert_index_equal(result.columns,
pd.Index(['A', 'B', 'C'], name='exp'))
def test_count_level_series(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz'], ['one', 'two',
'three', 'four']],
labels=[[0, 0, 0, 2, 2], [2, 0, 1, 1, 2]])
s = Series(np.random.randn(len(index)), index=index)
result = s.count(level=0)
expected = s.groupby(level=0).count()
assert_series_equal(result.astype('f8'),
expected.reindex(result.index).fillna(0))
result = s.count(level=1)
expected = s.groupby(level=1).count()
assert_series_equal(result.astype('f8'),
expected.reindex(result.index).fillna(0))
def test_count_level_corner(self):
s = self.frame['A'][:0]
result = s.count(level=0)
expected = Series(0, index=s.index.levels[0], name='A')
assert_series_equal(result, expected)
df = self.frame[:0]
result = df.count(level=0)
expected = DataFrame({}, index=s.index.levels[0],
columns=df.columns).fillna(0).astype(np.int64)
assert_frame_equal(result, expected)
def test_get_level_number_out_of_bounds(self):
with assertRaisesRegexp(IndexError, "Too many levels"):
self.frame.index._get_level_number(2)
with assertRaisesRegexp(IndexError, "not a valid level number"):
self.frame.index._get_level_number(-3)
def test_unstack(self):
# just check that it works for now
unstacked = self.ymd.unstack()
unstacked.unstack()
# test that ints work
self.ymd.astype(int).unstack()
# test that int32 work
self.ymd.astype(np.int32).unstack()
def test_unstack_multiple_no_empty_columns(self):
index = MultiIndex.from_tuples([(0, 'foo', 0), (0, 'bar', 0), (
1, 'baz', 1), (1, 'qux', 1)])
s = Series(np.random.randn(4), index=index)
unstacked = s.unstack([1, 2])
expected = unstacked.dropna(axis=1, how='all')
assert_frame_equal(unstacked, expected)
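# stack/unstack roundtrips: restacking an unstacked frame should recover self.ymd, also after re-sorting levels, reversing rows, swapping levels and with more than two column levels.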
def test_stack(self):
# regular roundtrip
unstacked = self.ymd.unstack()
restacked = unstacked.stack()
assert_frame_equal(restacked, self.ymd)
unlexsorted = self.ymd.sortlevel(2)
unstacked = unlexsorted.unstack(2)
restacked = unstacked.stack()
assert_frame_equal(restacked.sortlevel(0), self.ymd)
unlexsorted = unlexsorted[::-1]
unstacked = unlexsorted.unstack(1)
restacked = unstacked.stack().swaplevel(1, 2)
assert_frame_equal(restacked.sortlevel(0), self.ymd)
unlexsorted = unlexsorted.swaplevel(0, 1)
unstacked = unlexsorted.unstack(0).swaplevel(0, 1, axis=1)
restacked = unstacked.stack(0).swaplevel(1, 2)
assert_frame_equal(restacked.sortlevel(0), self.ymd)
# columns unsorted
unstacked = self.ymd.unstack()
unstacked = unstacked.sort_index(axis=1, ascending=False)
restacked = unstacked.stack()
assert_frame_equal(restacked, self.ymd)
# more than 2 levels in the columns
unstacked = self.ymd.unstack(1).unstack(1)
result = unstacked.stack(1)
expected = self.ymd.unstack()
assert_frame_equal(result, expected)
result = unstacked.stack(2)
expected = self.ymd.unstack(1)
assert_frame_equal(result, expected)
result = unstacked.stack(0)
expected = self.ymd.stack().unstack(1).unstack(1)
assert_frame_equal(result, expected)
# not all levels present in each echelon
unstacked = self.ymd.unstack(2).ix[:, ::3]
stacked = unstacked.stack().stack()
ymd_stacked = self.ymd.stack()
assert_series_equal(stacked, ymd_stacked.reindex(stacked.index))
# stack with negative number
result = self.ymd.unstack(0).stack(-2)
expected = self.ymd.unstack(0).stack(0)
# GH10417
def check(left, right):
assert_series_equal(left, right)
self.assertFalse(left.index.is_unique)
li, ri = left.index, right.index
tm.assert_index_equal(li, ri)
df = DataFrame(np.arange(12).reshape(4, 3),
index=list('abab'),
columns=['1st', '2nd', '3rd'])
mi = MultiIndex(levels=[['a', 'b'], ['1st', '2nd', '3rd']],
labels=[np.tile(
np.arange(2).repeat(3), 2), np.tile(
np.arange(3), 4)])
left, right = df.stack(), Series(np.arange(12), index=mi)
check(left, right)
df.columns = ['1st', '2nd', '1st']
mi = MultiIndex(levels=[['a', 'b'], ['1st', '2nd']], labels=[np.tile(
np.arange(2).repeat(3), 2), np.tile(
[0, 1, 0], 4)])
left, right = df.stack(), Series(np.arange(12), index=mi)
check(left, right)
tpls = ('a', 2), ('b', 1), ('a', 1), ('b', 2)
df.index = MultiIndex.from_tuples(tpls)
mi = MultiIndex(levels=[['a', 'b'], [1, 2], ['1st', '2nd']],
labels=[np.tile(
np.arange(2).repeat(3), 2), np.repeat(
[1, 0, 1], [3, 6, 3]), np.tile(
[0, 1, 0], 4)])
left, right = df.stack(), Series(np.arange(12), index=mi)
check(left, right)
def test_unstack_odd_failure(self):
data = """day,time,smoker,sum,len
Fri,Dinner,No,8.25,3.
Fri,Dinner,Yes,27.03,9
Fri,Lunch,No,3.0,1
Fri,Lunch,Yes,13.68,6
Sat,Dinner,No,139.63,45
Sat,Dinner,Yes,120.77,42
Sun,Dinner,No,180.57,57
Sun,Dinner,Yes,66.82,19
Thur,Dinner,No,3.0,1
Thur,Lunch,No,117.32,44
Thur,Lunch,Yes,51.51,17"""
df = pd.read_csv(StringIO(data)).set_index(['day', 'time', 'smoker'])
# it works, #2100
result = df.unstack(2)
recons = result.stack()
assert_frame_equal(recons, df)
def test_stack_mixed_dtype(self):
df = self.frame.T
df['foo', 'four'] = 'foo'
df = df.sortlevel(1, axis=1)
stacked = df.stack()
result = df['foo'].stack()
assert_series_equal(stacked['foo'], result, check_names=False)
self.assertIs(result.name, None)
self.assertEqual(stacked['bar'].dtype, np.float_)
def test_unstack_bug(self):
df = DataFrame({'state': ['naive', 'naive', 'naive', 'activ', 'activ',
'activ'],
'exp': ['a', 'b', 'b', 'b', 'a', 'a'],
'barcode': [1, 2, 3, 4, 1, 3],
'v': ['hi', 'hi', 'bye', 'bye', 'bye', 'peace'],
'extra': np.arange(6.)})
result = df.groupby(['state', 'exp', 'barcode', 'v']).apply(len)
unstacked = result.unstack()
restacked = unstacked.stack()
assert_series_equal(restacked,
result.reindex(restacked.index).astype(float))
def test_stack_unstack_preserve_names(self):
unstacked = self.frame.unstack()
self.assertEqual(unstacked.index.name, 'first')
self.assertEqual(unstacked.columns.names, ['exp', 'second'])
restacked = unstacked.stack()
self.assertEqual(restacked.index.names, self.frame.index.names)
def test_unstack_level_name(self):
result = self.frame.unstack('second')
expected = self.frame.unstack(level=1)
assert_frame_equal(result, expected)
def test_stack_level_name(self):
unstacked = self.frame.unstack('second')
result = unstacked.stack('exp')
expected = self.frame.unstack().stack(0)
assert_frame_equal(result, expected)
result = self.frame.stack('exp')
expected = self.frame.stack()
assert_series_equal(result, expected)
def test_stack_unstack_multiple(self):
unstacked = self.ymd.unstack(['year', 'month'])
expected = self.ymd.unstack('year').unstack('month')
assert_frame_equal(unstacked, expected)
self.assertEqual(unstacked.columns.names, expected.columns.names)
# series
s = self.ymd['A']
s_unstacked = s.unstack(['year', 'month'])
assert_frame_equal(s_unstacked, expected['A'])
restacked = unstacked.stack(['year', 'month'])
restacked = restacked.swaplevel(0, 1).swaplevel(1, 2)
restacked = restacked.sortlevel(0)
assert_frame_equal(restacked, self.ymd)
self.assertEqual(restacked.index.names, self.ymd.index.names)
# GH #451
unstacked = self.ymd.unstack([1, 2])
expected = self.ymd.unstack(1).unstack(1).dropna(axis=1, how='all')
assert_frame_equal(unstacked, expected)
unstacked = self.ymd.unstack([2, 1])
expected = self.ymd.unstack(2).unstack(1).dropna(axis=1, how='all')
assert_frame_equal(unstacked, expected.ix[:, unstacked.columns])
def test_stack_names_and_numbers(self):
unstacked = self.ymd.unstack(['year', 'month'])
# Can't use mixture of names and numbers to stack
with assertRaisesRegexp(ValueError, "level should contain"):
unstacked.stack([0, 'month'])
def test_stack_multiple_out_of_bounds(self):
# nlevels == 3
unstacked = self.ymd.unstack(['year', 'month'])
with assertRaisesRegexp(IndexError, "Too many levels"):
unstacked.stack([2, 3])
with assertRaisesRegexp(IndexError, "not a valid level number"):
unstacked.stack([-4, -3])
def test_unstack_period_series(self):
# GH 4342
idx1 = pd.PeriodIndex(['2013-01', '2013-01', '2013-02', '2013-02',
'2013-03', '2013-03'], freq='M', name='period')
idx2 = Index(['A', 'B'] * 3, name='str')
value = [1, 2, 3, 4, 5, 6]
idx = MultiIndex.from_arrays([idx1, idx2])
s = Series(value, index=idx)
result1 = s.unstack()
result2 = s.unstack(level=1)
result3 = s.unstack(level=0)
e_idx = pd.PeriodIndex(
['2013-01', '2013-02', '2013-03'], freq='M', name='period')
expected = DataFrame({'A': [1, 3, 5], 'B': [2, 4, 6]}, index=e_idx,
columns=['A', 'B'])
expected.columns.name = 'str'
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
assert_frame_equal(result3, expected.T)
idx1 = pd.PeriodIndex(['2013-01', '2013-01', '2013-02', '2013-02',
'2013-03', '2013-03'], freq='M', name='period1')
idx2 = pd.PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07'], freq='M', name='period2')
idx = pd.MultiIndex.from_arrays([idx1, idx2])
s = Series(value, index=idx)
result1 = s.unstack()
result2 = s.unstack(level=1)
result3 = s.unstack(level=0)
e_idx = pd.PeriodIndex(
['2013-01', '2013-02', '2013-03'], freq='M', name='period1')
e_cols = pd.PeriodIndex(['2013-07', '2013-08', '2013-09', '2013-10',
'2013-11', '2013-12'],
freq='M', name='period2')
expected = DataFrame([[np.nan, np.nan, np.nan, np.nan, 2, 1],
[np.nan, np.nan, 4, 3, np.nan, np.nan],
[6, 5, np.nan, np.nan, np.nan, np.nan]],
index=e_idx, columns=e_cols)
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
assert_frame_equal(result3, expected.T)
def test_unstack_period_frame(self):
# GH 4342
idx1 = pd.PeriodIndex(['2014-01', '2014-02', '2014-02', '2014-02',
'2014-01', '2014-01'],
freq='M', name='period1')
idx2 = pd.PeriodIndex(['2013-12', '2013-12', '2014-02', '2013-10',
'2013-10', '2014-02'],
freq='M', name='period2')
value = {'A': [1, 2, 3, 4, 5, 6], 'B': [6, 5, 4, 3, 2, 1]}
idx = pd.MultiIndex.from_arrays([idx1, idx2])
df = pd.DataFrame(value, index=idx)
result1 = df.unstack()
result2 = df.unstack(level=1)
result3 = df.unstack(level=0)
e_1 = pd.PeriodIndex(['2014-01', '2014-02'], freq='M', name='period1')
e_2 = pd.PeriodIndex(['2013-10', '2013-12', '2014-02', '2013-10',
'2013-12', '2014-02'], freq='M', name='period2')
e_cols = pd.MultiIndex.from_arrays(['A A A B B B'.split(), e_2])
expected = DataFrame([[5, 1, 6, 2, 6, 1], [4, 2, 3, 3, 5, 4]],
index=e_1, columns=e_cols)
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
e_1 = pd.PeriodIndex(['2014-01', '2014-02', '2014-01',
'2014-02'], freq='M', name='period1')
e_2 = pd.PeriodIndex(
['2013-10', '2013-12', '2014-02'], freq='M', name='period2')
e_cols = pd.MultiIndex.from_arrays(['A A B B'.split(), e_1])
expected = DataFrame([[5, 4, 2, 3], [1, 2, 6, 5], [6, 3, 1, 4]],
index=e_2, columns=e_cols)
assert_frame_equal(result3, expected)
def test_stack_multiple_bug(self):
""" bug when some uniques are not present in the data #3170"""
id_col = ([1] * 3) + ([2] * 3)
name = (['a'] * 3) + (['b'] * 3)
date = pd.to_datetime(['2013-01-03', '2013-01-04', '2013-01-05'] * 2)
var1 = np.random.randint(0, 100, 6)
df = DataFrame(dict(ID=id_col, NAME=name, DATE=date, VAR1=var1))
multi = df.set_index(['DATE', 'ID'])
multi.columns.name = 'Params'
unst = multi.unstack('ID')
down = unst.resample('W-THU').mean()
rs = down.stack('ID')
xp = unst.ix[:, ['VAR1']].resample('W-THU').mean().stack('ID')
xp.columns.name = 'Params'
assert_frame_equal(rs, xp)
def test_stack_dropna(self):
# GH #3997
df = pd.DataFrame({'A': ['a1', 'a2'], 'B': ['b1', 'b2'], 'C': [1, 1]})
df = df.set_index(['A', 'B'])
stacked = df.unstack().stack(dropna=False)
self.assertTrue(len(stacked) > len(stacked.dropna()))
stacked = df.unstack().stack(dropna=True)
assert_frame_equal(stacked, stacked.dropna())
def test_unstack_multiple_hierarchical(self):
df = DataFrame(index=[[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1], [0, 1, 0, 1, 0, 1, 0, 1
]],
columns=[[0, 0, 1, 1], [0, 1, 0, 1]])
df.index.names = ['a', 'b', 'c']
df.columns.names = ['d', 'e']
# it works!
df.unstack(['b', 'c'])
def test_groupby_transform(self):
s = self.frame['A']
grouper = s.index.get_level_values(0)
grouped = s.groupby(grouper)
applied = grouped.apply(lambda x: x * 2)
expected = grouped.transform(lambda x: x * 2)
result = applied.reindex(expected.index)
assert_series_equal(result, expected, check_names=False)
def test_unstack_sparse_keyspace(self):
# memory problems with naive impl #2278
# Generate Long File & Test Pivot
NUM_ROWS = 1000
df = DataFrame({'A': np.random.randint(100, size=NUM_ROWS),
'B': np.random.randint(300, size=NUM_ROWS),
'C': np.random.randint(-7, 7, size=NUM_ROWS),
'D': np.random.randint(-19, 19, size=NUM_ROWS),
'E': np.random.randint(3000, size=NUM_ROWS),
'F': np.random.randn(NUM_ROWS)})
idf = df.set_index(['A', 'B', 'C', 'D', 'E'])
# it works! is sufficient
idf.unstack('E')
def test_unstack_unobserved_keys(self):
# related to #2278 refactoring
levels = [[0, 1], [0, 1, 2, 3]]
labels = [[0, 0, 1, 1], [0, 2, 0, 2]]
index = MultiIndex(levels, labels)
df = DataFrame(np.random.randn(4, 2), index=index)
result = df.unstack()
self.assertEqual(len(result.columns), 4)
recons = result.stack()
assert_frame_equal(recons, df)
def test_groupby_corner(self):
midx = MultiIndex(levels=[['foo'], ['bar'], ['baz']],
labels=[[0], [0], [0]],
names=['one', 'two', 'three'])
df = DataFrame([np.random.rand(4)], columns=['a', 'b', 'c', 'd'],
index=midx)
# should work
df.groupby(level='three')
def test_groupby_level_no_obs(self):
# #1697
midx = MultiIndex.from_tuples([('f1', 's1'), ('f1', 's2'), (
'f2', 's1'), ('f2', 's2'), ('f3', 's1'), ('f3', 's2')])
df = DataFrame(
[[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]], columns=midx)
df1 = df.select(lambda u: u[0] in ['f2', 'f3'], axis=1)
grouped = df1.groupby(axis=1, level=0)
result = grouped.sum()
self.assertTrue((result.columns == ['f2', 'f3']).all())
def test_join(self):
a = self.frame.ix[:5, ['A']]
b = self.frame.ix[2:, ['B', 'C']]
joined = a.join(b, how='outer').reindex(self.frame.index)
expected = self.frame.copy()
expected.values[np.isnan(joined.values)] = np.nan
self.assertFalse(np.isnan(joined.values).all())
assert_frame_equal(joined, expected, check_names=False
) # TODO what should join do with names ?
def test_swaplevel(self):
swapped = self.frame['A'].swaplevel()
swapped2 = self.frame['A'].swaplevel(0)
swapped3 = self.frame['A'].swaplevel(0, 1)
swapped4 = self.frame['A'].swaplevel('first', 'second')
self.assertFalse(swapped.index.equals(self.frame.index))
assert_series_equal(swapped, swapped2)
assert_series_equal(swapped, swapped3)
assert_series_equal(swapped, swapped4)
back = swapped.swaplevel()
back2 = swapped.swaplevel(0)
back3 = swapped.swaplevel(0, 1)
back4 = swapped.swaplevel('second', 'first')
self.assertTrue(back.index.equals(self.frame.index))
assert_series_equal(back, back2)
assert_series_equal(back, back3)
assert_series_equal(back, back4)
ft = self.frame.T
swapped = ft.swaplevel('first', 'second', axis=1)
exp = self.frame.swaplevel('first', 'second').T
assert_frame_equal(swapped, exp)
def test_swaplevel_panel(self):
panel = Panel({'ItemA': self.frame, 'ItemB': self.frame * 2})
expected = panel.copy()
expected.major_axis = expected.major_axis.swaplevel(0, 1)
for result in (panel.swaplevel(axis='major'),
panel.swaplevel(0, axis='major'),
panel.swaplevel(0, 1, axis='major')):
tm.assert_panel_equal(result, expected)
def test_reorder_levels(self):
result = self.ymd.reorder_levels(['month', 'day', 'year'])
expected = self.ymd.swaplevel(0, 1).swaplevel(1, 2)
assert_frame_equal(result, expected)
result = self.ymd['A'].reorder_levels(['month', 'day', 'year'])
expected = self.ymd['A'].swaplevel(0, 1).swaplevel(1, 2)
assert_series_equal(result, expected)
result = self.ymd.T.reorder_levels(['month', 'day', 'year'], axis=1)
expected = self.ymd.T.swaplevel(0, 1, axis=1).swaplevel(1, 2, axis=1)
assert_frame_equal(result, expected)
with assertRaisesRegexp(TypeError, 'hierarchical axis'):
self.ymd.reorder_levels([1, 2], axis=1)
with assertRaisesRegexp(IndexError, 'Too many levels'):
self.ymd.index.reorder_levels([1, 2, 3])
def test_insert_index(self):
df = self.ymd[:5].T
df[2000, 1, 10] = df[2000, 1, 7]
tm.assertIsInstance(df.columns, MultiIndex)
self.assertTrue((df[2000, 1, 10] == df[2000, 1, 7]).all())
def test_alignment(self):
x = Series(data=[1, 2, 3], index=MultiIndex.from_tuples([("A", 1), (
"A", 2), ("B", 3)]))
y = Series(data=[4, 5, 6], index=MultiIndex.from_tuples([("Z", 1), (
"Z", 2), ("B", 3)]))
res = x - y
exp_index = x.index.union(y.index)
exp = x.reindex(exp_index) - y.reindex(exp_index)
assert_series_equal(res, exp)
# hit non-monotonic code path
res = x[::-1] - y[::-1]
exp_index = x.index.union(y.index)
exp = x.reindex(exp_index) - y.reindex(exp_index)
assert_series_equal(res, exp)
def test_is_lexsorted(self):
levels = [[0, 1], [0, 1, 2]]
index = MultiIndex(levels=levels,
labels=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
self.assertTrue(index.is_lexsorted())
index = MultiIndex(levels=levels,
labels=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 2, 1]])
self.assertFalse(index.is_lexsorted())
index = MultiIndex(levels=levels,
labels=[[0, 0, 1, 0, 1, 1], [0, 1, 0, 2, 2, 1]])
self.assertFalse(index.is_lexsorted())
self.assertEqual(index.lexsort_depth, 0)
def test_frame_getitem_view(self):
df = self.frame.T.copy()
# this works because we are modifying the underlying array
# really a no-no
df['foo'].values[:] = 0
self.assertTrue((df['foo'].values == 0).all())
# but not if it's mixed-type
df['foo', 'four'] = 'foo'
df = df.sortlevel(0, axis=1)
# this will work, but will raise/warn as its chained assignment
def f():
df['foo']['one'] = 2
return df
self.assertRaises(com.SettingWithCopyError, f)
try:
df = f()
except:
pass
self.assertTrue((df['foo', 'one'] == 0).all())
def test_frame_getitem_not_sorted(self):
df = self.frame.T
df['foo', 'four'] = 'foo'
arrays = [np.array(x) for x in zip(*df.columns._tuple_index)]
result = df['foo']
result2 = df.ix[:, 'foo']
expected = df.reindex(columns=df.columns[arrays[0] == 'foo'])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
df = df.T
result = df.xs('foo')
result2 = df.ix['foo']
expected = df.reindex(df.index[arrays[0] == 'foo'])
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
def test_series_getitem_not_sorted(self):
arrays = [['bar', 'bar', 'baz', 'baz', 'qux', 'qux', 'foo', 'foo'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = lzip(*arrays)
index = MultiIndex.from_tuples(tuples)
s = Series(randn(8), index=index)
arrays = [np.array(x) for x in zip(*index._tuple_index)]
result = s['qux']
result2 = s.ix['qux']
expected = s[arrays[0] == 'qux']
expected.index = expected.index.droplevel(0)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_count(self):
frame = self.frame.copy()
frame.index.names = ['a', 'b']
result = frame.count(level='b')
expect = self.frame.count(level=1)
assert_frame_equal(result, expect, check_names=False)
result = frame.count(level='a')
expect = self.frame.count(level=0)
assert_frame_equal(result, expect, check_names=False)
series = self.series.copy()
series.index.names = ['a', 'b']
result = series.count(level='b')
expect = self.series.count(level=1)
assert_series_equal(result, expect, check_names=False)
self.assertEqual(result.index.name, 'b')
result = series.count(level='a')
expect = self.series.count(level=0)
assert_series_equal(result, expect, check_names=False)
self.assertEqual(result.index.name, 'a')
self.assertRaises(KeyError, series.count, 'x')
self.assertRaises(KeyError, frame.count, level='x')
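# Reduction names exercised by the level-wise aggregation tests below; each is checked against an explicit groupby(level=...).agg of the same function.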
AGG_FUNCTIONS = ['sum', 'prod', 'min', 'max', 'median', 'mean', 'skew',
'mad', 'std', 'var', 'sem']
def test_series_group_min_max(self):
for op, level, skipna in cart_product(self.AGG_FUNCTIONS, lrange(2),
[False, True]):
grouped = self.series.groupby(level=level)
aggf = lambda x: getattr(x, op)(skipna=skipna)
# skipna=True
leftside = grouped.agg(aggf)
rightside = getattr(self.series, op)(level=level, skipna=skipna)
assert_series_equal(leftside, rightside)
def test_frame_group_ops(self):
self.frame.ix[1, [1, 2]] = np.nan
self.frame.ix[7, [0, 1]] = np.nan
for op, level, axis, skipna in cart_product(self.AGG_FUNCTIONS,
lrange(2), lrange(2),
[False, True]):
if axis == 0:
frame = self.frame
else:
frame = self.frame.T
grouped = frame.groupby(level=level, axis=axis)
pieces = []
def aggf(x):
pieces.append(x)
return getattr(x, op)(skipna=skipna, axis=axis)
leftside = grouped.agg(aggf)
rightside = getattr(frame, op)(level=level, axis=axis,
skipna=skipna)
# for good measure, groupby detail
level_index = frame._get_axis(axis).levels[level]
self.assert_index_equal(leftside._get_axis(axis), level_index)
self.assert_index_equal(rightside._get_axis(axis), level_index)
assert_frame_equal(leftside, rightside)
def test_stat_op_corner(self):
obj = Series([10.0], index=MultiIndex.from_tuples([(2, 3)]))
result = obj.sum(level=0)
expected = Series([10.0], index=[2])
assert_series_equal(result, expected)
def test_frame_any_all_group(self):
df = DataFrame(
{'data': [False, False, True, False, True, False, True]},
index=[
['one', 'one', 'two', 'one', 'two', 'two', 'two'],
[0, 1, 0, 2, 1, 2, 3]])
result = df.any(level=0)
ex = DataFrame({'data': [False, True]}, index=['one', 'two'])
assert_frame_equal(result, ex)
result = df.all(level=0)
ex = DataFrame({'data': [False, False]}, index=['one', 'two'])
assert_frame_equal(result, ex)
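# A non-default ddof passed to level-wise var/std should be forwarded to the underlying aggregation, matching groupby(level=0).agg with the same ddof.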
def test_std_var_pass_ddof(self):
index = MultiIndex.from_arrays([np.arange(5).repeat(10), np.tile(
np.arange(10), 5)])
df = DataFrame(np.random.randn(len(index), 5), index=index)
for meth in ['var', 'std']:
ddof = 4
alt = lambda x: getattr(x, meth)(ddof=ddof)
result = getattr(df[0], meth)(level=0, ddof=ddof)
expected = df[0].groupby(level=0).agg(alt)
assert_series_equal(result, expected)
result = getattr(df, meth)(level=0, ddof=ddof)
expected = df.groupby(level=0).agg(alt)
assert_frame_equal(result, expected)
def test_frame_series_agg_multiple_levels(self):
result = self.ymd.sum(level=['year', 'month'])
expected = self.ymd.groupby(level=['year', 'month']).sum()
assert_frame_equal(result, expected)
result = self.ymd['A'].sum(level=['year', 'month'])
expected = self.ymd['A'].groupby(level=['year', 'month']).sum()
assert_series_equal(result, expected)
def test_groupby_multilevel(self):
result = self.ymd.groupby(level=[0, 1]).mean()
k1 = self.ymd.index.get_level_values(0)
k2 = self.ymd.index.get_level_values(1)
expected = self.ymd.groupby([k1, k2]).mean()
assert_frame_equal(result, expected, check_names=False
) # TODO groupby with level_values drops names
self.assertEqual(result.index.names, self.ymd.index.names[:2])
result2 = self.ymd.groupby(level=self.ymd.index.names[:2]).mean()
assert_frame_equal(result, result2)
def test_groupby_multilevel_with_transform(self):
pass
def test_multilevel_consolidate(self):
index = MultiIndex.from_tuples([('foo', 'one'), ('foo', 'two'), (
'bar', 'one'), ('bar', 'two')])
df = DataFrame(np.random.randn(4, 4), index=index, columns=index)
df['Totals', ''] = df.sum(1)
df = df.consolidate()
def test_ix_preserve_names(self):
result = self.ymd.ix[2000]
result2 = self.ymd['A'].ix[2000]
self.assertEqual(result.index.names, self.ymd.index.names[1:])
self.assertEqual(result2.index.names, self.ymd.index.names[1:])
result = self.ymd.ix[2000, 2]
result2 = self.ymd['A'].ix[2000, 2]
self.assertEqual(result.index.name, self.ymd.index.names[2])
self.assertEqual(result2.index.name, self.ymd.index.names[2])
def test_partial_set(self):
# GH #397
df = self.ymd.copy()
exp = self.ymd.copy()
df.ix[2000, 4] = 0
exp.ix[2000, 4].values[:] = 0
assert_frame_equal(df, exp)
df['A'].ix[2000, 4] = 1
exp['A'].ix[2000, 4].values[:] = 1
assert_frame_equal(df, exp)
df.ix[2000] = 5
exp.ix[2000].values[:] = 5
assert_frame_equal(df, exp)
# this works...for now
df['A'].ix[14] = 5
self.assertEqual(df['A'][14], 5)
def test_unstack_preserve_types(self):
# GH #403
self.ymd['E'] = 'foo'
self.ymd['F'] = 2
unstacked = self.ymd.unstack('month')
self.assertEqual(unstacked['A', 1].dtype, np.float64)
self.assertEqual(unstacked['E', 1].dtype, np.object_)
self.assertEqual(unstacked['F', 1].dtype, np.float64)
def test_unstack_group_index_overflow(self):
labels = np.tile(np.arange(500), 2)
level = np.arange(500)
index = MultiIndex(levels=[level] * 8 + [[0, 1]],
labels=[labels] * 8 + [np.arange(2).repeat(500)])
s = Series(np.arange(1000), index=index)
result = s.unstack()
self.assertEqual(result.shape, (500, 2))
# test roundtrip
stacked = result.stack()
assert_series_equal(s, stacked.reindex(s.index))
# put it at beginning
index = MultiIndex(levels=[[0, 1]] + [level] * 8,
labels=[np.arange(2).repeat(500)] + [labels] * 8)
s = Series(np.arange(1000), index=index)
result = s.unstack(0)
self.assertEqual(result.shape, (500, 2))
# put it in middle
index = MultiIndex(levels=[level] * 4 + [[0, 1]] + [level] * 4,
labels=([labels] * 4 + [np.arange(2).repeat(500)] +
[labels] * 4))
s = Series(np.arange(1000), index=index)
result = s.unstack(4)
self.assertEqual(result.shape, (500, 2))
def test_getitem_lowerdim_corner(self):
self.assertRaises(KeyError, self.frame.ix.__getitem__,
(('bar', 'three'), 'B'))
# in theory should be inserting in a sorted space????
self.frame.ix[('bar', 'three'), 'B'] = 0
self.assertEqual(self.frame.sortlevel().ix[('bar', 'three'), 'B'], 0)
# ---------------------------------------------------------------------
# AMBIGUOUS CASES!
def test_partial_ix_missing(self):
raise nose.SkipTest("skipping for now")
result = self.ymd.ix[2000, 0]
expected = self.ymd.ix[2000]['A']
assert_series_equal(result, expected)
# need to put in some work here
# self.ymd.ix[2000, 0] = 0
# self.assertTrue((self.ymd.ix[2000]['A'] == 0).all())
# Pretty sure the second (and maybe even the first) is already wrong.
self.assertRaises(Exception, self.ymd.ix.__getitem__, (2000, 6))
self.assertRaises(Exception, self.ymd.ix.__getitem__, (2000, 6), 0)
# ---------------------------------------------------------------------
def test_to_html(self):
self.ymd.columns.name = 'foo'
self.ymd.to_html()
self.ymd.T.to_html()
def test_level_with_tuples(self):
index = MultiIndex(levels=[[('foo', 'bar', 0), ('foo', 'baz', 0), (
'foo', 'qux', 0)], [0, 1]],
labels=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]])
series = Series(np.random.randn(6), index=index)
frame = DataFrame(np.random.randn(6, 4), index=index)
result = series[('foo', 'bar', 0)]
result2 = series.ix[('foo', 'bar', 0)]
expected = series[:2]
expected.index = expected.index.droplevel(0)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
self.assertRaises(KeyError, series.__getitem__, (('foo', 'bar', 0), 2))
result = frame.ix[('foo', 'bar', 0)]
result2 = frame.xs(('foo', 'bar', 0))
expected = frame[:2]
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
index = MultiIndex(levels=[[('foo', 'bar'), ('foo', 'baz'), (
'foo', 'qux')], [0, 1]],
labels=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]])
series = Series(np.random.randn(6), index=index)
frame = DataFrame(np.random.randn(6, 4), index=index)
result = series[('foo', 'bar')]
result2 = series.ix[('foo', 'bar')]
expected = series[:2]
expected.index = expected.index.droplevel(0)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = frame.ix[('foo', 'bar')]
result2 = frame.xs(('foo', 'bar'))
expected = frame[:2]
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
def test_int_series_slicing(self):
s = self.ymd['A']
result = s[5:]
expected = s.reindex(s.index[5:])
assert_series_equal(result, expected)
exp = self.ymd['A'].copy()
s[5:] = 0
exp.values[5:] = 0
self.assert_numpy_array_equal(s.values, exp.values)
result = self.ymd[5:]
expected = self.ymd.reindex(s.index[5:])
assert_frame_equal(result, expected)
def test_mixed_depth_get(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
result = df['a']
expected = df['a', '', '']
assert_series_equal(result, expected, check_names=False)
self.assertEqual(result.name, 'a')
result = df['routine1', 'result1']
expected = df['routine1', 'result1', '']
assert_series_equal(result, expected, check_names=False)
self.assertEqual(result.name, ('routine1', 'result1'))
def test_mixed_depth_insert(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
result = df.copy()
expected = df.copy()
result['b'] = [1, 2, 3, 4]
expected['b', '', ''] = [1, 2, 3, 4]
assert_frame_equal(result, expected)
def test_mixed_depth_drop(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
result = df.drop('a', axis=1)
expected = df.drop([('a', '', '')], axis=1)
assert_frame_equal(expected, result)
result = df.drop(['top'], axis=1)
expected = df.drop([('top', 'OD', 'wx')], axis=1)
expected = expected.drop([('top', 'OD', 'wy')], axis=1)
assert_frame_equal(expected, result)
result = df.drop(('top', 'OD', 'wx'), axis=1)
expected = df.drop([('top', 'OD', 'wx')], axis=1)
assert_frame_equal(expected, result)
expected = df.drop([('top', 'OD', 'wy')], axis=1)
expected = df.drop('top', axis=1)
result = df.drop('result1', level=1, axis=1)
expected = df.drop([('routine1', 'result1', ''),
('routine2', 'result1', '')], axis=1)
assert_frame_equal(expected, result)
def test_drop_nonunique(self):
df = DataFrame([["x-a", "x", "a", 1.5], ["x-a", "x", "a", 1.2],
["z-c", "z", "c", 3.1], ["x-a", "x", "a", 4.1],
["x-b", "x", "b", 5.1], ["x-b", "x", "b", 4.1],
["x-b", "x", "b", 2.2],
["y-a", "y", "a", 1.2], ["z-b", "z", "b", 2.1]],
columns=["var1", "var2", "var3", "var4"])
grp_size = df.groupby("var1").size()
drop_idx = grp_size.ix[grp_size == 1]
idf = df.set_index(["var1", "var2", "var3"])
# it works! #2101
result = idf.drop(drop_idx.index, level=0).reset_index()
expected = df[-df.var1.isin(drop_idx.index)]
result.index = expected.index
assert_frame_equal(result, expected)
def test_mixed_depth_pop(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
df1 = df.copy()
df2 = df.copy()
result = df1.pop('a')
expected = df2.pop(('a', '', ''))
assert_series_equal(expected, result, check_names=False)
assert_frame_equal(df1, df2)
self.assertEqual(result.name, 'a')
expected = df1['top']
df1 = df1.drop(['top'], axis=1)
result = df2.pop('top')
assert_frame_equal(expected, result)
assert_frame_equal(df1, df2)
def test_reindex_level_partial_selection(self):
result = self.frame.reindex(['foo', 'qux'], level=0)
expected = self.frame.ix[[0, 1, 2, 7, 8, 9]]
assert_frame_equal(result, expected)
result = self.frame.T.reindex_axis(['foo', 'qux'], axis=1, level=0)
assert_frame_equal(result, expected.T)
result = self.frame.ix[['foo', 'qux']]
assert_frame_equal(result, expected)
result = self.frame['A'].ix[['foo', 'qux']]
assert_series_equal(result, expected['A'])
result = self.frame.T.ix[:, ['foo', 'qux']]
assert_frame_equal(result, expected.T)
def test_setitem_multiple_partial(self):
expected = self.frame.copy()
result = self.frame.copy()
result.ix[['foo', 'bar']] = 0
expected.ix['foo'] = 0
expected.ix['bar'] = 0
assert_frame_equal(result, expected)
expected = self.frame.copy()
result = self.frame.copy()
result.ix['foo':'bar'] = 0
expected.ix['foo'] = 0
expected.ix['bar'] = 0
assert_frame_equal(result, expected)
expected = self.frame['A'].copy()
result = self.frame['A'].copy()
result.ix[['foo', 'bar']] = 0
expected.ix['foo'] = 0
expected.ix['bar'] = 0
assert_series_equal(result, expected)
expected = self.frame['A'].copy()
result = self.frame['A'].copy()
result.ix['foo':'bar'] = 0
expected.ix['foo'] = 0
expected.ix['bar'] = 0
assert_series_equal(result, expected)
def test_drop_level(self):
result = self.frame.drop(['bar', 'qux'], level='first')
expected = self.frame.ix[[0, 1, 2, 5, 6]]
assert_frame_equal(result, expected)
result = self.frame.drop(['two'], level='second')
expected = self.frame.ix[[0, 2, 3, 6, 7, 9]]
assert_frame_equal(result, expected)
result = self.frame.T.drop(['bar', 'qux'], axis=1, level='first')
expected = self.frame.ix[[0, 1, 2, 5, 6]].T
assert_frame_equal(result, expected)
result = self.frame.T.drop(['two'], axis=1, level='second')
expected = self.frame.ix[[0, 2, 3, 6, 7, 9]].T
assert_frame_equal(result, expected)
def test_drop_level_nonunique_datetime(self):
# GH 12701
idx = pd.Index([2, 3, 4, 4, 5], name='id')
idxdt = pd.to_datetime(['201603231400',
'201603231500',
'201603231600',
'201603231600',
'201603231700'])
df = DataFrame(np.arange(10).reshape(5, 2),
columns=list('ab'), index=idx)
df['tstamp'] = idxdt
df = df.set_index('tstamp', append=True)
ts = pd.Timestamp('201603231600')
self.assertFalse(df.index.is_unique)
result = df.drop(ts, level='tstamp')
expected = df.loc[idx != 4]
assert_frame_equal(result, expected)
def test_drop_preserve_names(self):
index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1],
[1, 2, 3, 1, 2, 3]],
names=['one', 'two'])
df = DataFrame(np.random.randn(6, 3), index=index)
result = df.drop([(0, 2)])
self.assertEqual(result.index.names, ('one', 'two'))
def test_unicode_repr_issues(self):
levels = [Index([u('a/\u03c3'), u('b/\u03c3'), u('c/\u03c3')]),
Index([0, 1])]
labels = [np.arange(3).repeat(2), np.tile(np.arange(2), 3)]
index = MultiIndex(levels=levels, labels=labels)
repr(index.levels)
# NumPy bug
# repr(index.get_level_values(1))
def test_unicode_repr_level_names(self):
index = MultiIndex.from_tuples([(0, 0), (1, 1)],
names=[u('\u0394'), 'i1'])
s = Series(lrange(2), index=index)
df = DataFrame(np.random.randn(2, 4), index=index)
repr(s)
repr(df)
def test_dataframe_insert_column_all_na(self):
# GH #1534
mix = MultiIndex.from_tuples([('1a', '2a'), ('1a', '2b'), ('1a', '2c')
])
df = DataFrame([[1, 2], [3, 4], [5, 6]], index=mix)
s = Series({(1, 1): 1, (1, 2): 2})
df['new'] = s
self.assertTrue(df['new'].isnull().all())
def test_join_segfault(self):
# 1532
df1 = DataFrame({'a': [1, 1], 'b': [1, 2], 'x': [1, 2]})
df2 = DataFrame({'a': [2, 2], 'b': [1, 2], 'y': [1, 2]})
df1 = df1.set_index(['a', 'b'])
df2 = df2.set_index(['a', 'b'])
# it works!
for how in ['left', 'right', 'outer']:
df1.join(df2, how=how)
def test_set_column_scalar_with_ix(self):
subset = self.frame.index[[1, 4, 5]]
self.frame.ix[subset] = 99
self.assertTrue((self.frame.ix[subset].values == 99).all())
col = self.frame['B']
col[subset] = 97
self.assertTrue((self.frame.ix[subset, 'B'] == 97).all())
def test_frame_dict_constructor_empty_series(self):
s1 = Series([
1, 2, 3, 4
], index=MultiIndex.from_tuples([(1, 2), (1, 3), (2, 2), (2, 4)]))
s2 = Series([
1, 2, 3, 4
], index=MultiIndex.from_tuples([(1, 2), (1, 3), (3, 2), (3, 4)]))
s3 = Series()
# it works!
DataFrame({'foo': s1, 'bar': s2, 'baz': s3})
DataFrame.from_dict({'foo': s1, 'baz': s3, 'bar': s2})
def test_indexing_ambiguity_bug_1678(self):
columns = MultiIndex.from_tuples([('Ohio', 'Green'), ('Ohio', 'Red'), (
'Colorado', 'Green')])
index = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1), ('b', 2)
])
frame = DataFrame(np.arange(12).reshape((4, 3)), index=index,
columns=columns)
result = frame.ix[:, 1]
exp = frame.loc[:, ('Ohio', 'Red')]
tm.assertIsInstance(result, Series)
assert_series_equal(result, exp)
def test_nonunique_assignment_1750(self):
df = DataFrame([[1, 1, "x", "X"], [1, 1, "y", "Y"], [1, 2, "z", "Z"]],
columns=list("ABCD"))
df = df.set_index(['A', 'B'])
ix = MultiIndex.from_tuples([(1, 1)])
df.ix[ix, "C"] = '_'
self.assertTrue((df.xs((1, 1))['C'] == '_').all())
def test_indexing_over_hashtable_size_cutoff(self):
n = 10000
old_cutoff = _index._SIZE_CUTOFF
_index._SIZE_CUTOFF = 20000
s = Series(np.arange(n),
MultiIndex.from_arrays((["a"] * n, np.arange(n))))
# hai it works!
self.assertEqual(s[("a", 5)], 5)
self.assertEqual(s[("a", 6)], 6)
self.assertEqual(s[("a", 7)], 7)
_index._SIZE_CUTOFF = old_cutoff
def test_multiindex_na_repr(self):
# only an issue with long columns
from numpy import nan
df3 = DataFrame({
'A' * 30: {('A', 'A0006000', 'nuit'): 'A0006000'},
'B' * 30: {('A', 'A0006000', 'nuit'): nan},
'C' * 30: {('A', 'A0006000', 'nuit'): nan},
'D' * 30: {('A', 'A0006000', 'nuit'): nan},
'E' * 30: {('A', 'A0006000', 'nuit'): 'A'},
'F' * 30: {('A', 'A0006000', 'nuit'): nan},
})
idf = df3.set_index(['A' * 30, 'C' * 30])
repr(idf)
def test_assign_index_sequences(self):
# #2200
df = DataFrame({"a": [1, 2, 3],
"b": [4, 5, 6],
"c": [7, 8, 9]}).set_index(["a", "b"])
l = list(df.index)
l[0] = ("faz", "boo")
df.index = l
repr(df)
# this travels an improper code path
l[0] = ["faz", "boo"]
df.index = l
repr(df)
def test_tuples_have_na(self):
index = MultiIndex(levels=[[1, 0], [0, 1, 2, 3]],
labels=[[1, 1, 1, 1, -1, 0, 0, 0], [0, 1, 2, 3, 0,
1, 2, 3]])
self.assertTrue(isnull(index[4][0]))
self.assertTrue(isnull(index.values[4][0]))
def test_duplicate_groupby_issues(self):
idx_tp = [('600809', '20061231'), ('600809', '20070331'),
('600809', '20070630'), ('600809', '20070331')]
dt = ['demo', 'demo', 'demo', 'demo']
idx = MultiIndex.from_tuples(idx_tp, names=['STK_ID', 'RPT_Date'])
s = Series(dt, index=idx)
result = s.groupby(s.index).first()
self.assertEqual(len(result), 3)
def test_duplicate_mi(self):
# GH 4516
df = DataFrame([['foo', 'bar', 1.0, 1], ['foo', 'bar', 2.0, 2],
['bah', 'bam', 3.0, 3],
['bah', 'bam', 4.0, 4], ['foo', 'bar', 5.0, 5],
['bah', 'bam', 6.0, 6]],
columns=list('ABCD'))
df = df.set_index(['A', 'B'])
df = df.sortlevel(0)
expected = DataFrame([['foo', 'bar', 1.0, 1], ['foo', 'bar', 2.0, 2],
['foo', 'bar', 5.0, 5]],
columns=list('ABCD')).set_index(['A', 'B'])
result = df.loc[('foo', 'bar')]
assert_frame_equal(result, expected)
def test_duplicated_drop_duplicates(self):
# GH 4060
idx = MultiIndex.from_arrays(([1, 2, 3, 1, 2, 3], [1, 1, 1, 1, 2, 2]))
expected = np.array(
[False, False, False, True, False, False], dtype=bool)
duplicated = idx.duplicated()
tm.assert_numpy_array_equal(duplicated, expected)
self.assertTrue(duplicated.dtype == bool)
expected = MultiIndex.from_arrays(([1, 2, 3, 2, 3], [1, 1, 1, 2, 2]))
tm.assert_index_equal(idx.drop_duplicates(), expected)
expected = np.array([True, False, False, False, False, False])
duplicated = idx.duplicated(keep='last')
tm.assert_numpy_array_equal(duplicated, expected)
self.assertTrue(duplicated.dtype == bool)
expected = MultiIndex.from_arrays(([2, 3, 1, 2, 3], [1, 1, 1, 2, 2]))
tm.assert_index_equal(idx.drop_duplicates(keep='last'), expected)
expected = np.array([True, False, False, True, False, False])
duplicated = idx.duplicated(keep=False)
tm.assert_numpy_array_equal(duplicated, expected)
self.assertTrue(duplicated.dtype == bool)
expected = MultiIndex.from_arrays(([2, 3, 2, 3], [1, 1, 2, 2]))
tm.assert_index_equal(idx.drop_duplicates(keep=False), expected)
# deprecate take_last
expected = np.array([True, False, False, False, False, False])
with tm.assert_produces_warning(FutureWarning):
duplicated = idx.duplicated(take_last=True)
tm.assert_numpy_array_equal(duplicated, expected)
self.assertTrue(duplicated.dtype == bool)
expected = MultiIndex.from_arrays(([2, 3, 1, 2, 3], [1, 1, 1, 2, 2]))
with tm.assert_produces_warning(FutureWarning):
tm.assert_index_equal(
idx.drop_duplicates(take_last=True), expected)
def test_multiindex_set_index(self):
# segfault in #3308
d = {'t1': [2, 2.5, 3], 't2': [4, 5, 6]}
df = DataFrame(d)
tuples = [(0, 1), (0, 2), (1, 2)]
df['tuples'] = tuples
index = MultiIndex.from_tuples(df['tuples'])
# it works!
df.set_index(index)
def test_datetimeindex(self):
idx1 = pd.DatetimeIndex(
['2013-04-01 9:00', '2013-04-02 9:00', '2013-04-03 9:00'
] * 2, tz='Asia/Tokyo')
idx2 = pd.date_range('2010/01/01', periods=6, freq='M',
tz='US/Eastern')
idx = MultiIndex.from_arrays([idx1, idx2])
expected1 = pd.DatetimeIndex(['2013-04-01 9:00', '2013-04-02 9:00',
'2013-04-03 9:00'], tz='Asia/Tokyo')
self.assert_index_equal(idx.levels[0], expected1)
self.assert_index_equal(idx.levels[1], idx2)
# from datetime combos
# GH 7888
date1 = datetime.date.today()
date2 = datetime.datetime.today()
date3 = Timestamp.today()
for d1, d2 in itertools.product(
[date1, date2, date3], [date1, date2, date3]):
index = pd.MultiIndex.from_product([[d1], [d2]])
self.assertIsInstance(index.levels[0], pd.DatetimeIndex)
self.assertIsInstance(index.levels[1], pd.DatetimeIndex)
def test_constructor_with_tz(self):
index = pd.DatetimeIndex(['2013/01/01 09:00', '2013/01/02 09:00'],
name='dt1', tz='US/Pacific')
columns = pd.DatetimeIndex(['2014/01/01 09:00', '2014/01/02 09:00'],
name='dt2', tz='Asia/Tokyo')
result = MultiIndex.from_arrays([index, columns])
tm.assert_index_equal(result.levels[0], index)
tm.assert_index_equal(result.levels[1], columns)
result = MultiIndex.from_arrays([Series(index), Series(columns)])
tm.assert_index_equal(result.levels[0], index)
tm.assert_index_equal(result.levels[1], columns)
def test_set_index_datetime(self):
# GH 3950
df = pd.DataFrame(
{'label': ['a', 'a', 'a', 'b', 'b', 'b'],
'datetime': ['2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00', '2011-07-19 07:00:00',
'2011-07-19 08:00:00', '2011-07-19 09:00:00'],
'value': range(6)})
df.index = pd.to_datetime(df.pop('datetime'), utc=True)
df.index = df.index.tz_localize('UTC').tz_convert('US/Pacific')
expected = pd.DatetimeIndex(['2011-07-19 07:00:00',
'2011-07-19 08:00:00',
'2011-07-19 09:00:00'], name='datetime')
expected = expected.tz_localize('UTC').tz_convert('US/Pacific')
df = df.set_index('label', append=True)
self.assert_index_equal(df.index.levels[0], expected)
self.assert_index_equal(df.index.levels[1],
pd.Index(['a', 'b'], name='label'))
df = df.swaplevel(0, 1)
self.assert_index_equal(df.index.levels[0],
pd.Index(['a', 'b'], name='label'))
self.assert_index_equal(df.index.levels[1], expected)
df = DataFrame(np.random.random(6))
idx1 = pd.DatetimeIndex(['2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00', '2011-07-19 07:00:00',
'2011-07-19 08:00:00', '2011-07-19 09:00:00'],
tz='US/Eastern')
idx2 = pd.DatetimeIndex(['2012-04-01 09:00', '2012-04-01 09:00',
'2012-04-01 09:00', '2012-04-02 09:00',
'2012-04-02 09:00', '2012-04-02 09:00'],
tz='US/Eastern')
idx3 = pd.date_range('2011-01-01 09:00', periods=6, tz='Asia/Tokyo')
df = df.set_index(idx1)
df = df.set_index(idx2, append=True)
df = df.set_index(idx3, append=True)
expected1 = pd.DatetimeIndex(['2011-07-19 07:00:00',
'2011-07-19 08:00:00',
'2011-07-19 09:00:00'], tz='US/Eastern')
expected2 = pd.DatetimeIndex(['2012-04-01 09:00', '2012-04-02 09:00'],
tz='US/Eastern')
self.assert_index_equal(df.index.levels[0], expected1)
self.assert_index_equal(df.index.levels[1], expected2)
self.assert_index_equal(df.index.levels[2], idx3)
# GH 7092
self.assert_index_equal(df.index.get_level_values(0), idx1)
self.assert_index_equal(df.index.get_level_values(1), idx2)
self.assert_index_equal(df.index.get_level_values(2), idx3)
def test_reset_index_datetime(self):
# GH 3950
for tz in ['UTC', 'Asia/Tokyo', 'US/Eastern']:
idx1 = pd.date_range('1/1/2011', periods=5, freq='D', tz=tz,
name='idx1')
idx2 = pd.Index(range(5), name='idx2', dtype='int64')
idx = pd.MultiIndex.from_arrays([idx1, idx2])
df = pd.DataFrame(
{'a': np.arange(5, dtype='int64'),
'b': ['A', 'B', 'C', 'D', 'E']}, index=idx)
expected = pd.DataFrame({'idx1': [datetime.datetime(2011, 1, 1),
datetime.datetime(2011, 1, 2),
datetime.datetime(2011, 1, 3),
datetime.datetime(2011, 1, 4),
datetime.datetime(2011, 1, 5)],
'idx2': np.arange(5, dtype='int64'),
'a': np.arange(5, dtype='int64'),
'b': ['A', 'B', 'C', 'D', 'E']},
columns=['idx1', 'idx2', 'a', 'b'])
expected['idx1'] = expected['idx1'].apply(
lambda d: pd.Timestamp(d, tz=tz))
assert_frame_equal(df.reset_index(), expected)
idx3 = pd.date_range('1/1/2012', periods=5, freq='MS',
tz='Europe/Paris', name='idx3')
idx = pd.MultiIndex.from_arrays([idx1, idx2, idx3])
df = pd.DataFrame(
{'a': np.arange(5, dtype='int64'),
'b': ['A', 'B', 'C', 'D', 'E']}, index=idx)
expected = pd.DataFrame({'idx1': [datetime.datetime(2011, 1, 1),
datetime.datetime(2011, 1, 2),
datetime.datetime(2011, 1, 3),
datetime.datetime(2011, 1, 4),
datetime.datetime(2011, 1, 5)],
'idx2': np.arange(5, dtype='int64'),
'idx3': [datetime.datetime(2012, 1, 1),
datetime.datetime(2012, 2, 1),
datetime.datetime(2012, 3, 1),
datetime.datetime(2012, 4, 1),
datetime.datetime(2012, 5, 1)],
'a': np.arange(5, dtype='int64'),
'b': ['A', 'B', 'C', 'D', 'E']},
columns=['idx1', 'idx2', 'idx3', 'a', 'b'])
expected['idx1'] = expected['idx1'].apply(
lambda d: pd.Timestamp(d, tz=tz))
expected['idx3'] = expected['idx3'].apply(
lambda d: pd.Timestamp(d, tz='Europe/Paris'))
assert_frame_equal(df.reset_index(), expected)
# GH 7793
idx = pd.MultiIndex.from_product([['a', 'b'], pd.date_range(
'20130101', periods=3, tz=tz)])
df = pd.DataFrame(
np.arange(6, dtype='int64').reshape(
6, 1), columns=['a'], index=idx)
expected = pd.DataFrame({'level_0': 'a a a b b b'.split(),
'level_1': [
datetime.datetime(2013, 1, 1),
datetime.datetime(2013, 1, 2),
datetime.datetime(2013, 1, 3)] * 2,
'a': np.arange(6, dtype='int64')},
columns=['level_0', 'level_1', 'a'])
expected['level_1'] = expected['level_1'].apply(
lambda d: pd.Timestamp(d, freq='D', tz=tz))
assert_frame_equal(df.reset_index(), expected)
def test_reset_index_period(self):
# GH 7746
idx = pd.MultiIndex.from_product([pd.period_range('20130101',
periods=3, freq='M'),
['a', 'b', 'c']],
names=['month', 'feature'])
df = pd.DataFrame(np.arange(9, dtype='int64')
.reshape(-1, 1),
index=idx, columns=['a'])
expected = pd.DataFrame({
'month': ([pd.Period('2013-01', freq='M')] * 3 +
[pd.Period('2013-02', freq='M')] * 3 +
[pd.Period('2013-03', freq='M')] * 3),
'feature': ['a', 'b', 'c'] * 3,
'a': np.arange(9, dtype='int64')
}, columns=['month', 'feature', 'a'])
assert_frame_equal(df.reset_index(), expected)
def test_set_index_period(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = pd.period_range('2011-01-01', periods=3, freq='M')
idx1 = idx1.append(idx1)
idx2 = pd.period_range('2013-01-01 09:00', periods=2, freq='H')
idx2 = idx2.append(idx2).append(idx2)
idx3 = pd.period_range('2005', periods=6, freq='A')
df = df.set_index(idx1)
df = df.set_index(idx2, append=True)
df = df.set_index(idx3, append=True)
expected1 = pd.period_range('2011-01-01', periods=3, freq='M')
expected2 = pd.period_range('2013-01-01 09:00', periods=2, freq='H')
self.assert_index_equal(df.index.levels[0], expected1)
self.assert_index_equal(df.index.levels[1], expected2)
self.assert_index_equal(df.index.levels[2], idx3)
self.assert_index_equal(df.index.get_level_values(0), idx1)
self.assert_index_equal(df.index.get_level_values(1), idx2)
self.assert_index_equal(df.index.get_level_values(2), idx3)
def test_repeat(self):
# GH 9361
# fixed by # GH 7891
m_idx = pd.MultiIndex.from_tuples([(1, 2), (3, 4), (5, 6), (7, 8)])
data = ['a', 'b', 'c', 'd']
m_df = pd.Series(data, index=m_idx)
assert m_df.repeat(3).shape == (3 * len(data), )
def test_iloc_mi(self):
# GH 13797
# Test if iloc can handle integer locations in MultiIndexed DataFrame
data = [
['str00', 'str01'],
['str10', 'str11'],
['str20', 'srt21'],
['str30', 'str31'],
['str40', 'str41']
]
mi = pd.MultiIndex.from_tuples(
[('CC', 'A'),
('CC', 'B'),
('CC', 'B'),
('BB', 'a'),
('BB', 'b')
])
expected = pd.DataFrame(data)
df_mi = pd.DataFrame(data, index=mi)
result = pd.DataFrame([[df_mi.iloc[r, c] for c in range(2)]
for r in range(5)])
assert_frame_equal(result, expected)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| apache-2.0 |
kimlaborg/NGSKit | ngskit/oligolib_generator.py | 1 | 33136 | # -*- coding: utf-8 -*-
"""
Created on Fri Jun 19 11:19:51 2015
Small script and library of functions to help create the in-house
oligo chips.
@author: ccorbi
Example
-------
> from oligolib_generator import *
> disrd_CONSTANT_F = 'CAGCCTCTTCATCTGGC'
> disrd_CONSTANT_R = 'GGTGGAGGATCCGGAG'
> peptides = {'RTLSQLYCSYGPLT': 'MUS81',
'NPFREKKFFCTIL': 'GNG4',
'TEGPDSD': 'SFN',
'PSSLAYSLKKH': 'INCEP',
'ETFSDLW': 'MDM2'}
> for seq,_id in peptides.items():
... lib = peptide_library(template=seq,
CONSTANT_F = 'CAGCCTCTTCATCTGGC',
CONSTANT_R = 'GGTGGAGGATCCGGAG')
... lib.generate_single_variants()
... lib.write('./fasta/{}_disorderome_CR.fasta'.format(_id), extend=87)
Notes
-----
Tested on Python 2.7 and 3.6
To Do ::
Finish the stand-alone mode, configuration and argument passing
Improve testing by using pytest and logging
"""
from __future__ import print_function
import sys
import random
import itertools
import json
import logging
import argparse
import time
from collections import OrderedDict
import Levenshtein as lstn
from tqdm.auto import tqdm
from ngskit.utils.dna import (translate2aa, translate2na, clean_restriction_sites)
class peptide_library(object):
"""Main Class to Generate library.
"""
# compatible class arguments
def __init__(self, template=None, **kwargs):
"""Init.
Parameters
----------
seq : str, optional
Template sequence to build the library
Use None when you want to load variables from a file.
If a template is not provided, all the generation options
are disabled, though.
Kwargs:
-------
lib_name : str, optional
Name of the library
CONSTANT_F : str
Forward constant region
first constant region 5'
CONSTANT_R : str
Reverse constant region
second constant region 3'
include_template : bool
The original template will be introduced in the library,
(default True)
restriction_enzymes : dict or list
dictionary or list with the restriction sites
(by default, dict_restriction = ['GAATTC','CCCGGG']
or ecorI, XmaI, salI
species : str
species of the library. This determines the codon usage frequency
(by default 'human'; 'E.coli' is also accepted)
See Also
--------
load_designs
write
change_template
"""
self._class_arguments = ['include_template',
'lib_name',
'CONSTANT_F',
'CONSTANT_R',
'lib_limit',
'restriction_enzymes',
'codon_usage_species']
# check the passed arguments
for k in kwargs:
if k not in self._class_arguments:
raise ValueError("{} got an unexpected keyword argument '{}' ".format(self.__class__.__name__, k))
# init template sequence
if isinstance(template, str):
self.template = Template(template)
# Include template in the library
self.include_template = kwargs.get('include_template', True)
# Optional name of the lib; by default the template sequence is used
self.lib_name = kwargs.get('lib_name', self.template.seq)
else:
if template is None:
self.template = False
# Include template in the library
self.include_template = kwargs.get('include_template', False)
self.lib_name = kwargs.get('lib_name', 'No_Template')
else:
raise Exception
# Set up Constant regions
self.CONSTANT_F = kwargs.get('CONSTANT_F', '')
self.CONSTANT_R = kwargs.get('CONSTANT_R', '')
# limit for the library
self.lib_limit = kwargs.get('lib_limit', None)
# SET UP Restriction enzymes
self.restriction_enzymes = dict()
self.add_restriction_enzyme(kwargs.get('restriction_enzymes', dict()))
#{'ecorI': 'GAATTC',
#'XmaI': 'CCCGGG',
#'salI': 'GTCGAC'}
# Setup CODON usage and validate
self.codon_usage_species = kwargs.get('codon_usage_species', 'human') # human E.coli
if self.codon_usage_species not in ['human', 'E.coli']:
raise ValueError("{} is not supported codon usage ".format(self.codon_usage_species))
# Init Internal Variables
self._aalibrary = OrderedDict()
self._nalibrary = OrderedDict()
if self.include_template:
index_id = len(self._aalibrary)
self._aalibrary[template] = self.lib_name + '_OO_' + str(index_id)
return
def __call__(self):
self.info()
def __len__(self):
return len(self._aalibrary)
def info(self):
"""ToDo: Not sure if print is the best output."""
for arg in self._class_arguments:
print("{} :\t{}".format(arg, self.__getattribute__(arg)))
n = len(self._aalibrary)
print("{} :\t{}".format('Seq in the lib', n))
return
def load_designs(self, seq_file, **kwargs):
""" Load a file with the seq of designs for the template. No headers
By default overwrite identical previous designs.
Parameters
----------
seq_file : str
Path and name of the sequence file
Kwargs:
-------
sep : str, optional
separation of columns (default None)
column : int, optional
column were the sequences are (default 0)
suffix : str, optional
Suffix of the variants (default _MD_)
Raises:
------
ValueError
If lenght of the sequence in the file is different
from the template
Returns
-------
"""
# Set up suffix for seq id purposes
# _MD_ by default
# Manual Designs
suffix = kwargs.get('suffix', '_MD_')
# Load file cfg ToDo use pandas
sep = kwargs.get('sep', None)
column = kwargs.get('column', 0)
# Open file
with open(seq_file, 'r') as input_file:
for line in tqdm(input_file):
# if the file is empty or is a comment, skip
if len(line.strip()) == 0 or line.startswith('#'):
continue
if sep != False:
try:
if sep == '':
seq = line.split()[column]
else:
seq = line.split(sep)[column]
except:
print('ERROR!: Reading File {} on:'.format(seq_file))
print(line)
raise
else:
seq = line.strip()
if not seq in self._aalibrary:
# check library limit
if self._lib_notfull() is False:
self._aalibrary[seq] = self._name_seq(suffix=suffix)
return
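# Usage sketch (illustrative, not part of the original module; the file names
# and column layout below are assumptions):
# lib.load_designs('designs.txt')                      # one sequence per line
# lib.load_designs('designs.tsv', sep='\t', column=1)  # sequences in the second column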
def write(self, file_name='', add_stop_codon=True, extend=False, **kwargs):
"""Translate and Write the library in fasta format.
Parameters
----------
file_name : str
Ouput file name, by default library name.fasta
stop_end : bool
Add a double stop codon after the design, by default True
extend : int
Add stop codons to extend the final oligo sequences
to the given legnth, the constant regions are included by default No.
Returns
-------
"""
print('WARNING: ADDING_STOP: {} - EXTENDING SEQUENCES {}'.format(add_stop_codon, extend) )
if file_name == '':
file_name = self.lib_name + '.fasta'
output = open(file_name, 'w')
CONSTANT_F = kwargs.get('CONSTANT_F', self.CONSTANT_F)
CONSTANT_R = kwargs.get('CONSTANT_R', self.CONSTANT_R)
# This can be improved
SPACER = 'TGATAATAGTAATAGTGATAGTGATAATGATAATGA'
for seq, seq_id in tqdm(self._aalibrary.items()):
seq_dna = translate2na(seq, species=self.codon_usage_species)
# check for the restriction sites
seq_dna_checked = clean_restriction_sites(seq_dna, flank1 = CONSTANT_F[-1], flank2 = CONSTANT_R[0], dict_restriction = self.restriction_enzymes)
# add double stop codons at the end
if add_stop_codon:
stop = 'TGATAA'
else:
stop = ''
# Extend Oligo to reach a concrete length
if extend:
# Determine the legnth of the spacer sequence
base_oligo = len(seq_dna_checked) + len(CONSTANT_F) + len(CONSTANT_R) + len(stop)
spacer_len = extend - base_oligo
if spacer_len < 0:
raise ValueError(
'ERROR, refill len is {} and the raw oligo is {}'.format(extend, base_oligo))
refill_seq = SPACER[:spacer_len]
else:
refill_seq = ''
# save in fasta format
print('>{}_{}'.format(seq_id, seq), file=output)
print(CONSTANT_F + seq_dna_checked.lower() +
stop + refill_seq + CONSTANT_R, file=output)
# internal saving
self._nalibrary[seq_id] = CONSTANT_F + seq_dna_checked.lower() + stop + refill_seq + CONSTANT_R
output.close()
return
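# Usage sketch (illustrative, not part of the original module; file name and
# oligo length are arbitrary). Each record is written as
# CONSTANT_F + design (lower case) + stop codons + spacer + CONSTANT_R:
# lib.write('my_library.fasta', add_stop_codon=True, extend=87)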
def change_template(self, seq):
'''Change or set the template.'''
self.template = Template(seq)
return
def add_restriction_enzyme(self, enzymes):
"""Add restriction enzyme definition.
Restriction enzyme definitions are the restrictions applied when the AA library is converted
to NA; see the write method.
Parameters
----------
enzymes : dict, list
Restriction enzyme definition: a dictionary `name: target_sequence`, or, if a list,
provide only the `target_sequences` and a numeric index will be used as the name
Returns
-------
"""
if isinstance(enzymes, dict):
for name, enzym in enzymes.items():
self.restriction_enzymes[name] = enzym
if isinstance(enzymes, list):
idx = len(self.restriction_enzymes)
for name, enzym in enumerate(enzymes):
self.restriction_enzymes[name+idx] = enzym
return
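# Usage sketch (illustrative, not part of the original module; enzyme names and
# target sites match the examples commented in __init__):
# lib.add_restriction_enzyme({'ecorI': 'GAATTC', 'XmaI': 'CCCGGG'})  # dict form: name -> site
# lib.add_restriction_enzyme(['GTCGAC'])  # list form: numeric names are assigned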
def random_purge(self, nsize):
"""Remove randon N items from the library.
Parameters
----------
nsize : int
Number of random items to remove
"""
# TODO: Too Slow
for _ in range(nsize):
self._aalibrary.pop(random.choice(list(self._aalibrary)))
return
def generate_single_variants(self, **kwargs):
"""From a sequence (string), return a dictionary of single muntatns.
Parameters
----------
positions : list
List of positions to be modified (default: all positions in the template)
inrange : list, tuple
[start, end] coordinates inside the seq, by default full length
bias : list
Amino acids to exclude or restrict to, e.g. ['A', 'W']
bias_type : str
How to handle the bias AA, 'exclude' or 'restrict', by default 'exclude'
Returns
-------
Raises
------
"""
if self.template is False:
raise Exception('A template must be defined to use this method')
# t = self.template
# if postions is not given use the entire lenght of the template
positions = kwargs.get('positions', range(self.template.len_))
# Add suffix to this peptides, by default _SV_ SingleVariant
suffix = kwargs.get('suffix', '_SV_')
for eachposition in positions:
for itera in self.template.get_all_mutants_in(eachposition, **kwargs):
a = ''.join(itera)
self._add_variant(a, suffix=suffix)
return
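# Usage sketch (illustrative, not part of the original module; positions and
# bias values are arbitrary):
# lib.generate_single_variants()                                   # mutate every position
# lib.generate_single_variants(positions=[0, 3], bias=['C'])       # exclude cysteine at positions 0 and 3
# lib.generate_single_variants(bias=['A'], bias_type='restrict')   # alanine-scanning style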
def generate_inframe_variants(self, frame_size=2, frame_interval=1, **kwargs):
"""From a sequence (string), return a dictionary of muntats in frame
TODO: change name to mutation screening, merge with generate_single_mutation
Parameters
----------
frame_interval : int
Number of positions between in-frame mutations (default 1)
frame_size : int
Not yet supported; only pairs are generated (default 2)
inrange : list, tuple
[start,end] coordinates inside the seq by default full length
bias : list
list of amino acids to exclude, e.g. ['A', 'W']
Returns
-------
"""
if not self.template:
raise Exception('A template must be defined to use this method')
t = self.template
suffix = kwargs.get('suffix', '_FM_')
for itera in t.frame_mutations(frame_size=frame_size, frame_interval=frame_interval, **kwargs):
a = ''.join(itera)
self._add_variant(a, suffix=suffix)
return
def generate_random_variants(self, n=1000, mutant_kind=[2], **kwargs):
"""From a sequence (string), return a dictionary of random muntats.
Parameters
----------
n : int, (default 1000)
how many random variants; stops early if the library limit is reached
mutant_kind : list
Kind of mutants, double, triple, etc (list, by default double mutants [2])
inrange: list, tuple
[start,end] coordinates inside the seq by default full length
bias : list, tuple
Amino acids excluded, e.g. ['A', 'W']
Returns
-------
"""
# mutants = {2:'double',3:'triple'}
# generate double, triple, etc random mutants
if not self.template:
raise Exception('A template must be defined to use this method')
t = self.template
# Random Variant _RA_
suffix = kwargs.get('suffix', '_RA_')
counter = 0
while counter < n:
for i in mutant_kind:
# Soft random, at least 50% of the wildtype
random_mutation = t.soft_randomization(num_mutations=i, inrange = kwargs.get('inrange', False), bias= kwargs.get('bias', False))
if self._add_variant(random_mutation, suffix=suffix):
counter += 1
return
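# Usage sketch (illustrative, not part of the original module; counts and range
# are arbitrary): 500 double and triple random mutants within positions 2-8.
# lib.generate_random_variants(n=500, mutant_kind=[2, 3], inrange=[2, 8])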
def generate_scrambled_variants(self, n=1000, suffix= '_SC_'):
"""From a sequence (string), return a dictionary of random muntats.
Parameters
----------
n : int, (default 1000)
how many scrambled variants; stops early if the library limit is reached
suffix : str
Suffix for the variant ids (default '_SC_')
Returns
-------
"""
if not self.template:
raise Exception('A template must be defined to use this method')
counter = 0
while counter < n:
scram_pep = self.template.scrambled()
if self._add_variant(scram_pep, suffix=suffix):
counter += 1
return
def single_json(self, jsonpath):
json_data = open(jsonpath).read()
cfg_data = json.loads(json_data)
for position, actions in cfg_data.items():
self.generate_single_variants(positions=[int(position)], **actions)
return
def permutations_json(self, jsonpath, **kwargs):
json_data = open(jsonpath).read()
cfg_data = json.loads(json_data)
permutation = []
# Permutation
suffix = kwargs.get('suffix', '_PR_')
for eachposition in range(self.template.len_):
permutation.append(cfg_data.get(str(eachposition), self.template.seq[eachposition]))
for i in itertools.product(*permutation):
variant = ''.join(list(i))
self._add_variant(variant, suffix=suffix)
return
###########
def _name_seq(self, suffix):
# Get index
index_id = len(self._aalibrary)
# I use _ as split of items in the fasta header
if '_' in self.lib_name:
lib_name = self.lib_name.replace('_', '-')
else:
lib_name = self.lib_name
# Set up suffix for seq id purposes
return lib_name + suffix + str(index_id)
def _lib_notfull(self):
# check the library limit status
# NOTE: despite the name, this returns True when the limit has been reached
if self.lib_limit:
if len(self._aalibrary) >= self.lib_limit:
# convert this to a raise
print('WARNING!: Library limit reached -{}-'.format(self.lib_limit))
return True
else:
return False
else:
return False
def _add_variant(self, var, suffix='_MA_'):
if not var in self._aalibrary:
# check library limit
if self._lib_notfull() is False:
self._aalibrary[var] = self._name_seq(suffix=suffix)
return True
return False
###########
class Template(object):
def __init__(self, seq):
"""Template Class
Parameters
----------
seq : str
Amino acid sequence
"""
# Sequence
self.seq = seq
# len of the seq
self.len_ = len(seq)
# One letter code Amino acid list
self._aa_list = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L',
'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y']
return
def __str__(self):
return self.seq
def mod_template(self, position, aa):
"""introduce a mutation in the seq in a determinate postion.
Can accept a sinple of multimple subsititutions
Paramenters
-----------
postion : int, list
Position or list of positions to change. from: 0->x.
aa : str
amminoacid one letter code, or list of aaminoacid.
must be syncronized with the positions
Returns
-------
list
Sequence with the modifications, as a list of one-letter codes
"""
variant = list(self.seq)
if isinstance(position, list):
if isinstance(aa, list):
for i in range(len(position)):
# print aa, position
variant[position[i]] = aa[i]
return variant
else:
for i in range(len(position)):
variant[position[i]] = aa
return variant
else:
variant[position] = aa
return variant
def get_aa_list(self, position, bias=False, bias_type='exclude', **Kargs):
"""Helper function. Return list of AA for a given position.
The current AA is removed from the list of potential mutations
so that no silent mutations are produced. If bias is given and
bias_type is 'exclude', the amino acids in bias are also removed;
if bias_type is 'restrict', only the amino acids in bias are
considered as options.
Parameters
----------
position : int
Position to change, from 0 to len-1.
bias : list
List of amino acids to exclude or restrict to
bias_type : str
How to handle the bias AA, 'exclude' or 'restrict', by default 'exclude'
Returns
-------
list
Returns a list with the potential amino acids
"""
try:
aa_alternatives_list = list(self._aa_list)
a = self.seq[position]
aa_alternatives_list.remove(a)
except:
print(sys.exc_info())
print('{}\t{}\t{}'.format(position, self.seq, self.len_))
sys.exit()
if bias and (isinstance(bias, list) or isinstance(bias, tuple)):
if bias_type == 'exclude':
for aminoacid in bias:
# current postion aa check
if a != aminoacid:
aa_alternatives_list.remove(aminoacid)
elif bias_type == 'restrict':
aa_alternatives_list = bias
try:
aa_alternatives_list.remove(a)
except:
pass
else:
raise KeyError(
'bias_type unsupported: exclude or restrict are the only variables addmited')
# print aa_alternatives_list,a,position,'aqui'
return aa_alternatives_list
def random_pair_mutant(self, positions, **Kargs):
"""Return a seq with random mutations in a given list of positions
Parameters
----------
positions : list
List of positions to change, from 0 to len-1.
bias : list
List of amino acids to exclude or restrict to (passed to get_aa_list)
Returns
-------
list
Sequence with the modifications, as a list of one-letter codes
"""
changes = []
for pos in positions:
# print self.get_aa_list(pos)
changes.append(random.choice(self.get_aa_list(pos, **Kargs)))
# print changes, 'primo'
return self.mod_template(positions, changes)
def get_all_mutants_in(self, position, **Kargs):
'''Return all the mutated sequences with the given position modified
Parameters
----------
position : int
Position to change, from 0 to len-1.
bias : list
List of amino acids to exclude or restrict to
Yields
------
list
The mutated sequence for the given position, as a list of one-letter codes
'''
list_of_subtitutions = self.get_aa_list(position, **Kargs)
for a in list_of_subtitutions:
if a != self.seq[position]:
yield self.mod_template(position, a)
def soft_randomization(self, num_mutations=2, inrange=False, bias=False):
"""Randomly select N postions (num_mutations), and randomly
mutanted and return seq.
Parameters
----------
num_mutations : int
Number of positions to mutate
inrange : list, or tuple
list or tuple with [start,end] coordinates inside the seq
(by default full length)
bias : list
Amino acids excluded, e.g. ['A', 'W']
Returns
-------
str
Sequence with the mutations
"""
positions = []
if not inrange:
start = 0
end = self.len_ - 1
else:
start = inrange[0] - 1
end = inrange[1] - 1
peptide_positions = list(range(start, end + 1))
for i in range(0, num_mutations):
muta_at = (random.choice(peptide_positions))
peptide_positions.remove(muta_at)
positions.append(muta_at)
random_mutation = self.random_pair_mutant(positions, bias=bias)
random_mutation = ''.join(random_mutation)
return random_mutation
def frame_mutations(self, frame_size=2, frame_interval=1, inrange=False, shared_bias=False, **Kargs):
"""This method, return a sequence of corraleted mutations. Need improvement
suport more than a pair frame, allow specific bias for each position.
Paramenters
-----------
frame_size : int
Number of positions to mutate in frame
frame_interval : int
Number of positions between inframe mutations
(by default 1, meaning neighbouring positions; 3 or 4 is recommended for a helix)
inrange : list, or tuple
list or tuple with [start,end] coordinates inside the seq
(by default full length)
shared_bias : bool
Whether the excluded amino acids (bias) are shared between the two in-frame positions
Yields
------
"""
if not inrange:
start = 0
end = self.len_ - 1
else:
start = inrange[0] - 1
end = inrange[1] - 1
for i in range(start, end + 1):
if i != end and i+frame_interval<end:
for aa1 in self.get_aa_list(i, bias=shared_bias, **Kargs):
for aa2 in self.get_aa_list(i + frame_interval, bias=shared_bias, **Kargs):
yield self.mod_template([i, i + frame_interval], [aa1, aa2])
def scrambled(self):
"""Generate a random scrambled peptide from the template.
Parameters
----------
Returns
-------
str
"""
scram = list()
seq = [x for x in self.seq]
random.shuffle(seq)
for a in seq:
scram.append(a)
return ''.join(scram)
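# Usage sketch (illustrative, not part of the original module; the peptide is
# the MDM2 example from the module docstring):
# t = Template('ETFSDLW')
# ''.join(t.mod_template(0, 'A'))        # 'ATFSDLW' - single substitution
# t.soft_randomization(num_mutations=2)  # random double mutant, returned as a string
# t.scrambled()                          # random shuffle of the template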
def check_lib_integrty(file_library, master_seq, oligo_length,
CONSTANT_F='CAGCCTCTTCATCTGGC',
CONSTANT_R='GGTGGAGGATCCGGAG',
restriction_site=['GAATTC', 'CCCGGG', 'GTCGAC']):
"""Check if the librar complains with restrictions sites and lenght setup.
Parameters
----------
Returns
-------
"""
# create dict seq
dict_seq = {}
with open(file_library, 'r') as input_file:
for line in input_file:
if line[0] == '>':
line = line.strip()
pep, mut, num, aaseq = line.split('_')
seq = next(input_file)
seq = seq.strip()
seq = seq.upper()
# print seaq
# check length
if len(seq) != oligo_length:
print('ERROR Length on {} {} {} {}'.format(pep, mut, num, seq))
# check restriction sites
for site in restriction_site:
if seq.find(site) != -1:
print('RESTRICTION SITE FOUND {} at {} on {} {} {} {}'.format(site,
seq.find(
site),
pep,
mut,
num,
seq))
skip = len(CONSTANT_F)
template = translate2aa(seq[skip:len(aaseq) * 3 + skip])
if aaseq != template:
print('ERROR on translation {} {} {} {} {} {}'.format(pep, mut, num, aaseq,
template, seq))
if mut == 'SV':
if lstn.distance(master_seq, template) != 1:
print('ERROR more mut than expected in a SV {} {} {} {} {} {}'.format(pep,
mut,
num,
master_seq,
template,
seq))
if mut == 'RA':
if lstn.distance(master_seq, template) != 2:
print('ERROR more mut than expected in a RA {} {} {} {} {} {}'.format(pep,
mut,
num,
master_seq,
template,
seq))
if seq.find(CONSTANT_F) != 0:
print('ERROR with the constant region F {} {} {} {} {} {}'.format(pep,
mut,
num,
aaseq,
template,
seq))
if seq.find(CONSTANT_R) + len(CONSTANT_R) != oligo_length:
print('ERROR with the constant region R {} {} {} {} {} {}'.format(pep,
mut,
num,
aaseq,
template,
seq))
# print seq.find(constan1t_L)+len(constant_L)
# if len(template) != len(AA):
# print 'ERROR with the len', pep,mut,num,aaseq,template,seq
if template in dict_seq:
print('ERROR seq duplicated {} {} {} {} {} {}'.format(pep,
mut,
num,
aaseq,
template,
seq))
else:
dict_seq[template] = 1
# quick methods
# Alanine scanning Library
def Alanine_scanning(pep_library, AA='A', **Kargs):
# aminoacids = ['I','L','V','F',' M','C','G','A','P','T','S','Y','W','Q','N',
# 'E','D','K','R']
# aminoacids.remove(AA)
pep_library.generate_single_variants(bias=[AA], bias_type='restrict', **Kargs)
return pep_library
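# Usage sketch (illustrative, not part of the original module; assumes `lib` is
# a peptide_library built from a template):
# lib = Alanine_scanning(lib)  # every position substituted by alanine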
# need to be improved
# Postion Scanning Library
# def position_scanning(pep_library,inrange)
# screen_library(inrage=)
# Overlapping Peptide Library
# def overlapping_pep_lib(pep_library,overlap,**Kargs):
# pep_library.generate_inframe_variants(self,frame_size=overlap,**Kargs)
# return pep_library
# change inframe for linked mutations
def setup_options():
parser = argparse.ArgumentParser(
description="Generation Libraries: build an oligo library from a "
"template sequence (-t TEMPLATE_SEQ). Run with -h for help information.")
# actions
# Kind of generations
# Parameters of the generators
# restriction enzymes
# constant regions
# config file?
parser.add_argument('-t', '--template', dest='template_seq',
default=False, help='Template amino acid sequence')
opts, args = parser.parse_known_args()
if not opts.template_seq:
parser.error('A template seq not given')
return opts, args
if __name__ == '__main__':
# test_library_mutations()
pass
# TODO
# implement Quick methods
# Alanine scanning
#
# Position Scanning Library
# Random library
# scrambled Library
# Options functionality
# import pandas as pd
# pd.read_cvs('ecoli.freq',delim_whitespace=True)
#
# LOG_FILENAME = 'default.log'
# logging.basicConfig(filename=LOG_FILENAME,
# level=logging.DEBUG,
# )
# logg = logging.getLogger(__name__)
# logg.debug('This message should go to the log file')
#
#
| mit |
zihua/scikit-learn | sklearn/linear_model/logistic.py | 2 | 67750 |
"""
Logistic Regression
"""
# Author: Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Manoj Kumar <[email protected]>
# Lars Buitinck
# Simon Wu <[email protected]>
import numbers
import warnings
import numpy as np
from scipy import optimize, sparse
from .base import LinearClassifierMixin, SparseCoefMixin, BaseEstimator
from .sag import sag_solver
from ..feature_selection.from_model import _LearntSelectorMixin
from ..preprocessing import LabelEncoder, LabelBinarizer
from ..svm.base import _fit_liblinear
from ..utils import check_array, check_consistent_length, compute_class_weight
from ..utils import check_random_state
from ..utils.extmath import (logsumexp, log_logistic, safe_sparse_dot,
softmax, squared_norm)
from ..utils.extmath import row_norms
from ..utils.optimize import newton_cg
from ..utils.validation import check_X_y
from ..exceptions import DataConversionWarning
from ..exceptions import NotFittedError
from ..utils.fixes import expit
from ..utils.multiclass import check_classification_targets
from ..externals.joblib import Parallel, delayed
from ..model_selection import check_cv
from ..externals import six
from ..metrics import SCORERS
# .. some helper functions for logistic_regression_path ..
def _intercept_dot(w, X, y):
"""Computes y * np.dot(X, w).
It takes into consideration if the intercept should be fit or not.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
Returns
-------
w : ndarray, shape (n_features,)
Coefficient vector without the intercept weight (w[-1]) if the
intercept should be fit. Unchanged otherwise.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Unchanged.
yz : float
y * np.dot(X, w).
"""
c = 0.
if w.size == X.shape[1] + 1:
c = w[-1]
w = w[:-1]
z = safe_sparse_dot(X, w) + c
yz = y * z
return w, c, yz
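# Worked toy example (illustrative, not part of scikit-learn):
# with X = [[1., 2.]], y = [1.] and w = [1., 2., 0.5] (last entry = intercept),
# the function returns w = [1., 2.], c = 0.5 and yz = [5.5] (= 1 * (1*1 + 2*2 + 0.5)).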
def _logistic_loss_and_grad(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss and gradient.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
"""
n_samples, n_features = X.shape
grad = np.empty_like(w)
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(n_samples)
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if grad.shape[0] > n_features:
grad[-1] = z0.sum()
return out, grad
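# In formula form (illustrative summary of the code above, with s = sample_weight
# and yz = y * (X @ w + c)):
# loss(w, c) = -sum_i s_i * log(sigmoid(yz_i)) + 0.5 * alpha * ||w||^2
# dloss/dw   = X.T @ (s * (sigmoid(yz) - 1) * y) + alpha * w
# dloss/dc   = sum_i s_i * (sigmoid(yz_i) - 1) * y_i    (only if the intercept is fit)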
def _logistic_loss(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
"""
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
return out
def _logistic_grad_hess(w, X, y, alpha, sample_weight=None):
"""Computes the gradient and the Hessian, in the case of a logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
Hs : callable
Function that takes the gradient as a parameter and returns the
matrix product of the Hessian and gradient.
"""
n_samples, n_features = X.shape
grad = np.empty_like(w)
fit_intercept = grad.shape[0] > n_features
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if fit_intercept:
grad[-1] = z0.sum()
# The mat-vec product of the Hessian
d = sample_weight * z * (1 - z)
if sparse.issparse(X):
dX = safe_sparse_dot(sparse.dia_matrix((d, 0),
shape=(n_samples, n_samples)), X)
else:
# Precompute as much as possible
dX = d[:, np.newaxis] * X
if fit_intercept:
# Calculate the double derivative with respect to intercept
# In the case of sparse matrices this returns a matrix object.
dd_intercept = np.squeeze(np.array(dX.sum(axis=0)))
def Hs(s):
ret = np.empty_like(s)
ret[:n_features] = X.T.dot(dX.dot(s[:n_features]))
ret[:n_features] += alpha * s[:n_features]
# For the fit intercept case.
if fit_intercept:
ret[:n_features] += s[-1] * dd_intercept
ret[-1] = dd_intercept.dot(s[:n_features])
ret[-1] += d.sum() * s[-1]
return ret
return grad, Hs
def _multinomial_loss(w, X, Y, alpha, sample_weight):
"""Computes multinomial loss and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
loss : float
Multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities.
w : ndarray, shape (n_classes, n_features)
Reshaped param vector excluding intercept terms.
Reference
---------
Bishop, C. M. (2006). Pattern recognition and machine learning.
Springer. (Chapter 4.3.4)
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
w = w.reshape(n_classes, -1)
sample_weight = sample_weight[:, np.newaxis]
if fit_intercept:
intercept = w[:, -1]
w = w[:, :-1]
else:
intercept = 0
p = safe_sparse_dot(X, w.T)
p += intercept
p -= logsumexp(p, axis=1)[:, np.newaxis]
loss = -(sample_weight * Y * p).sum()
loss += 0.5 * alpha * squared_norm(w)
p = np.exp(p, p)
return loss, p, w
def _multinomial_loss_grad(w, X, Y, alpha, sample_weight):
"""Computes the multinomial loss, gradient and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
loss : float
Multinomial loss.
grad : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities
Reference
---------
Bishop, C. M. (2006). Pattern recognition and machine learning.
Springer. (Chapter 4.3.4)
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = (w.size == n_classes * (n_features + 1))
grad = np.zeros((n_classes, n_features + bool(fit_intercept)))
loss, p, w = _multinomial_loss(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
diff = sample_weight * (p - Y)
grad[:, :n_features] = safe_sparse_dot(diff.T, X)
grad[:, :n_features] += alpha * w
if fit_intercept:
grad[:, -1] = diff.sum(axis=0)
return loss, grad.ravel(), p
def _multinomial_grad_hess(w, X, Y, alpha, sample_weight):
"""
Computes the gradient and the Hessian, in the case of a multinomial loss.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
grad : array, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
hessp : callable
Function that takes in a vector input of shape (n_classes * n_features)
or (n_classes * (n_features + 1)) and returns matrix-vector product
with hessian.
References
----------
Barak A. Pearlmutter (1993). Fast Exact Multiplication by the Hessian.
http://www.bcl.hamilton.ie/~barak/papers/nc-hessian.pdf
"""
n_features = X.shape[1]
n_classes = Y.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
# `loss` is unused. Refactoring to avoid computing it does not
# significantly speed up the computation and decreases readability
loss, grad, p = _multinomial_loss_grad(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
# Hessian-vector product derived by applying the R-operator on the gradient
# of the multinomial loss function.
def hessp(v):
v = v.reshape(n_classes, -1)
if fit_intercept:
inter_terms = v[:, -1]
v = v[:, :-1]
else:
inter_terms = 0
# r_yhat holds the result of applying the R-operator on the multinomial
# estimator.
r_yhat = safe_sparse_dot(X, v.T)
r_yhat += inter_terms
r_yhat += (-p * r_yhat).sum(axis=1)[:, np.newaxis]
r_yhat *= p
r_yhat *= sample_weight
hessProd = np.zeros((n_classes, n_features + bool(fit_intercept)))
hessProd[:, :n_features] = safe_sparse_dot(r_yhat.T, X)
hessProd[:, :n_features] += v * alpha
if fit_intercept:
hessProd[:, -1] = r_yhat.sum(axis=0)
return hessProd.ravel()
return grad, hessp
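# --- Illustrative check sketch (not part of the original module) ----------
# A hedged finite-difference check of the Hessian-vector product returned
# by _multinomial_grad_hess: hessp(v) should be close to
# (grad(w + eps * v) - grad(w)) / eps for a small eps. The helper name
# `_demo_check_hessp` is hypothetical and used only for illustration.
def _demo_check_hessp(eps=1e-5):
    rng = np.random.RandomState(1)
    X = rng.randn(20, 4)
    Y = LabelBinarizer().fit_transform(np.arange(20) % 3)
    sample_weight = np.ones(X.shape[0])
    n_params = Y.shape[1] * (X.shape[1] + 1)  # with intercept terms
    w, v = rng.randn(n_params), rng.randn(n_params)
    grad, hessp = _multinomial_grad_hess(w, X, Y, 1.0, sample_weight)
    grad_eps, _ = _multinomial_grad_hess(w + eps * v, X, Y, 1.0,
                                         sample_weight)
    finite_diff = (grad_eps - grad) / eps
    # The maximum discrepancy should be small (on the order of eps).
    return np.max(np.abs(hessp(v) - finite_diff))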
def _check_solver_option(solver, multi_class, penalty, dual):
if solver not in ['liblinear', 'newton-cg', 'lbfgs', 'sag']:
raise ValueError("Logistic Regression supports only liblinear,"
" newton-cg, lbfgs and sag solvers, got %s" % solver)
if multi_class not in ['multinomial', 'ovr']:
raise ValueError("multi_class should be either multinomial or "
"ovr, got %s" % multi_class)
if multi_class == 'multinomial' and solver == 'liblinear':
raise ValueError("Solver %s does not support "
"a multinomial backend." % solver)
if solver != 'liblinear':
if penalty != 'l2':
raise ValueError("Solver %s supports only l2 penalties, "
"got %s penalty." % (solver, penalty))
if dual:
raise ValueError("Solver %s supports only "
"dual=False, got dual=%s" % (solver, dual))
def logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
max_iter=100, tol=1e-4, verbose=0,
solver='lbfgs', coef=None, copy=False,
class_weight=None, dual=False, penalty='l2',
intercept_scaling=1., multi_class='ovr',
random_state=None, check_input=True,
max_squared_sum=None, sample_weight=None):
"""Compute a Logistic Regression model for a list of regularization
parameters.
This is an implementation that uses the result of the previous model
to speed up computations along the set of solutions, making it faster
than sequentially calling LogisticRegression for the different parameters.
Note that there will be no speedup with liblinear solver, since it does
not handle warm-starting.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,)
Input data, target values.
Cs : int | array-like, shape (n_cs,)
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
fit_intercept : bool
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int
Maximum number of iterations for the solver.
tol : float
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag'}
Numerical solver to use.
coef : array-like, shape (n_features,), default None
Initialization value for coefficients of logistic regression.
Useless for liblinear solver.
copy : bool, default False
Whether or not to produce a copy of the data. A copy is not required
anymore. This parameter is deprecated and will be removed in 0.19.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties.
intercept_scaling : float, default 1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs' and
'newton-cg' solvers.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used only in solvers 'sag' and 'liblinear'.
check_input : bool, default True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like, shape(n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
n_iter : array, shape (n_cs,)
Actual number of iteration for each Cs.
Notes
-----
You might get slightly different results with the solver liblinear than
with the others since this uses LIBLINEAR which penalizes the intercept.
"""
if copy:
warnings.warn("A copy is not required anymore. The 'copy' parameter "
"is deprecated and will be removed in 0.19.",
DeprecationWarning)
if isinstance(Cs, numbers.Integral):
Cs = np.logspace(-4, 4, Cs)
_check_solver_option(solver, multi_class, penalty, dual)
# Preprocessing.
if check_input or copy:
X = check_array(X, accept_sparse='csr', dtype=np.float64)
y = check_array(y, ensure_2d=False, copy=copy, dtype=None)
check_consistent_length(X, y)
_, n_features = X.shape
classes = np.unique(y)
random_state = check_random_state(random_state)
if pos_class is None and multi_class != 'multinomial':
if (classes.size > 2):
raise ValueError('To fit OvR, use the pos_class argument')
# np.unique(y) gives labels in sorted order.
pos_class = classes[1]
# If sample weights exist, convert them to array (support for lists)
# and check length
# Otherwise set them to 1 for all examples
if sample_weight is not None:
sample_weight = np.array(sample_weight, dtype=np.float64, order='C')
check_consistent_length(y, sample_weight)
else:
sample_weight = np.ones(X.shape[0])
# If class_weights is a dict (provided by the user), the weights
# are assigned to the original labels. If it is "balanced", then
# the class_weights are assigned after masking the labels with a OvR.
le = LabelEncoder()
if isinstance(class_weight, dict) or multi_class == 'multinomial':
class_weight_ = compute_class_weight(class_weight, classes, y)
sample_weight *= class_weight_[le.fit_transform(y)]
    # For doing an OvR, we need to mask the labels first. For the
    # multinomial case this is not necessary.
if multi_class == 'ovr':
w0 = np.zeros(n_features + int(fit_intercept))
mask_classes = np.array([-1, 1])
mask = (y == pos_class)
y_bin = np.ones(y.shape, dtype=np.float64)
y_bin[~mask] = -1.
# for compute_class_weight
# 'auto' is deprecated and will be removed in 0.19
if class_weight in ("auto", "balanced"):
class_weight_ = compute_class_weight(class_weight, mask_classes,
y_bin)
sample_weight *= class_weight_[le.fit_transform(y_bin)]
else:
if solver != 'sag':
lbin = LabelBinarizer()
Y_multi = lbin.fit_transform(y)
if Y_multi.shape[1] == 1:
Y_multi = np.hstack([1 - Y_multi, Y_multi])
else:
# SAG multinomial solver needs LabelEncoder, not LabelBinarizer
le = LabelEncoder()
Y_multi = le.fit_transform(y)
w0 = np.zeros((classes.size, n_features + int(fit_intercept)),
order='F')
if coef is not None:
        # it must work both when the bias term is given and when it is not
if multi_class == 'ovr':
if coef.size not in (n_features, w0.size):
raise ValueError(
'Initialization coef is of shape %d, expected shape '
'%d or %d' % (coef.size, n_features, w0.size))
w0[:coef.size] = coef
else:
# For binary problems coef.shape[0] should be 1, otherwise it
# should be classes.size.
n_classes = classes.size
if n_classes == 2:
n_classes = 1
if (coef.shape[0] != n_classes or
coef.shape[1] not in (n_features, n_features + 1)):
raise ValueError(
'Initialization coef is of shape (%d, %d), expected '
'shape (%d, %d) or (%d, %d)' % (
coef.shape[0], coef.shape[1], classes.size,
n_features, classes.size, n_features + 1))
w0[:, :coef.shape[1]] = coef
if multi_class == 'multinomial':
        # fmin_l_bfgs_b and newton-cg accept only ravelled parameters.
if solver in ['lbfgs', 'newton-cg']:
w0 = w0.ravel()
target = Y_multi
if solver == 'lbfgs':
func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2]
elif solver == 'newton-cg':
func = lambda x, *args: _multinomial_loss(x, *args)[0]
grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1]
hess = _multinomial_grad_hess
warm_start_sag = {'coef': w0.T}
else:
target = y_bin
if solver == 'lbfgs':
func = _logistic_loss_and_grad
elif solver == 'newton-cg':
func = _logistic_loss
grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1]
hess = _logistic_grad_hess
warm_start_sag = {'coef': np.expand_dims(w0, axis=1)}
coefs = list()
n_iter = np.zeros(len(Cs), dtype=np.int32)
for i, C in enumerate(Cs):
if solver == 'lbfgs':
try:
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol, maxiter=max_iter)
except TypeError:
# old scipy doesn't have maxiter
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol)
if info["warnflag"] == 1 and verbose > 0:
warnings.warn("lbfgs failed to converge. Increase the number "
"of iterations.")
try:
n_iter_i = info['nit'] - 1
            except KeyError:  # older scipy versions do not report 'nit'
n_iter_i = info['funcalls'] - 1
elif solver == 'newton-cg':
args = (X, target, 1. / C, sample_weight)
w0, n_iter_i = newton_cg(hess, func, grad, w0, args=args,
maxiter=max_iter, tol=tol)
elif solver == 'liblinear':
coef_, intercept_, n_iter_i, = _fit_liblinear(
X, target, C, fit_intercept, intercept_scaling, None,
penalty, dual, verbose, max_iter, tol, random_state,
sample_weight=sample_weight)
if fit_intercept:
w0 = np.concatenate([coef_.ravel(), intercept_])
else:
w0 = coef_.ravel()
elif solver == 'sag':
if multi_class == 'multinomial':
target = target.astype(np.float64)
loss = 'multinomial'
else:
loss = 'log'
w0, n_iter_i, warm_start_sag = sag_solver(
X, target, sample_weight, loss, 1. / C, max_iter, tol,
verbose, random_state, False, max_squared_sum, warm_start_sag)
else:
raise ValueError("solver must be one of {'liblinear', 'lbfgs', "
"'newton-cg', 'sag'}, got '%s' instead" % solver)
if multi_class == 'multinomial':
multi_w0 = np.reshape(w0, (classes.size, -1))
if classes.size == 2:
multi_w0 = multi_w0[1][np.newaxis, :]
coefs.append(multi_w0)
else:
coefs.append(w0.copy())
n_iter[i] = n_iter_i
return coefs, np.array(Cs), n_iter
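# --- Illustrative usage sketch (not part of the original module) ----------
# A hedged example of calling logistic_regression_path directly on a toy
# binary problem: one coefficient vector is returned per value of C, with
# the intercept stored as the last entry when fit_intercept=True. The
# helper name `_demo_regression_path` is hypothetical.
def _demo_regression_path():
    rng = np.random.RandomState(0)
    X = rng.randn(30, 2)
    y = (X[:, 0] + 0.5 * rng.randn(30) > 0).astype(int)
    coefs, Cs, n_iter = logistic_regression_path(
        X, y, Cs=5, fit_intercept=True, solver='lbfgs')
    assert len(coefs) == 5 and coefs[0].shape == (3,)  # n_features + 1
    assert Cs.shape == (5,) and n_iter.shape == (5,)
    return coefs, Cs, n_iter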
# helper function for LogisticCV
def _log_reg_scoring_path(X, y, train, test, pos_class=None, Cs=10,
scoring=None, fit_intercept=False,
max_iter=100, tol=1e-4, class_weight=None,
verbose=0, solver='lbfgs', penalty='l2',
dual=False, intercept_scaling=1.,
multi_class='ovr', random_state=None,
max_squared_sum=None, sample_weight=None):
"""Computes scores across logistic_regression_path
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target labels.
train : list of indices
The indices of the train set.
test : list of indices
The indices of the test set.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : list of floats | int
Each of the values in Cs describes the inverse of
        regularization strength. If Cs is an int, then a grid of Cs
        values is chosen in a logarithmic scale between 1e-4 and 1e4.
If not provided, then a fixed set of values for Cs are used.
scoring : callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``. For a list of scoring functions
that can be used, look at :mod:`sklearn.metrics`. The
default scoring option used is accuracy_score.
fit_intercept : bool
If False, then the bias term is set to zero. Else the last
term of each coef_ gives us the intercept.
max_iter : int
Maximum number of iterations for the solver.
tol : float
Tolerance for stopping criteria.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag'}
Decides which solver to use.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
intercept_scaling : float, default 1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs' and
        'newton-cg' solvers.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used only in solvers 'sag' and 'liblinear'.
max_squared_sum : float, default None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like, shape(n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
scores : ndarray, shape (n_cs,)
Scores obtained for each Cs.
n_iter : array, shape(n_cs,)
Actual number of iteration for each Cs.
"""
_check_solver_option(solver, multi_class, penalty, dual)
X_train = X[train]
X_test = X[test]
y_train = y[train]
y_test = y[test]
if sample_weight is not None:
sample_weight = sample_weight[train]
coefs, Cs, n_iter = logistic_regression_path(
X_train, y_train, Cs=Cs, fit_intercept=fit_intercept,
solver=solver, max_iter=max_iter, class_weight=class_weight,
pos_class=pos_class, multi_class=multi_class,
tol=tol, verbose=verbose, dual=dual, penalty=penalty,
intercept_scaling=intercept_scaling, random_state=random_state,
check_input=False, max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
log_reg = LogisticRegression(fit_intercept=fit_intercept)
# The score method of Logistic Regression has a classes_ attribute.
if multi_class == 'ovr':
log_reg.classes_ = np.array([-1, 1])
elif multi_class == 'multinomial':
log_reg.classes_ = np.unique(y_train)
else:
raise ValueError("multi_class should be either multinomial or ovr, "
"got %d" % multi_class)
if pos_class is not None:
mask = (y_test == pos_class)
y_test = np.ones(y_test.shape, dtype=np.float64)
y_test[~mask] = -1.
# To deal with object dtypes, we need to convert into an array of floats.
y_test = check_array(y_test, dtype=np.float64, ensure_2d=False)
scores = list()
if isinstance(scoring, six.string_types):
scoring = SCORERS[scoring]
for w in coefs:
if multi_class == 'ovr':
w = w[np.newaxis, :]
if fit_intercept:
log_reg.coef_ = w[:, :-1]
log_reg.intercept_ = w[:, -1]
else:
log_reg.coef_ = w
log_reg.intercept_ = 0.
if scoring is None:
scores.append(log_reg.score(X_test, y_test))
else:
scores.append(scoring(log_reg, X_test, y_test))
return coefs, Cs, np.array(scores), n_iter
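# --- Illustrative usage sketch (not part of the original module) ----------
# A hedged example of _log_reg_scoring_path on a manual train/test split:
# it fits the path on the training indices and returns one score per value
# of C evaluated on the test indices (accuracy by default). The helper name
# `_demo_scoring_path` is hypothetical and used only for illustration.
def _demo_scoring_path():
    rng = np.random.RandomState(0)
    X = rng.randn(40, 2)
    y = (X[:, 0] > 0).astype(int)
    train, test = np.arange(30), np.arange(30, 40)
    coefs, Cs, scores, n_iter = _log_reg_scoring_path(
        X, y, train, test, pos_class=1, Cs=4, fit_intercept=True,
        solver='lbfgs')
    assert scores.shape == (4,)  # one held-out score per value of C
    return scores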
class LogisticRegression(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Logistic Regression (aka logit, MaxEnt) classifier.
In the multiclass case, the training algorithm uses the one-vs-rest (OvR)
scheme if the 'multi_class' option is set to 'ovr', and uses the cross-
entropy loss if the 'multi_class' option is set to 'multinomial'.
(Currently the 'multinomial' option is supported only by the 'lbfgs',
'sag' and 'newton-cg' solvers.)
This class implements regularized logistic regression using the
'liblinear' library, 'newton-cg', 'sag' and 'lbfgs' solvers. It can handle
both dense and sparse input. Use C-ordered arrays or CSR matrices
containing 64-bit floats for optimal performance; any other input format
will be converted (and copied).
The 'newton-cg', 'sag', and 'lbfgs' solvers support only L2 regularization
with primal formulation. The 'liblinear' solver supports both L1 and L2
regularization, with a dual formulation only for the L2 penalty.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
penalty : str, 'l1' or 'l2', default: 'l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties.
dual : bool, default: False
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
C : float, default: 1.0
Inverse of regularization strength; must be a positive float.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
intercept_scaling : float, default 1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : dict or 'balanced', default: None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
.. versionadded:: 0.17
*class_weight='balanced'* instead of deprecated
*class_weight='auto'*.
max_iter : int, default: 100
Useful only for the newton-cg, sag and lbfgs solvers.
Maximum number of iterations taken for the solvers to converge.
random_state : int seed, RandomState instance, default: None
The seed of the pseudo random number generator to use when
shuffling the data. Used only in solvers 'sag' and 'liblinear'.
solver : {'newton-cg', 'lbfgs', 'liblinear', 'sag'}, default: 'liblinear'
Algorithm to use in the optimization problem.
- For small datasets, 'liblinear' is a good choice, whereas 'sag' is
faster for large ones.
- For multiclass problems, only 'newton-cg', 'sag' and 'lbfgs' handle
multinomial loss; 'liblinear' is limited to one-versus-rest
schemes.
- 'newton-cg', 'lbfgs' and 'sag' only handle L2 penalty.
Note that 'sag' fast convergence is only guaranteed on features with
approximately the same scale. You can preprocess the data with a
scaler from sklearn.preprocessing.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
tol : float, default: 1e-4
Tolerance for stopping criteria.
multi_class : str, {'ovr', 'multinomial'}, default: 'ovr'
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'newton-cg',
        'sag' and 'lbfgs' solvers.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
verbose : int, default: 0
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
warm_start : bool, default: False
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Useless for liblinear solver.
.. versionadded:: 0.17
*warm_start* to support *lbfgs*, *newton-cg*, *sag* solvers.
n_jobs : int, default: 1
Number of CPU cores used during the cross-validation loop. If given
a value of -1, all cores are used.
Attributes
----------
coef_ : array, shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
`coef_` is of shape (1, n_features) when the given problem
is binary.
intercept_ : array, shape (1,) or (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
`intercept_` is of shape(1,) when the problem is binary.
n_iter_ : array, shape (n_classes,) or (1, )
Actual number of iterations for all classes. If binary or multinomial,
it returns only 1 element. For liblinear solver, only the maximum
number of iteration across all classes is given.
See also
--------
SGDClassifier : incrementally trained logistic regression (when given
the parameter ``loss="log"``).
sklearn.svm.LinearSVC : learns SVM models using the same algorithm.
Notes
-----
    The underlying C implementation uses a random number generator to
    select features when fitting the model. It is thus not uncommon
    to have slightly different results for the same input data. If
    that happens, try with a smaller tol parameter.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
References
----------
LIBLINEAR -- A Library for Large Linear Classification
http://www.csie.ntu.edu.tw/~cjlin/liblinear/
Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent
methods for logistic regression and maximum entropy models.
Machine Learning 85(1-2):41-75.
http://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf
"""
def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
fit_intercept=True, intercept_scaling=1, class_weight=None,
random_state=None, solver='liblinear', max_iter=100,
multi_class='ovr', verbose=0, warm_start=False, n_jobs=1):
self.penalty = penalty
self.dual = dual
self.tol = tol
self.C = C
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.random_state = random_state
self.solver = solver
self.max_iter = max_iter
self.multi_class = multi_class
self.verbose = verbose
self.warm_start = warm_start
self.n_jobs = n_jobs
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
.. versionadded:: 0.17
*sample_weight* support to LogisticRegression.
Returns
-------
self : object
Returns self.
"""
if not isinstance(self.C, numbers.Number) or self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64,
order="C")
check_classification_targets(y)
self.classes_ = np.unique(y)
n_samples, n_features = X.shape
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual)
if self.solver == 'liblinear':
self.coef_, self.intercept_, n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state,
sample_weight=sample_weight)
self.n_iter_ = np.array([n_iter_])
return self
if self.solver == 'sag':
max_squared_sum = row_norms(X, squared=True).max()
else:
max_squared_sum = None
n_classes = len(self.classes_)
classes_ = self.classes_
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % classes_[0])
if len(self.classes_) == 2:
n_classes = 1
classes_ = classes_[1:]
if self.warm_start:
warm_start_coef = getattr(self, 'coef_', None)
else:
warm_start_coef = None
if warm_start_coef is not None and self.fit_intercept:
warm_start_coef = np.append(warm_start_coef,
self.intercept_[:, np.newaxis],
axis=1)
self.coef_ = list()
self.intercept_ = np.zeros(n_classes)
# Hack so that we iterate only once for the multinomial case.
if self.multi_class == 'multinomial':
classes_ = [None]
warm_start_coef = [warm_start_coef]
if warm_start_coef is None:
warm_start_coef = [None] * n_classes
path_func = delayed(logistic_regression_path)
# The SAG solver releases the GIL so it's more efficient to use
# threads for this solver.
backend = 'threading' if self.solver == 'sag' else 'multiprocessing'
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend=backend)(
path_func(X, y, pos_class=class_, Cs=[self.C],
fit_intercept=self.fit_intercept, tol=self.tol,
verbose=self.verbose, solver=self.solver, copy=False,
multi_class=self.multi_class, max_iter=self.max_iter,
class_weight=self.class_weight, check_input=False,
random_state=self.random_state, coef=warm_start_coef_,
max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
for (class_, warm_start_coef_) in zip(classes_, warm_start_coef))
fold_coefs_, _, n_iter_ = zip(*fold_coefs_)
self.n_iter_ = np.asarray(n_iter_, dtype=np.int32)[:, 0]
if self.multi_class == 'multinomial':
self.coef_ = fold_coefs_[0][0]
else:
self.coef_ = np.asarray(fold_coefs_)
self.coef_ = self.coef_.reshape(n_classes, n_features +
int(self.fit_intercept))
if self.fit_intercept:
self.intercept_ = self.coef_[:, -1]
self.coef_ = self.coef_[:, :-1]
return self
def predict_proba(self, X):
"""Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
For a multi_class problem, if multi_class is set to be "multinomial"
the softmax function is used to find the predicted probability of
each class.
        Else use a one-vs-rest approach, i.e. calculate the probability
        of each class assuming it to be positive using the logistic function,
        and normalize these values across all the classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in ``self.classes_``.
"""
if not hasattr(self, "coef_"):
raise NotFittedError("Call fit before prediction")
calculate_ovr = self.coef_.shape[0] == 1 or self.multi_class == "ovr"
if calculate_ovr:
return super(LogisticRegression, self)._predict_proba_lr(X)
else:
return softmax(self.decision_function(X), copy=False)
def predict_log_proba(self, X):
"""Log of probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in ``self.classes_``.
"""
return np.log(self.predict_proba(X))
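# --- Illustrative usage sketch (not part of the original module) ----------
# A hedged example contrasting the two predict_proba code paths described
# above: multi_class='ovr' normalizes per-class logistic probabilities,
# while multi_class='multinomial' applies a softmax to the decision
# function. Either way each row of probabilities sums to one. The helper
# name `_demo_predict_proba` is hypothetical.
def _demo_predict_proba():
    rng = np.random.RandomState(0)
    X = rng.randn(60, 4)
    y = np.arange(60) % 3
    ovr = LogisticRegression(multi_class='ovr', solver='lbfgs').fit(X, y)
    mnl = LogisticRegression(multi_class='multinomial',
                             solver='lbfgs').fit(X, y)
    p_ovr, p_mnl = ovr.predict_proba(X), mnl.predict_proba(X)
    assert np.allclose(p_ovr.sum(axis=1), 1.0)
    assert np.allclose(p_mnl.sum(axis=1), 1.0)
    return p_ovr, p_mnl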
class LogisticRegressionCV(LogisticRegression, BaseEstimator,
LinearClassifierMixin, _LearntSelectorMixin):
"""Logistic Regression CV (aka logit, MaxEnt) classifier.
    This class implements logistic regression using liblinear, newton-cg, sag
    or lbfgs optimizers. The newton-cg, sag and lbfgs solvers support only L2
regularization with primal formulation. The liblinear solver supports both
L1 and L2 regularization, with a dual formulation only for the L2 penalty.
For the grid of Cs values (that are set by default to be ten values in
a logarithmic scale between 1e-4 and 1e4), the best hyperparameter is
selected by the cross-validator StratifiedKFold, but it can be changed
    using the cv parameter. In the case of the newton-cg and lbfgs solvers,
    we warm start along the path, i.e. the initial coefficients of the
    present fit are taken to be the coefficients obtained after convergence
    in the previous fit, which is expected to be faster for high-dimensional
    dense data.
    For a multiclass problem, the hyperparameters for each class are computed
    using the best scores obtained by doing a one-vs-rest in parallel across
    all folds and classes. Hence this is not the true multinomial loss.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
Cs : list of floats | int
Each of the values in Cs describes the inverse of regularization
        strength. If Cs is an int, then a grid of Cs values is chosen
        in a logarithmic scale between 1e-4 and 1e4.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
.. versionadded:: 0.17
class_weight == 'balanced'
cv : integer or cross-validation generator
The default cross-validation generator used is Stratified K-Folds.
If an integer is provided, then it is the number of folds used.
See the module :mod:`sklearn.model_selection` module for the
list of possible cross-validation objects.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
    scoring : callable
Scoring function to use as cross-validation criteria. For a list of
scoring functions that can be used, look at :mod:`sklearn.metrics`.
The default scoring option used is accuracy_score.
solver : {'newton-cg', 'lbfgs', 'liblinear', 'sag'}
Algorithm to use in the optimization problem.
- For small datasets, 'liblinear' is a good choice, whereas 'sag' is
faster for large ones.
- For multiclass problems, only 'newton-cg', 'sag' and 'lbfgs' handle
multinomial loss; 'liblinear' is limited to one-versus-rest
schemes.
- 'newton-cg', 'lbfgs' and 'sag' only handle L2 penalty.
- 'liblinear' might be slower in LogisticRegressionCV because it does
not handle warm-starting.
Note that 'sag' fast convergence is only guaranteed on features with
approximately the same scale. You can preprocess the data with a
scaler from sklearn.preprocessing.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
tol : float, optional
Tolerance for stopping criteria.
max_iter : int, optional
Maximum number of iterations of the optimization algorithm.
n_jobs : int, optional
Number of CPU cores used during the cross-validation loop. If given
a value of -1, all cores are used.
verbose : int
For the 'liblinear', 'sag' and 'lbfgs' solvers set verbose to any
positive number for verbosity.
refit : bool
If set to True, the scores are averaged across all folds, and the
coefs and the C that corresponds to the best score is taken, and a
final refit is done using these parameters.
Otherwise the coefs, intercepts and C that correspond to the
best scores across folds are averaged.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'newton-cg',
        'sag' and 'lbfgs' solvers.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
intercept_scaling : float, default 1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
Attributes
----------
coef_ : array, shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
`coef_` is of shape (1, n_features) when the given problem
is binary.
intercept_ : array, shape (1,) or (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
`intercept_` is of shape(1,) when the problem is binary.
Cs_ : array
Array of C i.e. inverse of regularization parameter values used
for cross-validation.
coefs_paths_ : array, shape ``(n_folds, len(Cs_), n_features)`` or \
``(n_folds, len(Cs_), n_features + 1)``
dict with classes as the keys, and the path of coefficients obtained
during cross-validating across each fold and then across each Cs
after doing an OvR for the corresponding class as values.
If the 'multi_class' option is set to 'multinomial', then
the coefs_paths are the coefficients corresponding to each class.
Each dict value has shape ``(n_folds, len(Cs_), n_features)`` or
``(n_folds, len(Cs_), n_features + 1)`` depending on whether the
intercept is fit or not.
scores_ : dict
dict with classes as the keys, and the values as the
grid of scores obtained during cross-validating each fold, after doing
an OvR for the corresponding class. If the 'multi_class' option
given is 'multinomial' then the same scores are repeated across
all classes, since this is the multinomial class.
Each dict value has shape (n_folds, len(Cs))
C_ : array, shape (n_classes,) or (n_classes - 1,)
Array of C that maps to the best scores across every class. If refit is
set to False, then for each class, the best C is the average of the
C's that correspond to the best scores for each fold.
n_iter_ : array, shape (n_classes, n_folds, n_cs) or (1, n_folds, n_cs)
Actual number of iterations for all classes, folds and Cs.
In the binary or multinomial cases, the first dimension is equal to 1.
See also
--------
LogisticRegression
"""
def __init__(self, Cs=10, fit_intercept=True, cv=None, dual=False,
penalty='l2', scoring=None, solver='lbfgs', tol=1e-4,
max_iter=100, class_weight=None, n_jobs=1, verbose=0,
refit=True, intercept_scaling=1., multi_class='ovr',
random_state=None):
self.Cs = Cs
self.fit_intercept = fit_intercept
self.cv = cv
self.dual = dual
self.penalty = penalty
self.scoring = scoring
self.tol = tol
self.max_iter = max_iter
self.class_weight = class_weight
self.n_jobs = n_jobs
self.verbose = verbose
self.solver = solver
self.refit = refit
self.intercept_scaling = intercept_scaling
self.multi_class = multi_class
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
self : object
Returns self.
"""
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64,
order="C")
if self.solver == 'sag':
max_squared_sum = row_norms(X, squared=True).max()
else:
max_squared_sum = None
check_classification_targets(y)
if y.ndim == 2 and y.shape[1] == 1:
warnings.warn(
"A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning)
y = np.ravel(y)
check_consistent_length(X, y)
# init cross-validation generator
cv = check_cv(self.cv, y, classifier=True)
folds = list(cv.split(X, y))
self._enc = LabelEncoder()
self._enc.fit(y)
labels = self.classes_ = np.unique(y)
n_classes = len(labels)
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % self.classes_[0])
if n_classes == 2:
# OvR in case of binary problems is as good as fitting
# the higher label
n_classes = 1
labels = labels[1:]
# We need this hack to iterate only once over labels, in the case of
# multi_class = multinomial, without changing the value of the labels.
iter_labels = labels
if self.multi_class == 'multinomial':
iter_labels = [None]
if self.class_weight and not(isinstance(self.class_weight, dict) or
self.class_weight in
['balanced', 'auto']):
# 'auto' is deprecated and will be removed in 0.19
raise ValueError("class_weight provided should be a "
"dict or 'balanced'")
# compute the class weights for the entire dataset y
if self.class_weight in ("auto", "balanced"):
classes = np.unique(y)
class_weight = compute_class_weight(self.class_weight, classes, y)
class_weight = dict(zip(classes, class_weight))
else:
class_weight = self.class_weight
path_func = delayed(_log_reg_scoring_path)
# The SAG solver releases the GIL so it's more efficient to use
# threads for this solver.
backend = 'threading' if self.solver == 'sag' else 'multiprocessing'
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend=backend)(
path_func(X, y, train, test, pos_class=label, Cs=self.Cs,
fit_intercept=self.fit_intercept, penalty=self.penalty,
dual=self.dual, solver=self.solver, tol=self.tol,
max_iter=self.max_iter, verbose=self.verbose,
class_weight=class_weight, scoring=self.scoring,
multi_class=self.multi_class,
intercept_scaling=self.intercept_scaling,
random_state=self.random_state,
max_squared_sum=max_squared_sum,
sample_weight=sample_weight
)
for label in iter_labels
for train, test in folds)
if self.multi_class == 'multinomial':
multi_coefs_paths, Cs, multi_scores, n_iter_ = zip(*fold_coefs_)
multi_coefs_paths = np.asarray(multi_coefs_paths)
multi_scores = np.asarray(multi_scores)
# This is just to maintain API similarity between the ovr and
# multinomial option.
            # Coefs_paths is now n_folds X len(Cs) X n_classes X n_features
# we need it to be n_classes X len(Cs) X n_folds X n_features
# to be similar to "ovr".
coefs_paths = np.rollaxis(multi_coefs_paths, 2, 0)
# Multinomial has a true score across all labels. Hence the
# shape is n_folds X len(Cs). We need to repeat this score
# across all labels for API similarity.
scores = np.tile(multi_scores, (n_classes, 1, 1))
self.Cs_ = Cs[0]
self.n_iter_ = np.reshape(n_iter_, (1, len(folds),
len(self.Cs_)))
else:
coefs_paths, Cs, scores, n_iter_ = zip(*fold_coefs_)
self.Cs_ = Cs[0]
coefs_paths = np.reshape(coefs_paths, (n_classes, len(folds),
len(self.Cs_), -1))
self.n_iter_ = np.reshape(n_iter_, (n_classes, len(folds),
len(self.Cs_)))
self.coefs_paths_ = dict(zip(labels, coefs_paths))
scores = np.reshape(scores, (n_classes, len(folds), -1))
self.scores_ = dict(zip(labels, scores))
self.C_ = list()
self.coef_ = np.empty((n_classes, X.shape[1]))
self.intercept_ = np.zeros(n_classes)
# hack to iterate only once for multinomial case.
if self.multi_class == 'multinomial':
scores = multi_scores
coefs_paths = multi_coefs_paths
for index, label in enumerate(iter_labels):
if self.multi_class == 'ovr':
scores = self.scores_[label]
coefs_paths = self.coefs_paths_[label]
if self.refit:
best_index = scores.sum(axis=0).argmax()
C_ = self.Cs_[best_index]
self.C_.append(C_)
if self.multi_class == 'multinomial':
coef_init = np.mean(coefs_paths[:, best_index, :, :],
axis=0)
else:
coef_init = np.mean(coefs_paths[:, best_index, :], axis=0)
w, _, _ = logistic_regression_path(
X, y, pos_class=label, Cs=[C_], solver=self.solver,
fit_intercept=self.fit_intercept, coef=coef_init,
max_iter=self.max_iter, tol=self.tol,
penalty=self.penalty, copy=False,
class_weight=class_weight,
multi_class=self.multi_class,
verbose=max(0, self.verbose - 1),
random_state=self.random_state,
check_input=False, max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
w = w[0]
else:
# Take the best scores across every fold and the average of all
# coefficients corresponding to the best scores.
best_indices = np.argmax(scores, axis=1)
w = np.mean([coefs_paths[i][best_indices[i]]
for i in range(len(folds))], axis=0)
self.C_.append(np.mean(self.Cs_[best_indices]))
if self.multi_class == 'multinomial':
self.C_ = np.tile(self.C_, n_classes)
self.coef_ = w[:, :X.shape[1]]
if self.fit_intercept:
self.intercept_ = w[:, -1]
else:
self.coef_[index] = w[: X.shape[1]]
if self.fit_intercept:
self.intercept_[index] = w[-1]
self.C_ = np.asarray(self.C_)
return self
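# --- Illustrative usage sketch (not part of the original module) ----------
# A hedged example of LogisticRegressionCV on a toy binary problem: the
# default StratifiedKFold (here 3 folds) scores every value of C per fold,
# the selected value is exposed in C_, and the per-fold score grid for the
# positive class is stored in scores_. `_demo_logistic_cv` is hypothetical.
def _demo_logistic_cv():
    rng = np.random.RandomState(0)
    X = rng.randn(100, 3)
    y = (X[:, 0] - X[:, 1] + 0.3 * rng.randn(100) > 0).astype(int)
    clf = LogisticRegressionCV(Cs=5, cv=3, solver='lbfgs').fit(X, y)
    assert clf.C_.shape == (1,)            # one C for the binary problem
    assert clf.scores_[1].shape == (3, 5)  # (n_folds, len(Cs))
    return clf.C_, clf.scores_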
| bsd-3-clause |
Jozhogg/iris | docs/iris/example_code/Meteorology/lagged_ensemble.py | 6 | 5936 | """
Seasonal ensemble model plots
=============================
This example demonstrates the loading of a lagged ensemble dataset from the GloSea4 model, which is then used to
produce two types of plot:
* The first shows the "postage stamp" style image with an array of 14 images, one for each ensemble member with
a shared colorbar. (The missing image in this example represents ensemble member number 6 which was a failed run)
* The second plot shows the data limited to a region of interest, in this case a region defined for forecasting
ENSO (El Nino-Southern Oscillation), which, for the purposes of this example, has had the ensemble mean subtracted
from each ensemble member to give an anomaly surface temperature. In practice a better approach would be to take the
climatological mean, calibrated to the model, from each ensemble member.
"""
import matplotlib.pyplot as plt
import numpy as np
import iris
import iris.plot as iplt
def realization_metadata(cube, field, fname):
"""
A function which modifies the cube's metadata to add a "realization" (ensemble member) coordinate from the filename if one
doesn't already exist in the cube.
"""
# add an ensemble member coordinate if one doesn't already exist
if not cube.coords('realization'):
# the ensemble member is encoded in the filename as *_???.pp where ??? is the ensemble member
realization_number = fname[-6:-3]
import iris.coords
realization_coord = iris.coords.AuxCoord(np.int32(realization_number), 'realization')
cube.add_aux_coord(realization_coord)
def main():
# extract surface temperature cubes which have an ensemble member coordinate, adding appropriate lagged ensemble metadata
surface_temp = iris.load_cube(iris.sample_data_path('GloSea4', 'ensemble_???.pp'),
iris.Constraint('surface_temperature', realization=lambda value: True),
callback=realization_metadata,
)
# ----------------------------------------------------------------------------------------------------------------
# Plot #1: Ensemble postage stamps
# ----------------------------------------------------------------------------------------------------------------
# for the purposes of this example, take the last time element of the cube
last_timestep = surface_temp[:, -1, :, :]
# Make 50 evenly spaced levels which span the dataset
contour_levels = np.linspace(np.min(last_timestep.data), np.max(last_timestep.data), 50)
# Create a wider than normal figure to support our many plots
plt.figure(figsize=(12, 6), dpi=100)
# Also manually adjust the spacings which are used when creating subplots
plt.gcf().subplots_adjust(hspace=0.05, wspace=0.05, top=0.95, bottom=0.05, left=0.075, right=0.925)
# iterate over all possible latitude longitude slices
for cube in last_timestep.slices(['latitude', 'longitude']):
# get the ensemble member number from the ensemble coordinate
ens_member = cube.coord('realization').points[0]
# plot the data in a 4x4 grid, with each plot's position in the grid being determined by ensemble member number
# the special case for the 13th ensemble member is to have the plot at the bottom right
if ens_member == 13:
plt.subplot(4, 4, 16)
else:
plt.subplot(4, 4, ens_member+1)
cf = iplt.contourf(cube, contour_levels)
# add coastlines
plt.gca().coastlines()
# make an axes to put the shared colorbar in
colorbar_axes = plt.gcf().add_axes([0.35, 0.1, 0.3, 0.05])
colorbar = plt.colorbar(cf, colorbar_axes, orientation='horizontal')
colorbar.set_label('%s' % last_timestep.units)
# limit the colorbar to 8 tick marks
import matplotlib.ticker
colorbar.locator = matplotlib.ticker.MaxNLocator(8)
colorbar.update_ticks()
# get the time for the entire plot
time_coord = last_timestep.coord('time')
time = time_coord.units.num2date(time_coord.bounds[0, 0])
    # set a global title for the postage stamps with the date formatted by "monthname year"
plt.suptitle('Surface temperature ensemble forecasts for %s' % time.strftime('%B %Y'))
iplt.show()
# ----------------------------------------------------------------------------------------------------------------
# Plot #2: ENSO plumes
# ----------------------------------------------------------------------------------------------------------------
# Nino 3.4 lies between: 170W and 120W, 5N and 5S, so define a constraint which matches this
nino_3_4_constraint = iris.Constraint(longitude=lambda v: -170+360 <= v <= -120+360, latitude=lambda v: -5 <= v <= 5)
nino_cube = surface_temp.extract(nino_3_4_constraint)
# Subsetting a circular longitude coordinate always results in a circular coordinate, so set the coordinate to be non-circular
nino_cube.coord('longitude').circular = False
# Calculate the horizontal mean for the nino region
mean = nino_cube.collapsed(['latitude', 'longitude'], iris.analysis.MEAN)
# Calculate the ensemble mean of the horizontal mean. To do this, remove the "forecast_period" and
# "forecast_reference_time" coordinates which span both "relalization" and "time".
mean.remove_coord("forecast_reference_time")
mean.remove_coord("forecast_period")
ensemble_mean = mean.collapsed('realization', iris.analysis.MEAN)
# take the ensemble mean from each ensemble member
mean -= ensemble_mean.data
plt.figure()
for ensemble_member in mean.slices(['time']):
# draw each ensemble member as a dashed line in black
iplt.plot(ensemble_member, '--k')
plt.title('Mean temperature anomaly for ENSO 3.4 region')
plt.xlabel('Time')
plt.ylabel('Temperature anomaly / K')
iplt.show()
if __name__ == '__main__':
main()
| lgpl-3.0 |
mne-tools/mne-tools.github.io | 0.13/_downloads/plot_compute_source_psd_epochs.py | 19 | 2826 | """
=====================================================================
Compute Power Spectral Density of inverse solution from single epochs
=====================================================================
Compute PSD of dSPM inverse solution on single trial epochs restricted
to a brain label. The PSD is computed using a multi-taper method with
Discrete Prolate Spheroidal Sequence (DPSS) windows.
"""
# Author: Martin Luessi <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator, compute_source_psd_epochs
print(__doc__)
data_path = sample.data_path()
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_raw = data_path + '/MEG/sample/sample_audvis_raw.fif'
fname_event = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
label_name = 'Aud-lh'
fname_label = data_path + '/MEG/sample/labels/%s.label' % label_name
event_id, tmin, tmax = 1, -0.2, 0.5
snr = 1.0 # use smaller SNR for raw data
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE or sLORETA)
# Load data
inverse_operator = read_inverse_operator(fname_inv)
label = mne.read_label(fname_label)
raw = mne.io.read_raw_fif(fname_raw)
events = mne.read_events(fname_event)
# Set up pick list
include = []
raw.info['bads'] += ['EEG 053'] # bads + 1 more
# pick MEG channels
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
include=include, exclude='bads')
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=dict(mag=4e-12, grad=4000e-13,
eog=150e-6))
# define frequencies of interest
fmin, fmax = 0., 70.
bandwidth = 4. # bandwidth of the windows in Hz
# compute source space psd in label
# Note: By using "return_generator=True" stcs will be a generator object
# instead of a list. This allows us to iterate without having to
# keep everything in memory.
stcs = compute_source_psd_epochs(epochs, inverse_operator, lambda2=lambda2,
method=method, fmin=fmin, fmax=fmax,
bandwidth=bandwidth, label=label,
return_generator=True)
# compute average PSD over the first 10 epochs
n_epochs = 10
for i, stc in enumerate(stcs):
if i >= n_epochs:
break
if i == 0:
psd_avg = np.mean(stc.data, axis=0)
else:
psd_avg += np.mean(stc.data, axis=0)
psd_avg /= n_epochs
freqs = stc.times # the frequencies are stored here
plt.figure()
plt.plot(freqs, psd_avg)
plt.xlabel('Freq (Hz)')
plt.ylabel('Power Spectral Density')
plt.show()
| bsd-3-clause |
platinhom/ManualHom | Coding/Python/scipy-html-0.16.1/generated/scipy-stats-halfnorm-1.py | 1 | 1060 | from scipy.stats import halfnorm
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 1)
# Calculate a few first moments:
mean, var, skew, kurt = halfnorm.stats(moments='mvsk')
# Display the probability density function (``pdf``):
x = np.linspace(halfnorm.ppf(0.01),
halfnorm.ppf(0.99), 100)
ax.plot(x, halfnorm.pdf(x),
'r-', lw=5, alpha=0.6, label='halfnorm pdf')
# Alternatively, the distribution object can be called (as a function)
# to fix the shape, location and scale parameters. This returns a "frozen"
# RV object holding the given parameters fixed.
# Freeze the distribution and display the frozen ``pdf``:
rv = halfnorm()
ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
# Check accuracy of ``cdf`` and ``ppf``:
vals = halfnorm.ppf([0.001, 0.5, 0.999])
np.allclose([0.001, 0.5, 0.999], halfnorm.cdf(vals))
# True
# Generate random numbers:
r = halfnorm.rvs(size=1000)
# And compare the histogram:
ax.hist(r, normed=True, histtype='stepfilled', alpha=0.2)
ax.legend(loc='best', frameon=False)
plt.show()
| gpl-2.0 |
jpautom/scikit-learn | sklearn/gaussian_process/gpr.py | 43 | 18642 | """Gaussian processes regression. """
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import warnings
from operator import itemgetter
import numpy as np
from scipy.linalg import cholesky, cho_solve, solve_triangular
from scipy.optimize import fmin_l_bfgs_b
from sklearn.base import BaseEstimator, RegressorMixin, clone
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
from sklearn.utils import check_random_state
from sklearn.utils.validation import check_X_y, check_array
class GaussianProcessRegressor(BaseEstimator, RegressorMixin):
"""Gaussian process regression (GPR).
The implementation is based on Algorithm 2.1 of Gaussian Processes
for Machine Learning (GPML) by Rasmussen and Williams.
In addition to standard sklearn estimator API, GaussianProcessRegressor:
* allows prediction without prior fitting (based on the GP prior)
* provides an additional method sample_y(X), which evaluates samples
drawn from the GPR (prior or posterior) at given inputs
* exposes a method log_marginal_likelihood(theta), which can be used
externally for other ways of selecting hyperparameters, e.g., via
Markov chain Monte Carlo.
Parameters
----------
kernel : kernel object
The kernel specifying the covariance function of the GP. If None is
passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
the kernel's hyperparameters are optimized during fitting.
alpha : float or array-like, optional (default: 1e-10)
Value added to the diagonal of the kernel matrix during fitting.
Larger values correspond to increased noise level in the observations
and reduce potential numerical issue during fitting. If an array is
passed, it must have the same number of entries as the data used for
fitting and is used as datapoint-dependent noise level. Note that this
        is equivalent to adding a WhiteKernel with c=alpha. Allowing the
        noise level to be specified directly as a parameter is mainly for
        convenience and for consistency with Ridge.
optimizer : string or callable, optional (default: "fmin_l_bfgs_b")
Can either be one of the internally supported optimizers for optimizing
the kernel's parameters, specified by a string, or an externally
defined optimizer passed as a callable. If a callable is passed, it
must have the signature::
def optimizer(obj_func, initial_theta, bounds):
                # * 'obj_func' is the objective function to be minimized, which
# takes the hyperparameters theta as parameter and an
# optional flag eval_gradient, which determines if the
# gradient is returned additionally to the function value
# * 'initial_theta': the initial value for theta, which can be
# used by local optimizers
# * 'bounds': the bounds on the values of theta
....
# Returned are the best found hyperparameters theta and
# the corresponding value of the target function.
return theta_opt, func_min
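        As an illustration only (a sketch, not a supported helper; the name
        'custom_optimizer' is arbitrary), such a callable could wrap
        scipy.optimize.minimize::
            def custom_optimizer(obj_func, initial_theta, bounds):
                from scipy.optimize import minimize
                # obj_func returns (value, gradient) by default, hence jac=True
                res = minimize(obj_func, initial_theta, bounds=bounds,
                               jac=True, method='L-BFGS-B')
                return res.x, res.fun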
Per default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize
is used. If None is passed, the kernel's parameters are kept fixed.
Available internal optimizers are::
'fmin_l_bfgs_b'
n_restarts_optimizer: int, optional (default: 0)
The number of restarts of the optimizer for finding the kernel's
parameters which maximize the log-marginal likelihood. The first run
of the optimizer is performed from the kernel's initial parameters,
the remaining ones (if any) from thetas sampled log-uniform randomly
from the space of allowed theta-values. If greater than 0, all bounds
must be finite. Note that n_restarts_optimizer == 0 implies that one
run is performed.
normalize_y: boolean, optional (default: False)
Whether the target values y are normalized, i.e., the mean of the
        observed target values becomes zero. This parameter should be set to
        True if the target values' mean is expected to differ considerably from
zero. When enabled, the normalization effectively modifies the GP's
prior based on the data, which contradicts the likelihood principle;
normalization is thus disabled per default.
copy_X_train : bool, optional (default: True)
If True, a persistent copy of the training data is stored in the
object. Otherwise, just a reference to the training data is stored,
which might cause predictions to change if the data is modified
externally.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Attributes
----------
X_train_ : array-like, shape = (n_samples, n_features)
Feature values in training data (also required for prediction)
y_train_: array-like, shape = (n_samples, [n_output_dims])
Target values in training data (also required for prediction)
kernel_: kernel object
The kernel used for prediction. The structure of the kernel is the
same as the one passed as parameter but with optimized hyperparameters
L_: array-like, shape = (n_samples, n_samples)
Lower-triangular Cholesky decomposition of the kernel in ``X_train_``
alpha_: array-like, shape = (n_samples,)
Dual coefficients of training data points in kernel space
log_marginal_likelihood_value_: float
The log-marginal-likelihood of ``self.kernel_.theta``
"""
def __init__(self, kernel=None, alpha=1e-10,
optimizer="fmin_l_bfgs_b", n_restarts_optimizer=0,
normalize_y=False, copy_X_train=True, random_state=None):
self.kernel = kernel
self.alpha = alpha
self.optimizer = optimizer
self.n_restarts_optimizer = n_restarts_optimizer
self.normalize_y = normalize_y
self.copy_X_train = copy_X_train
self.random_state = random_state
def fit(self, X, y):
"""Fit Gaussian process regression model
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Training data
y : array-like, shape = (n_samples, [n_output_dims])
Target values
Returns
-------
self : returns an instance of self.
"""
if self.kernel is None: # Use an RBF kernel as default
self.kernel_ = C(1.0, constant_value_bounds="fixed") \
* RBF(1.0, length_scale_bounds="fixed")
else:
self.kernel_ = clone(self.kernel)
self.rng = check_random_state(self.random_state)
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
# Normalize target value
if self.normalize_y:
self.y_train_mean = np.mean(y, axis=0)
# demean y
y = y - self.y_train_mean
else:
self.y_train_mean = np.zeros(1)
if np.iterable(self.alpha) \
and self.alpha.shape[0] != y.shape[0]:
if self.alpha.shape[0] == 1:
self.alpha = self.alpha[0]
else:
                raise ValueError("alpha must be a scalar or an array"
                                 " with the same number of entries as y."
                                 " (%d != %d)"
                                 % (self.alpha.shape[0], y.shape[0]))
self.X_train_ = np.copy(X) if self.copy_X_train else X
self.y_train_ = np.copy(y) if self.copy_X_train else y
if self.optimizer is not None and self.kernel_.n_dims > 0:
# Choose hyperparameters based on maximizing the log-marginal
# likelihood (potentially starting from several initial values)
def obj_func(theta, eval_gradient=True):
if eval_gradient:
lml, grad = self.log_marginal_likelihood(
theta, eval_gradient=True)
return -lml, -grad
else:
return -self.log_marginal_likelihood(theta)
# First optimize starting from theta specified in kernel
optima = [(self._constrained_optimization(obj_func,
self.kernel_.theta,
self.kernel_.bounds))]
# Additional runs are performed from log-uniform chosen initial
# theta
if self.n_restarts_optimizer > 0:
if not np.isfinite(self.kernel_.bounds).all():
raise ValueError(
"Multiple optimizer restarts (n_restarts_optimizer>0) "
"requires that all bounds are finite.")
bounds = self.kernel_.bounds
for iteration in range(self.n_restarts_optimizer):
theta_initial = \
self.rng.uniform(bounds[:, 0], bounds[:, 1])
optima.append(
self._constrained_optimization(obj_func, theta_initial,
bounds))
# Select result from run with minimal (negative) log-marginal
# likelihood
lml_values = list(map(itemgetter(1), optima))
self.kernel_.theta = optima[np.argmin(lml_values)][0]
self.log_marginal_likelihood_value_ = -np.min(lml_values)
else:
self.log_marginal_likelihood_value_ = \
self.log_marginal_likelihood(self.kernel_.theta)
# Precompute quantities required for predictions which are independent
# of actual query points
K = self.kernel_(self.X_train_)
K[np.diag_indices_from(K)] += self.alpha
self.L_ = cholesky(K, lower=True) # Line 2
self.alpha_ = cho_solve((self.L_, True), self.y_train_) # Line 3
return self
def predict(self, X, return_std=False, return_cov=False):
"""Predict using the Gaussian process regression model
We can also predict based on an unfitted model by using the GP prior.
        In addition to the mean of the predictive distribution, its standard
        deviation (return_std=True) or covariance (return_cov=True) can also
        be returned.
Note that at most one of the two can be requested.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Query points where the GP is evaluated
return_std : bool, default: False
If True, the standard-deviation of the predictive distribution at
the query points is returned along with the mean.
return_cov : bool, default: False
If True, the covariance of the joint predictive distribution at
the query points is returned along with the mean
Returns
-------
y_mean : array, shape = (n_samples, [n_output_dims])
            Mean of predictive distribution at query points
y_std : array, shape = (n_samples,), optional
Standard deviation of predictive distribution at query points.
Only returned when return_std is True.
y_cov : array, shape = (n_samples, n_samples), optional
            Covariance of joint predictive distribution at query points.
Only returned when return_cov is True.
"""
if return_std and return_cov:
raise RuntimeError(
"Not returning standard deviation of predictions when "
"returning full covariance.")
X = check_array(X)
        if not hasattr(self, "X_train_"):  # Unfitted; predict based on GP prior
y_mean = np.zeros(X.shape[0])
if return_cov:
y_cov = self.kernel(X)
return y_mean, y_cov
elif return_std:
y_var = self.kernel.diag(X)
return y_mean, np.sqrt(y_var)
else:
return y_mean
else: # Predict based on GP posterior
K_trans = self.kernel_(X, self.X_train_)
y_mean = K_trans.dot(self.alpha_) # Line 4 (y_mean = f_star)
            y_mean = self.y_train_mean + y_mean  # undo normalization
if return_cov:
v = cho_solve((self.L_, True), K_trans.T) # Line 5
y_cov = self.kernel_(X) - K_trans.dot(v) # Line 6
return y_mean, y_cov
elif return_std:
# compute inverse K_inv of K based on its Cholesky
# decomposition L and its inverse L_inv
L_inv = solve_triangular(self.L_.T, np.eye(self.L_.shape[0]))
K_inv = L_inv.dot(L_inv.T)
# Compute variance of predictive distribution
y_var = self.kernel_.diag(X)
y_var -= np.einsum("ki,kj,ij->k", K_trans, K_trans, K_inv)
# Check if any of the variances is negative because of
# numerical issues. If yes: set the variance to 0.
y_var_negative = y_var < 0
if np.any(y_var_negative):
warnings.warn("Predicted variances smaller than 0. "
"Setting those variances to 0.")
y_var[y_var_negative] = 0.0
return y_mean, np.sqrt(y_var)
else:
return y_mean
def sample_y(self, X, n_samples=1, random_state=0):
"""Draw samples from Gaussian process and evaluate at X.
Parameters
----------
X : array-like, shape = (n_samples_X, n_features)
Query points where the GP samples are evaluated
n_samples : int, default: 1
The number of samples drawn from the Gaussian process
random_state: RandomState or an int seed (0 by default)
A random number generator instance
Returns
-------
y_samples : array, shape = (n_samples_X, [n_output_dims], n_samples)
Values of n_samples samples drawn from Gaussian process and
evaluated at query points.
"""
rng = check_random_state(random_state)
y_mean, y_cov = self.predict(X, return_cov=True)
if y_mean.ndim == 1:
y_samples = rng.multivariate_normal(y_mean, y_cov, n_samples).T
else:
y_samples = \
[rng.multivariate_normal(y_mean[:, i], y_cov,
n_samples).T[:, np.newaxis]
for i in range(y_mean.shape[1])]
y_samples = np.hstack(y_samples)
return y_samples
def log_marginal_likelihood(self, theta=None, eval_gradient=False):
"""Returns log-marginal likelihood of theta for training data.
Parameters
----------
theta : array-like, shape = (n_kernel_params,) or None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default: False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. If True, theta must not be None.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : array, shape = (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when eval_gradient is True.
"""
if theta is None:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
kernel = self.kernel_.clone_with_theta(theta)
if eval_gradient:
K, K_gradient = kernel(self.X_train_, eval_gradient=True)
else:
K = kernel(self.X_train_)
K[np.diag_indices_from(K)] += self.alpha
try:
L = cholesky(K, lower=True) # Line 2
except np.linalg.LinAlgError:
return (-np.inf, np.zeros_like(theta)) \
if eval_gradient else -np.inf
# Support multi-dimensional output of self.y_train_
y_train = self.y_train_
if y_train.ndim == 1:
y_train = y_train[:, np.newaxis]
alpha = cho_solve((L, True), y_train) # Line 3
# Compute log-likelihood (compare line 7)
log_likelihood_dims = -0.5 * np.einsum("ik,ik->k", y_train, alpha)
log_likelihood_dims -= np.log(np.diag(L)).sum()
log_likelihood_dims -= K.shape[0] / 2 * np.log(2 * np.pi)
log_likelihood = log_likelihood_dims.sum(-1) # sum over dimensions
if eval_gradient: # compare Equation 5.9 from GPML
tmp = np.einsum("ik,jk->ijk", alpha, alpha) # k: output-dimension
tmp -= cho_solve((L, True), np.eye(K.shape[0]))[:, :, np.newaxis]
# Compute "0.5 * trace(tmp.dot(K_gradient))" without
# constructing the full matrix tmp.dot(K_gradient) since only
# its diagonal is required
log_likelihood_gradient_dims = \
0.5 * np.einsum("ijl,ijk->kl", tmp, K_gradient)
log_likelihood_gradient = log_likelihood_gradient_dims.sum(-1)
if eval_gradient:
return log_likelihood, log_likelihood_gradient
else:
return log_likelihood
def _constrained_optimization(self, obj_func, initial_theta, bounds):
if self.optimizer == "fmin_l_bfgs_b":
theta_opt, func_min, convergence_dict = \
fmin_l_bfgs_b(obj_func, initial_theta, bounds=bounds)
if convergence_dict["warnflag"] != 0:
warnings.warn("fmin_l_bfgs_b terminated abnormally with the "
" state: %s" % convergence_dict)
elif callable(self.optimizer):
theta_opt, func_min = \
self.optimizer(obj_func, initial_theta, bounds=bounds)
else:
raise ValueError("Unknown optimizer %s." % self.optimizer)
return theta_opt, func_min
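# Illustrative usage sketch appended for clarity (not part of the original
# scikit-learn module; the variable names below are arbitrary): fit the
# regressor on a noisy 1-d toy problem and query the posterior mean and
# standard deviation.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X_demo = rng.uniform(0, 5, size=(20, 1))
    y_demo = np.sin(X_demo).ravel() + 0.1 * rng.randn(20)
    # kernel mirrors the documented default form: constant * RBF
    gpr = GaussianProcessRegressor(kernel=C(1.0) * RBF(length_scale=1.0),
                                   alpha=1e-2)
    gpr.fit(X_demo, y_demo)
    X_query = np.linspace(0, 5, 100).reshape(-1, 1)
    y_mean, y_std = gpr.predict(X_query, return_std=True)
    print("first query point: mean=%.3f, std=%.3f" % (y_mean[0], y_std[0]))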
| bsd-3-clause |
vighneshbirodkar/scikit-image | skimage/viewer/tests/test_tools.py | 16 | 5682 | from collections import namedtuple
import numpy as np
from numpy.testing import assert_equal
from numpy.testing.decorators import skipif
from skimage import data
from skimage.viewer import ImageViewer, has_qt
from skimage.viewer.canvastools import (
LineTool, ThickLineTool, RectangleTool, PaintTool)
from skimage.viewer.canvastools.base import CanvasToolBase
try:
from matplotlib.testing.decorators import cleanup
except ImportError:
def cleanup(func):
return func
def get_end_points(image):
h, w = image.shape[0:2]
x = [w / 3, 2 * w / 3]
y = [h / 2] * 2
return np.transpose([x, y])
def do_event(viewer, etype, button=1, xdata=0, ydata=0, key=None):
"""
*name*
the event name
*canvas*
the FigureCanvas instance generating the event
*guiEvent*
the GUI event that triggered the matplotlib event
*x*
x position - pixels from left of canvas
*y*
y position - pixels from bottom of canvas
*inaxes*
the :class:`~matplotlib.axes.Axes` instance if mouse is over axes
*xdata*
x coord of mouse in data coords
*ydata*
y coord of mouse in data coords
*button*
button pressed None, 1, 2, 3, 'up', 'down' (up and down are used
for scroll events)
*key*
the key depressed when the mouse event triggered (see
:class:`KeyEvent`)
*step*
number of scroll steps (positive for 'up', negative for 'down')
"""
ax = viewer.ax
event = namedtuple('Event',
('name canvas guiEvent x y inaxes xdata ydata '
'button key step'))
event.button = button
event.x, event.y = ax.transData.transform((xdata, ydata))
event.xdata, event.ydata = xdata, ydata
event.inaxes = ax
event.canvas = ax.figure.canvas
event.key = key
event.step = 1
event.guiEvent = None
event.name = 'Custom'
func = getattr(viewer._event_manager, 'on_%s' % etype)
func(event)
@cleanup
@skipif(not has_qt)
def test_line_tool():
img = data.camera()
viewer = ImageViewer(img)
tool = LineTool(viewer, maxdist=10, line_props=dict(linewidth=3),
handle_props=dict(markersize=5))
tool.end_points = get_end_points(img)
assert_equal(tool.end_points, np.array([[170, 256], [341, 256]]))
# grab a handle and move it
do_event(viewer, 'mouse_press', xdata=170, ydata=256)
do_event(viewer, 'move', xdata=180, ydata=260)
do_event(viewer, 'mouse_release')
assert_equal(tool.geometry, np.array([[180, 260], [341, 256]]))
# create a new line
do_event(viewer, 'mouse_press', xdata=10, ydata=10)
do_event(viewer, 'move', xdata=100, ydata=100)
do_event(viewer, 'mouse_release')
assert_equal(tool.geometry, np.array([[100, 100], [10, 10]]))
@cleanup
@skipif(not has_qt)
def test_thick_line_tool():
img = data.camera()
viewer = ImageViewer(img)
tool = ThickLineTool(viewer, maxdist=10, line_props=dict(color='red'),
handle_props=dict(markersize=5))
tool.end_points = get_end_points(img)
do_event(viewer, 'scroll', button='up')
assert_equal(tool.linewidth, 2)
do_event(viewer, 'scroll', button='down')
assert_equal(tool.linewidth, 1)
do_event(viewer, 'key_press', key='+')
assert_equal(tool.linewidth, 2)
do_event(viewer, 'key_press', key='-')
assert_equal(tool.linewidth, 1)
@cleanup
@skipif(not has_qt)
def test_rect_tool():
img = data.camera()
viewer = ImageViewer(img)
tool = RectangleTool(viewer, maxdist=10)
tool.extents = (100, 150, 100, 150)
assert_equal(tool.corners,
((100, 150, 150, 100), (100, 100, 150, 150)))
assert_equal(tool.extents, (100, 150, 100, 150))
assert_equal(tool.edge_centers,
((100, 125.0, 150, 125.0), (125.0, 100, 125.0, 150)))
assert_equal(tool.geometry, (100, 150, 100, 150))
# grab a corner and move it
do_event(viewer, 'mouse_press', xdata=100, ydata=100)
do_event(viewer, 'move', xdata=120, ydata=120)
do_event(viewer, 'mouse_release')
#assert_equal(tool.geometry, [120, 150, 120, 150])
# create a new line
do_event(viewer, 'mouse_press', xdata=10, ydata=10)
do_event(viewer, 'move', xdata=100, ydata=100)
do_event(viewer, 'mouse_release')
assert_equal(tool.geometry, [10, 100, 10, 100])
@cleanup
@skipif(not has_qt)
def test_paint_tool():
img = data.moon()
viewer = ImageViewer(img)
tool = PaintTool(viewer, img.shape)
tool.radius = 10
assert_equal(tool.radius, 10)
tool.label = 2
assert_equal(tool.label, 2)
assert_equal(tool.shape, img.shape)
do_event(viewer, 'mouse_press', xdata=100, ydata=100)
do_event(viewer, 'move', xdata=110, ydata=110)
do_event(viewer, 'mouse_release')
assert_equal(tool.overlay[tool.overlay == 2].size, 761)
tool.label = 5
do_event(viewer, 'mouse_press', xdata=20, ydata=20)
do_event(viewer, 'move', xdata=40, ydata=40)
do_event(viewer, 'mouse_release')
assert_equal(tool.overlay[tool.overlay == 5].size, 881)
assert_equal(tool.overlay[tool.overlay == 2].size, 761)
do_event(viewer, 'key_press', key='enter')
tool.overlay = tool.overlay * 0
assert_equal(tool.overlay.sum(), 0)
@cleanup
@skipif(not has_qt)
def test_base_tool():
img = data.moon()
viewer = ImageViewer(img)
tool = CanvasToolBase(viewer)
tool.set_visible(False)
tool.set_visible(True)
do_event(viewer, 'key_press', key='enter')
tool.redraw()
tool.remove()
tool = CanvasToolBase(viewer, useblit=False)
tool.redraw()
| bsd-3-clause |
digitalghost/pycv-gameRobot | cvFixRotation.py | 1 | 1813 | import sys
import subprocess
import cv2
import numpy as np
from matplotlib import pyplot as plt
#adb shell dumpsys input | grep 'SurfaceOrientation' | awk '{ print $2 }'
# 1 For 90 degree, 3 For 270 degree
def _rotateAndScale(img, scaleFactor = 0.5, degreesCCW = 30):
    (oldY,oldX) = img.shape #note: numpy uses (y,x) convention but most OpenCV functions use (x,y)
M = cv2.getRotationMatrix2D(center=(oldX/2,oldY/2), angle=degreesCCW, scale=scaleFactor)
#rotate about center of image.
#choose a new image size.
newX,newY = oldX*scaleFactor,oldY*scaleFactor
#include this if you want to prevent corners being cut off
r = np.deg2rad(degreesCCW)
newX,newY = (abs(np.sin(r)*newY) + abs(np.cos(r)*newX),abs(np.sin(r)*newX) + abs(np.cos(r)*newY))
(tx,ty) = ((newX-oldX)/2,(newY-oldY)/2)
M[0,2] += tx
M[1,2] += ty
rotatedImg = cv2.warpAffine(img, M,dsize=(int(newX),int(newY)))
return rotatedImg
def _run_command(command):
#print "Command is:" + command
p = subprocess.Popen(command,stdout=subprocess.PIPE,stderr=subprocess.STDOUT,shell=True)
return iter(p.stdout.readline, b'')
if len(sys.argv)<2 :
sys.exit("Not enough arguments.")
print "Executing rotation for file, Filename is:" + str(sys.argv[1])
cmd = "adb shell dumpsys input | grep 'SurfaceOrientation' | awk '{ print $2 }'"
result = _run_command(cmd)
resCode = 0 #0,1,3
for res in result:
resCode = int(res.strip("\n"))
print resCode
break
orgImg = cv2.imread(sys.argv[1],0)
orgImg2 = orgImg.copy()
resultImg = None
if resCode == 1:
resultImg = _rotateAndScale(orgImg,1.0,90)
elif resCode == 3:
resultImg = _rotateAndScale(orgImg,1.0,-90)
else:
resultImg = orgImg
cv2.imwrite(sys.argv[1],resultImg)
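# Hypothetical standalone usage of _rotateAndScale (a sketch, not part of the
# original script; 'demo.png' is a placeholder filename), kept commented out
# so it does not interfere with the command-line flow above:
# demo = cv2.imread('demo.png', 0)  # read as grayscale, shape (rows, cols)
# rotated = _rotateAndScale(demo, scaleFactor=1.0, degreesCCW=30)
# cv2.imwrite('demo_rotated.png', rotated)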
| gpl-3.0 |
veltzer/pycmdtools | setup.py | 1 | 1843 | import setuptools
def get_readme():
with open('README.rst') as f:
return f.read()
setuptools.setup(
# the first three fields are a must according to the documentation
name="pycmdtools",
version="0.0.73",
packages=[
'pycmdtools',
],
# from here all is optional
    description="pycmdtools is a set of useful command line tools written in python",
long_description=get_readme(),
long_description_content_type="text/x-rst",
author="Mark Veltzer",
author_email="[email protected]",
maintainer="Mark Veltzer",
maintainer_email="[email protected]",
keywords=[
'utils',
'command line',
'python',
'shell',
'utilities',
],
url="https://veltzer.github.io/pycmdtools",
download_url="https://github.com/veltzer/pycmdtools",
license="MIT",
platforms=[
'python3',
],
install_requires=[
'pylogconf',
'pytconf',
'requests',
'tqdm',
'numpy',
'pandas',
'unidecode',
'pyyaml',
'pytidylib',
'beautifulsoup4',
'lxml',
],
extras_require={
},
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Utilities',
'License :: OSI Approved :: MIT License',
],
data_files=[
],
entry_points={"console_scripts": [
'pycmdtools=pycmdtools.main:main',
]},
python_requires=">=3.6",
)
| mit |
mariusvniekerk/bayes_logistic | bayes_logistic/sklearn.py | 1 | 7105 | # Copyright (c) 2015 MaxPoint Interactive, Inc.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
from sklearn.utils.validation import NotFittedError
from .bayes_logistic import fit_bayes_logistic as _fit, bayes_logistic_prob as _predict
import six
import numpy as np
from abc import ABCMeta, abstractmethod
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin
from sklearn.linear_model import LogisticRegression
class BayesLogisticBase(six.with_metaclass(ABCMeta, BaseEstimator), ClassifierMixin):
"""Bayesian Logistic Regression Solver.
Assumes Laplace (Gaussian) Approximation to the posterior of the fitted parameter vector.
Uses scipy.optimize.minimize.
Initial priors for the coefficients and the hessian need not be provided. If not provided these will be initialized
to an uninformative initial solution equivalent to an L2 penalty.
Parameters
----------
coef : array-like, shape (n_features, ), optional
array of prior means on the parameters to be fit
H : array-like, shape (n_features, n_features) or (n_features, ), optional
array of prior Hessian (inverse covariance of prior distribution of parameters)
solver : string
scipy optimize solver used. this should be either 'Newton-CG', 'BFGS' or 'L-BFGS-B'.
The default is Newton-CG.
bounds : iterable of length p
A length p list (or tuple) of tuples each of length 2.
This is only used if the solver is set to 'L-BFGS-B'. In that case, a tuple
(lower_bound, upper_bound), both floats, is defined for each parameter. See the
scipy.optimize.minimize docs for further information.
maxiter : int
Maximum number of iterations for scipy.optimize.minimize solver.
"""
def __init__(self, H=None, coef=None, solver='Newton-CG', bounds=None, maxiter=100):
self.maxiter = maxiter
self.bounds = bounds
self.H_ = H
self.coef_ = coef
self.solver = solver
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
sample_weight : {array-like, None}, shape (n_samples,)
Optional weight vector to weight each observation by. Weights expected to be in [0,1].
Returns
-------
self : object
Returns self.
"""
self.partial_fit(X, y, sample_weight)
return self
def partial_fit(self, X, y, sample_weight=None):
"""Update the model according to the given training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
sample_weight : {array-like, None}, shape (n_samples,)
Optional weight vector to weight each observation by. Weights expected to be in [0,1].
Returns
-------
self : object
Returns self.
"""
self._ensure_valid_wh(X, y)
self.coef_, self.H_ = _fit(y=y, X=X, wprior=self.coef_, H=self.H_, solver=self.solver, maxiter=self.maxiter,
weights=sample_weight, bounds=self.bounds)
return self
def predict(self, X):
"""Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, ]
Returns the probability of the sample for the model
"""
if not hasattr(self, 'coef_') or self.coef_ is None:
            raise NotFittedError("This %(name)s instance is not fitted "
                                 "yet" % {'name': type(self).__name__})
return _predict(X, self.coef_, self.H_)
def predict_proba(self, X):
"""Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, ]
Returns the probability of the sample for the model
"""
if not hasattr(self, 'coef_') or self.coef_ is None:
            raise NotFittedError("This %(name)s instance is not fitted "
                                 "yet" % {'name': type(self).__name__})
return _predict(X, self.coef_, self.H_)
def _ensure_valid_wh(self, X, y):
"""Ensures that there are valid values for the coefficients and the hessian.
If not initialized this sets the coefficients and hessian to be equivalent to the L2 penalty
"""
dim = X.shape[1]
if self.H_ is None:
self.H_ = np.diag(np.ones(dim)) * 0.001
if self.coef_ is None:
self.coef_ = np.zeros(dim)
class BayesLogisticClassifier(BayesLogisticBase, ClassifierMixin):
pass
class BayesLogisticRegressor(BayesLogisticBase, RegressorMixin):
pass
__all__ = ["BayesLogisticClassifier", "BayesLogisticRegressor"]
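# Illustrative usage sketch (not part of the original module; names below are
# arbitrary): stream data to the classifier in mini-batches with partial_fit,
# so the posterior (coef_, H_) learned from each batch becomes the prior for
# the next one.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X_demo = rng.randn(200, 3)
    y_demo = (X_demo[:, 0] + 0.5 * X_demo[:, 1] > 0).astype(float)
    clf = BayesLogisticClassifier()
    for start in range(0, X_demo.shape[0], 50):
        clf.partial_fit(X_demo[start:start + 50], y_demo[start:start + 50])
    probs = clf.predict_proba(X_demo[:5])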
| bsd-3-clause |