webmasterraj/FogOrNot | flask/lib/python2.7/site-packages/pandas/sparse/array.py | copies: 5 | size: 16980 | license: gpl-2.0
"""
SparseArray data structure
"""
from __future__ import division
# pylint: disable=E1101,E1103,W0231
from numpy import nan, ndarray
import numpy as np
from pandas.core.base import PandasObject
import pandas.core.common as com
from pandas import compat, lib
from pandas.compat import range
from pandas._sparse import BlockIndex, IntIndex
import pandas._sparse as splib
import pandas.index as _index
import pandas.core.ops as ops
def _arith_method(op, name, str_rep=None, default_axis=None,
fill_zeros=None, **eval_kwargs):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
def wrapper(self, other):
if isinstance(other, np.ndarray):
if len(self) != len(other):
raise AssertionError("length mismatch: %d vs. %d" %
(len(self), len(other)))
if not isinstance(other, com.ABCSparseArray):
other = SparseArray(other, fill_value=self.fill_value)
if name[0] == 'r':
return _sparse_array_op(other, self, op, name[1:])
else:
return _sparse_array_op(self, other, op, name)
elif np.isscalar(other):
new_fill_value = op(np.float64(self.fill_value),
np.float64(other))
return SparseArray(op(self.sp_values, other),
sparse_index=self.sp_index,
fill_value=new_fill_value)
else: # pragma: no cover
raise TypeError('operation with %s not supported' % type(other))
if name.startswith("__"):
name = name[2:-2]
wrapper.__name__ = name
return wrapper
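# Illustrative sketch (not part of the original module): the generated wrapper
# applies the op to sp_values and recomputes fill_value from the scalar.
#   >>> arr = SparseArray([1.0, 0.0, 2.0], fill_value=0)
#   >>> (arr + 1).fill_value        # op(0, 1) -> new fill_value
#   1.0
#   >>> (arr + 1).sp_values         # only the stored points are touched
#   array([ 2.,  3.])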
def _sparse_array_op(left, right, op, name):
if np.isnan(left.fill_value):
sparse_op = lambda a, b: _sparse_nanop(a, b, name)
else:
sparse_op = lambda a, b: _sparse_fillop(a, b, name)
if left.sp_index.equals(right.sp_index):
result = op(left.sp_values, right.sp_values)
result_index = left.sp_index
else:
result, result_index = sparse_op(left, right)
try:
fill_value = op(left.fill_value, right.fill_value)
except:
fill_value = nan
return SparseArray(result, sparse_index=result_index,
fill_value=fill_value)
def _sparse_nanop(this, other, name):
sparse_op = getattr(splib, 'sparse_nan%s' % name)
result, result_index = sparse_op(this.sp_values,
this.sp_index,
other.sp_values,
other.sp_index)
return result, result_index
def _sparse_fillop(this, other, name):
sparse_op = getattr(splib, 'sparse_%s' % name)
result, result_index = sparse_op(this.sp_values,
this.sp_index,
this.fill_value,
other.sp_values,
other.sp_index,
other.fill_value)
return result, result_index
class SparseArray(PandasObject, np.ndarray):
"""Data structure for labeled, sparse floating point data
Parameters
----------
data : {array-like, Series, SparseSeries, dict}
kind : {'block', 'integer'}
fill_value : float
Defaults to NaN (code for missing)
sparse_index : {BlockIndex, IntIndex}, optional
Only if you have one. Mainly used internally
Notes
-----
SparseArray objects are immutable via the typical Python means. If you
must change values, convert to dense, make your changes, then convert back
to sparse
"""
__array_priority__ = 15
_typ = 'array'
_subtyp = 'sparse_array'
sp_index = None
fill_value = None
def __new__(
cls, data, sparse_index=None, index=None, kind='integer', fill_value=None,
dtype=np.float64, copy=False):
if index is not None:
if data is None:
data = np.nan
if not np.isscalar(data):
raise Exception("must only pass scalars with an index ")
values = np.empty(len(index), dtype='float64')
values.fill(data)
data = values
if dtype is not None:
dtype = np.dtype(dtype)
is_sparse_array = isinstance(data, SparseArray)
if fill_value is None:
if is_sparse_array:
fill_value = data.fill_value
else:
fill_value = nan
if is_sparse_array:
sparse_index = data.sp_index
values = np.asarray(data)
else:
# array-like
if sparse_index is None:
values, sparse_index = make_sparse(data, kind=kind,
fill_value=fill_value)
else:
values = data
if len(values) != sparse_index.npoints:
raise AssertionError("Non array-like type {0} must have"
" the same length as the"
" index".format(type(values)))
# Create array, do *not* copy data by default
if copy:
subarr = np.array(values, dtype=dtype, copy=True)
else:
subarr = np.asarray(values, dtype=dtype)
# if we have a bool type, make sure that we have a bool fill_value
if (dtype is not None and issubclass(dtype.type, np.bool_)) or (data is not None and lib.is_bool_array(subarr)):
if np.isnan(fill_value) or not fill_value:
fill_value = False
else:
fill_value = bool(fill_value)
# Change the class of the array to be the subclass type.
output = subarr.view(cls)
output.sp_index = sparse_index
output.fill_value = fill_value
return output
@property
def _constructor(self):
return lambda x: SparseArray(x, fill_value=self.fill_value,
kind=self.kind)
@property
def kind(self):
if isinstance(self.sp_index, BlockIndex):
return 'block'
elif isinstance(self.sp_index, IntIndex):
return 'integer'
def __array_finalize__(self, obj):
"""
Gets called after any ufunc or other array operations, necessary
to pass on the index.
"""
self.sp_index = getattr(obj, 'sp_index', None)
self.fill_value = getattr(obj, 'fill_value', None)
def __reduce__(self):
"""Necessary for making this object picklable"""
object_state = list(ndarray.__reduce__(self))
subclass_state = self.fill_value, self.sp_index
object_state[2] = (object_state[2], subclass_state)
return tuple(object_state)
def __setstate__(self, state):
"""Necessary for making this object picklable"""
nd_state, own_state = state
ndarray.__setstate__(self, nd_state)
fill_value, sp_index = own_state[:2]
self.sp_index = sp_index
self.fill_value = fill_value
def __len__(self):
try:
return self.sp_index.length
except:
return 0
def __unicode__(self):
return '%s\nFill: %s\n%s' % (com.pprint_thing(self),
com.pprint_thing(self.fill_value),
com.pprint_thing(self.sp_index))
def disable(self, other):
raise NotImplementedError('inplace binary ops not supported')
# Inplace operators
__iadd__ = disable
__isub__ = disable
__imul__ = disable
__itruediv__ = disable
__ifloordiv__ = disable
__ipow__ = disable
# Python 2 division operators
if not compat.PY3:
__idiv__ = disable
@property
def values(self):
"""
Dense values
"""
output = np.empty(len(self), dtype=np.float64)
int_index = self.sp_index.to_int_index()
output.fill(self.fill_value)
output.put(int_index.indices, self)
return output
@property
def sp_values(self):
# caching not an option, leaks memory
return self.view(np.ndarray)
def get_values(self, fill=None):
""" return a dense representation """
return self.to_dense(fill=fill)
def to_dense(self, fill=None):
"""
Convert SparseSeries to (dense) Series
"""
values = self.values
# fill the nans
if fill is None:
fill = self.fill_value
if not np.isnan(fill):
values[np.isnan(values)] = fill
return values
def __iter__(self):
for i in range(len(self)):
yield self._get_val_at(i)
raise StopIteration
def __getitem__(self, key):
"""
"""
if com.is_integer(key):
return self._get_val_at(key)
else:
data_slice = self.values[key]
return self._constructor(data_slice)
def __getslice__(self, i, j):
if i < 0:
i = 0
if j < 0:
j = 0
slobj = slice(i, j)
return self.__getitem__(slobj)
def _get_val_at(self, loc):
n = len(self)
if loc < 0:
loc += n
if loc >= n or loc < 0:
raise IndexError('Out of bounds access')
sp_loc = self.sp_index.lookup(loc)
if sp_loc == -1:
return self.fill_value
else:
return _index.get_value_at(self, sp_loc)
def take(self, indices, axis=0):
"""
Sparse-compatible version of ndarray.take
Returns
-------
taken : ndarray
"""
if axis:
raise ValueError("axis must be 0, input was {0}".format(axis))
indices = np.atleast_1d(np.asarray(indices, dtype=int))
# allow -1 to indicate missing values
n = len(self)
if ((indices >= n) | (indices < -1)).any():
raise IndexError('out of bounds access')
if self.sp_index.npoints > 0:
locs = np.array([self.sp_index.lookup(loc) if loc > -1 else -1
for loc in indices])
result = self.sp_values.take(locs)
mask = locs == -1
if mask.any():
try:
result[mask] = self.fill_value
except ValueError:
# wrong dtype
result = result.astype('float64')
result[mask] = self.fill_value
else:
result = np.empty(len(indices))
result.fill(self.fill_value)
return result
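    # Rough example of the -1 handling above (illustrative, not part of the
    # original module):
    #   >>> arr = SparseArray([1.0, np.nan, 3.0])
    #   >>> arr.take([0, -1, 2])       # -1 is filled with arr.fill_value (NaN here)
    #   array([  1.,  nan,   3.])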
def __setitem__(self, key, value):
# if com.is_integer(key):
# self.values[key] = value
# else:
# raise Exception("SparseArray does not support seting non-scalars via setitem")
raise TypeError(
"SparseArray does not support item assignment via setitem")
def __setslice__(self, i, j, value):
if i < 0:
i = 0
if j < 0:
j = 0
slobj = slice(i, j)
# if not np.isscalar(value):
# raise Exception("SparseArray does not support seting non-scalars via slices")
#x = self.values
#x[slobj] = value
#self.values = x
raise TypeError(
"SparseArray does not support item assignment via slices")
def astype(self, dtype=None):
"""
"""
dtype = np.dtype(dtype)
if dtype is not None and dtype not in (np.float_, float):
raise TypeError('Can only support floating point data for now')
return self.copy()
def copy(self, deep=True):
"""
Make a copy of the SparseSeries. Only the actual sparse values need to
be copied
"""
if deep:
values = self.sp_values.copy()
else:
values = self.sp_values
return SparseArray(values, sparse_index=self.sp_index,
dtype=self.dtype,
fill_value=self.fill_value)
def count(self):
"""
Compute sum of non-NA/null observations in SparseSeries. If the
fill_value is not NaN, the "sparse" locations will be included in the
observation count
Returns
-------
nobs : int
"""
sp_values = self.sp_values
valid_spvals = np.isfinite(sp_values).sum()
if self._null_fill_value:
return valid_spvals
else:
return valid_spvals + self.sp_index.ngaps
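    # Quick sketch of the fill_value dependence described above (illustrative):
    #   >>> SparseArray([1.0, np.nan, 3.0]).count()               # NaN fill: gaps excluded
    #   2
    #   >>> SparseArray([1.0, 0.0, 3.0], fill_value=0).count()    # non-NaN fill: gaps included
    #   3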
@property
def _null_fill_value(self):
return np.isnan(self.fill_value)
@property
def _valid_sp_values(self):
sp_vals = self.sp_values
mask = np.isfinite(sp_vals)
return sp_vals[mask]
def sum(self, axis=None, dtype=None, out=None):
"""
Sum of non-NA/null values
Returns
-------
sum : float
"""
valid_vals = self._valid_sp_values
sp_sum = valid_vals.sum()
if self._null_fill_value:
return sp_sum
else:
nsparse = self.sp_index.ngaps
return sp_sum + self.fill_value * nsparse
def cumsum(self, axis=0, dtype=None, out=None):
"""
Cumulative sum of values. Preserves locations of NaN values
Extra parameters are to preserve ndarray interface.
Returns
-------
cumsum : Series
"""
if com.notnull(self.fill_value):
return self.to_dense().cumsum()
# TODO: what if sp_values contains NaN??
return SparseArray(self.sp_values.cumsum(),
sparse_index=self.sp_index,
fill_value=self.fill_value)
def mean(self, axis=None, dtype=None, out=None):
"""
Mean of non-NA/null values
Returns
-------
mean : float
"""
valid_vals = self._valid_sp_values
sp_sum = valid_vals.sum()
ct = len(valid_vals)
if self._null_fill_value:
return sp_sum / ct
else:
nsparse = self.sp_index.ngaps
return (sp_sum + self.fill_value * nsparse) / (ct + nsparse)
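# Illustrative check of the weighting in SparseArray.mean above: with a non-null
# fill_value the implicit points are averaged in as well (a sketch only).
#   >>> SparseArray([1.0, 0.0, 3.0], fill_value=0).mean()   # (1 + 3 + 0*1) / (2 + 1)
#   1.3333333333333333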
def _maybe_to_dense(obj):
""" try to convert to dense """
if hasattr(obj, 'to_dense'):
return obj.to_dense()
return obj
def _maybe_to_sparse(array):
if isinstance(array, com.ABCSparseSeries):
array = SparseArray(
array.values, sparse_index=array.sp_index, fill_value=array.fill_value, copy=True)
if not isinstance(array, SparseArray):
array = com._values_from_object(array)
return array
def make_sparse(arr, kind='block', fill_value=nan):
"""
Convert ndarray to sparse format
Parameters
----------
arr : ndarray
kind : {'block', 'integer'}
fill_value : NaN or another value
Returns
-------
(sparse_values, index) : (ndarray, SparseIndex)
"""
if hasattr(arr, 'values'):
arr = arr.values
else:
if np.isscalar(arr):
arr = [arr]
arr = np.asarray(arr)
length = len(arr)
if np.isnan(fill_value):
mask = ~np.isnan(arr)
else:
mask = arr != fill_value
indices = np.arange(length, dtype=np.int32)[mask]
if kind == 'block':
locs, lens = splib.get_blocks(indices)
index = BlockIndex(length, locs, lens)
elif kind == 'integer':
index = IntIndex(length, indices)
else: # pragma: no cover
raise ValueError('must be block or integer type')
sparsified_values = arr[mask]
return sparsified_values, index
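# Small usage sketch (illustrative): the returned values exclude the fill_value
# and the index records where they sit.
#   >>> vals, idx = make_sparse(np.array([0., 1., 0., 2.]), kind='integer',
#   ...                         fill_value=0)
#   >>> vals
#   array([ 1.,  2.])
#   >>> idx.indices
#   array([1, 3], dtype=int32)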
ops.add_special_arithmetic_methods(SparseArray,
arith_method=_arith_method,
use_numexpr=False)
def _concat_compat(to_concat, axis=0):
"""
    provide concatenation of a sparse/dense array of arrays, each of which is a single dtype
Parameters
----------
to_concat : array of arrays
axis : axis to provide concatenation
Returns
-------
a single array, preserving the combined dtypes
"""
def convert_sparse(x, axis):
# coerce to native type
if isinstance(x, SparseArray):
x = x.get_values()
x = x.ravel()
if axis > 0:
x = np.atleast_2d(x)
return x
typs = com.get_dtype_kinds(to_concat)
# we have more than one type here, so densify and regular concat
to_concat = [ convert_sparse(x, axis) for x in to_concat ]
result = np.concatenate(to_concat,axis=axis)
if not len(typs-set(['sparse','f','i'])):
# we can remain sparse
result = SparseArray(result.ravel())
else:
# coerce to object if needed
result = result.astype('object')
return result
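# Rough illustration (not part of the original module): purely sparse/float
# inputs stay sparse after concatenation, while mixed dtypes densify to object.
#   >>> out = _concat_compat([SparseArray([1., np.nan]), SparseArray([2., 3.])])
#   >>> out.values
#   array([  1.,  nan,   2.,   3.])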
musically-ut/statsmodels | statsmodels/stats/tests/test_pairwise.py | copies: 26 | size: 12256 | license: bsd-3-clause
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 28 15:34:18 2012
Author: Josef Perktold
"""
import warnings
from statsmodels.compat.python import BytesIO, asbytes, range
import numpy as np
from numpy.testing import (assert_almost_equal, assert_equal, assert_,
assert_raises, assert_allclose)
from statsmodels.stats.libqsturng import qsturng
ss = '''\
43.9 1 1
39.0 1 2
46.7 1 3
43.8 1 4
44.2 1 5
47.7 1 6
43.6 1 7
38.9 1 8
43.6 1 9
40.0 1 10
89.8 2 1
87.1 2 2
92.7 2 3
90.6 2 4
87.7 2 5
92.4 2 6
86.1 2 7
88.1 2 8
90.8 2 9
89.1 2 10
68.4 3 1
69.3 3 2
68.5 3 3
66.4 3 4
70.0 3 5
68.1 3 6
70.6 3 7
65.2 3 8
63.8 3 9
69.2 3 10
36.2 4 1
45.2 4 2
40.7 4 3
40.5 4 4
39.3 4 5
40.3 4 6
43.2 4 7
38.7 4 8
40.9 4 9
39.7 4 10'''
#idx Treatment StressReduction
ss2 = '''\
1 mental 2
2 mental 2
3 mental 3
4 mental 4
5 mental 4
6 mental 5
7 mental 3
8 mental 4
9 mental 4
10 mental 4
11 physical 4
12 physical 4
13 physical 3
14 physical 5
15 physical 4
16 physical 1
17 physical 1
18 physical 2
19 physical 3
20 physical 3
21 medical 1
22 medical 2
23 medical 2
24 medical 2
25 medical 3
26 medical 2
27 medical 3
28 medical 1
29 medical 3
30 medical 1'''
ss3 = '''\
1 24.5
1 23.5
1 26.4
1 27.1
1 29.9
2 28.4
2 34.2
2 29.5
2 32.2
2 30.1
3 26.1
3 28.3
3 24.3
3 26.2
3 27.8'''
ss5 = '''\
2 - 3\t4.340\t0.691\t7.989\t***
2 - 1\t4.600\t0.951\t8.249\t***
3 - 2\t-4.340\t-7.989\t-0.691\t***
3 - 1\t0.260\t-3.389\t3.909\t-
1 - 2\t-4.600\t-8.249\t-0.951\t***
1 - 3\t-0.260\t-3.909\t3.389\t'''
cylinders = np.array([8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 4, 8, 8, 8, 8, 8, 8, 8, 8, 8, 4, 6, 6, 6, 4, 4,
4, 4, 4, 4, 6, 8, 8, 8, 8, 4, 4, 4, 4, 8, 8, 8, 8, 6, 6, 6, 6, 4, 4, 4, 4, 6, 6,
6, 6, 4, 4, 4, 4, 4, 8, 4, 6, 6, 8, 8, 8, 8, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 6, 6, 4, 6, 4, 4, 4, 4, 4, 4, 4, 4])
cyl_labels = np.array(['USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'France',
'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'Japan', 'USA', 'USA', 'USA', 'Japan',
'Germany', 'France', 'Germany', 'Sweden', 'Germany', 'USA', 'USA', 'USA', 'USA', 'USA', 'Germany',
'USA', 'USA', 'France', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'Germany',
'Japan', 'USA', 'USA', 'USA', 'USA', 'Germany', 'Japan', 'Japan', 'USA', 'Sweden', 'USA', 'France',
'Japan', 'Germany', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA',
'Germany', 'Japan', 'Japan', 'USA', 'USA', 'Japan', 'Japan', 'Japan', 'Japan', 'Japan', 'Japan', 'USA',
'USA', 'USA', 'USA', 'Japan', 'USA', 'USA', 'USA', 'Germany', 'USA', 'USA', 'USA'])
#accommodate recfromtxt for python 3.2, requires bytes
ss = asbytes(ss)
ss2 = asbytes(ss2)
ss3 = asbytes(ss3)
ss5 = asbytes(ss5)
dta = np.recfromtxt(BytesIO(ss), names=("Rust","Brand","Replication"))
dta2 = np.recfromtxt(BytesIO(ss2), names = ("idx", "Treatment", "StressReduction"))
dta3 = np.recfromtxt(BytesIO(ss3), names = ("Brand", "Relief"))
dta5 = np.recfromtxt(BytesIO(ss5), names = ('pair', 'mean', 'lower', 'upper', 'sig'), delimiter='\t')
sas_ = dta5[[1,3,2]]
from statsmodels.stats.multicomp import (tukeyhsd, pairwise_tukeyhsd,
MultiComparison)
#import statsmodels.sandbox.stats.multicomp as multi
#print tukeyhsd(dta['Brand'], dta['Rust'])
def get_thsd(mci, alpha=0.05):
var_ = np.var(mci.groupstats.groupdemean(), ddof=len(mci.groupsunique))
means = mci.groupstats.groupmean
nobs = mci.groupstats.groupnobs
resi = tukeyhsd(means, nobs, var_, df=None, alpha=alpha,
q_crit=qsturng(1-alpha, len(means), (nobs-1).sum()))
#print resi[4]
var2 = (mci.groupstats.groupvarwithin() * (nobs - 1.)).sum() \
/ (nobs - 1.).sum()
#print nobs, (nobs - 1).sum()
#print mci.groupstats.groupvarwithin()
assert_almost_equal(var_, var2, decimal=14)
return resi
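# Illustrative use of the wrapper exercised by the tests below (a sketch; relies
# on the dta2 recarray defined above):
#   res = pairwise_tukeyhsd(dta2['StressReduction'], dta2['Treatment'], alpha=0.05)
#   print(res)                      # pairwise comparison table
#   res.meandiffs, res.reject       # mean differences and rejection flags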
class CheckTuckeyHSDMixin(object):
@classmethod
def setup_class_(self):
self.mc = MultiComparison(self.endog, self.groups)
self.res = self.mc.tukeyhsd(alpha=self.alpha)
def test_multicomptukey(self):
assert_almost_equal(self.res.meandiffs, self.meandiff2, decimal=14)
assert_almost_equal(self.res.confint, self.confint2, decimal=2)
assert_equal(self.res.reject, self.reject2)
def test_group_tukey(self):
res_t = get_thsd(self.mc, alpha=self.alpha)
assert_almost_equal(res_t[4], self.confint2, decimal=2)
def test_shortcut_function(self):
#check wrapper function
res = pairwise_tukeyhsd(self.endog, self.groups, alpha=self.alpha)
assert_almost_equal(res.confint, self.res.confint, decimal=14)
class TestTuckeyHSD2(CheckTuckeyHSDMixin):
@classmethod
def setup_class(self):
#balanced case
self.endog = dta2['StressReduction']
self.groups = dta2['Treatment']
self.alpha = 0.05
self.setup_class_() #in super
#from R
tukeyhsd2s = np.array([ 1.5,1,-0.5,0.3214915,
-0.1785085,-1.678509,2.678509,2.178509,
0.6785085,0.01056279,0.1079035,0.5513904]
).reshape(3,4, order='F')
self.meandiff2 = tukeyhsd2s[:, 0]
self.confint2 = tukeyhsd2s[:, 1:3]
pvals = tukeyhsd2s[:, 3]
self.reject2 = pvals < 0.05
def test_table_names_default_group_order(self):
t = self.res._results_table
# if the group_order parameter is not used, the groups should
# be reported in alphabetical order
expected_order = [(b'medical', b'mental'),
(b'medical', b'physical'),
(b'mental', b'physical')]
for i in range(1, 4):
first_group = t[i][0].data
second_group = t[i][1].data
assert_((first_group, second_group) == expected_order[i - 1])
def test_table_names_custom_group_order(self):
# if the group_order parameter is used, the groups should
# be reported in the specified order
mc = MultiComparison(self.endog, self.groups,
group_order=[b'physical', b'medical', b'mental'])
res = mc.tukeyhsd(alpha=self.alpha)
#print(res)
t = res._results_table
expected_order = [(b'physical',b'medical'),
(b'physical',b'mental'),
(b'medical', b'mental')]
for i in range(1, 4):
first_group = t[i][0].data
second_group = t[i][1].data
assert_((first_group, second_group) == expected_order[i - 1])
class TestTuckeyHSD2Pandas(TestTuckeyHSD2):
@classmethod
def setup_class(self):
super(TestTuckeyHSD2Pandas, self).setup_class()
import pandas
self.endog = pandas.Series(self.endog)
# we are working with bytes on python 3, not with strings in this case
self.groups = pandas.Series(self.groups, dtype=object)
def test_incorrect_output(self):
# too few groups
assert_raises(ValueError, MultiComparison, np.array([1] * 10), [1, 2] * 4)
# too many groups
assert_raises(ValueError, MultiComparison, np.array([1] * 10), [1, 2] * 6)
# just one group
assert_raises(ValueError, MultiComparison, np.array([1] * 10), [1] * 10)
# group_order doesn't select all observations, only one group left
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
assert_raises(ValueError, MultiComparison, np.array([1] * 10),
[1, 2] * 5, group_order=[1])
# group_order doesn't select all observations,
# we do tukey_hsd with reduced set of observations
data = np.arange(15)
groups = np.repeat([1, 2, 3], 5)
mod1 = MultiComparison(np.array(data), groups, group_order=[1, 2])
res1 = mod1.tukeyhsd(alpha=0.01)
mod2 = MultiComparison(np.array(data[:10]), groups[:10])
res2 = mod2.tukeyhsd(alpha=0.01)
attributes = ['confint', 'data', 'df_total', 'groups', 'groupsunique',
'meandiffs', 'q_crit', 'reject', 'reject2', 'std_pairs',
'variance']
for att in attributes:
err_msg = att + 'failed'
assert_allclose(getattr(res1, att), getattr(res2, att), rtol=1e-14,
err_msg=err_msg)
attributes = ['data', 'datali', 'groupintlab', 'groups', 'groupsunique',
'ngroups', 'nobs', 'pairindices']
for att in attributes:
err_msg = att + 'failed'
assert_allclose(getattr(mod1, att), getattr(mod2, att), rtol=1e-14,
err_msg=err_msg)
class TestTuckeyHSD2s(CheckTuckeyHSDMixin):
@classmethod
def setup_class(self):
#unbalanced case
self.endog = dta2['StressReduction'][3:29]
self.groups = dta2['Treatment'][3:29]
self.alpha = 0.01
self.setup_class_()
#from R
tukeyhsd2s = np.array(
[1.8888888888888889, 0.888888888888889, -1, 0.2658549,
-0.5908785, -2.587133, 3.511923, 2.368656,
0.5871331, 0.002837638, 0.150456, 0.1266072]
).reshape(3,4, order='F')
self.meandiff2 = tukeyhsd2s[:, 0]
self.confint2 = tukeyhsd2s[:, 1:3]
pvals = tukeyhsd2s[:, 3]
self.reject2 = pvals < 0.01
class TestTuckeyHSD3(CheckTuckeyHSDMixin):
@classmethod
def setup_class(self):
#SAS case
self.endog = dta3['Relief']
self.groups = dta3['Brand']
self.alpha = 0.05
self.setup_class_()
#super(self, self).setup_class_()
#CheckTuckeyHSD.setup_class_()
self.meandiff2 = sas_['mean']
self.confint2 = sas_[['lower','upper']].view(float).reshape((3,2))
self.reject2 = sas_['sig'] == asbytes('***')
class TestTuckeyHSD4(CheckTuckeyHSDMixin):
@classmethod
def setup_class(self):
#unbalanced case verified in Matlab
self.endog = cylinders
self.groups = cyl_labels
self.alpha = 0.05
self.setup_class_()
self.res._simultaneous_ci()
#from Matlab
self.halfwidth2 = np.array([1.5228335685980883, 0.9794949704444682, 0.78673802805533644,
2.3321237694566364, 0.57355135882752939])
self.meandiff2 = np.array([0.22222222222222232, 0.13333333333333375, 0.0, 2.2898550724637685,
-0.088888888888888573, -0.22222222222222232, 2.0676328502415462,
-0.13333333333333375, 2.1565217391304348, 2.2898550724637685])
self.confint2 = np.array([-2.32022210717, 2.76466655161, -2.247517583, 2.51418424967,
-3.66405224956, 3.66405224956, 0.113960166573, 4.46574997835,
-1.87278583908, 1.6950080613, -3.529655688, 3.08521124356, 0.568180988881,
3.5670847116, -3.31822643175, 3.05155976508, 0.951206924521, 3.36183655374,
-0.74487911754, 5.32458926247]).reshape(10,2)
self.reject2 = np.array([False, False, False, True, False, False, True, False, True, False])
def test_hochberg_intervals(self):
assert_almost_equal(self.res.halfwidths, self.halfwidth2, 14)
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/pandas/io/stata.py | copies: 6 | size: 83955 | """
"""
Module contains tools for processing Stata files into DataFrames
The StataReader below was originally written by Joe Presbrey as part of PyDTA.
It has been extended and improved by Skipper Seabold from the Statsmodels
project who also developed the StataWriter and was finally added to pandas in
a once again improved version.
You can find more information on http://presbrey.mit.edu/PyDTA and
http://www.statsmodels.org/devel/
"""
import numpy as np
import sys
import struct
from dateutil.relativedelta import relativedelta
from pandas.core.dtypes.common import (
is_categorical_dtype, is_datetime64_dtype,
_ensure_object)
from pandas.core.base import StringMixin
from pandas.core.categorical import Categorical
from pandas.core.frame import DataFrame
from pandas.core.series import Series
import datetime
from pandas import compat, to_timedelta, to_datetime, isnull, DatetimeIndex
from pandas.compat import lrange, lmap, lzip, text_type, string_types, range, \
zip, BytesIO
from pandas.util._decorators import Appender
import pandas as pd
from pandas.io.common import get_filepath_or_buffer, BaseIterator
from pandas._libs.lib import max_len_string_array, infer_dtype
from pandas._libs.tslib import NaT, Timestamp
VALID_ENCODINGS = ('ascii', 'us-ascii', 'latin-1', 'latin_1', 'iso-8859-1',
'iso8859-1', '8859', 'cp819', 'latin', 'latin1', 'L1')
_version_error = ("Version of given Stata file is not 104, 105, 108, "
"111 (Stata 7SE), 113 (Stata 8/9), 114 (Stata 10/11), "
"115 (Stata 12), 117 (Stata 13), or 118 (Stata 14)")
_statafile_processing_params1 = """\
convert_dates : boolean, defaults to True
Convert date variables to DataFrame time values
convert_categoricals : boolean, defaults to True
Read value labels and convert columns to Categorical/Factor variables"""
_encoding_params = """\
encoding : string, None or encoding
Encoding used to parse the files. None defaults to latin-1."""
_statafile_processing_params2 = """\
index : identifier of index column
identifier of column that should be used as index of the DataFrame
convert_missing : boolean, defaults to False
Flag indicating whether to convert missing values to their Stata
representations. If False, missing values are replaced with nans.
If True, columns containing missing values are returned with
object data types and missing values are represented by
StataMissingValue objects.
preserve_dtypes : boolean, defaults to True
Preserve Stata datatypes. If False, numeric data are upcast to pandas
default types for foreign data (float64 or int64)
columns : list or None
Columns to retain. Columns will be returned in the given order. None
returns all columns
order_categoricals : boolean, defaults to True
Flag indicating whether converted categorical data are ordered."""
_chunksize_params = """\
chunksize : int, default None
Return StataReader object for iterations, returns chunks with
given number of lines"""
_iterator_params = """\
iterator : boolean, default False
Return StataReader object"""
_read_stata_doc = """Read Stata file into DataFrame
Parameters
----------
filepath_or_buffer : string or file-like object
Path to .dta file or object implementing a binary read() functions
%s
%s
%s
%s
%s
Returns
-------
DataFrame or StataReader
Examples
--------
Read a Stata dta file:
>>> df = pandas.read_stata('filename.dta')
Read a Stata dta file in 10,000 line chunks:
>>> itr = pandas.read_stata('filename.dta', chunksize=10000)
>>> for chunk in itr:
>>> do_something(chunk)
""" % (_statafile_processing_params1, _encoding_params,
_statafile_processing_params2, _chunksize_params,
_iterator_params)
_data_method_doc = """Reads observations from Stata file, converting them into a dataframe
This is a legacy method. Use `read` in new code.
Parameters
----------
%s
%s
Returns
-------
DataFrame
""" % (_statafile_processing_params1, _statafile_processing_params2)
_read_method_doc = """\
Reads observations from Stata file, converting them into a dataframe
Parameters
----------
nrows : int
Number of lines to read from data file, if None read whole file.
%s
%s
Returns
-------
DataFrame
""" % (_statafile_processing_params1, _statafile_processing_params2)
_stata_reader_doc = """\
Class for reading Stata dta files.
Parameters
----------
path_or_buf : string or file-like object
Path to .dta file or object implementing a binary read() functions
%s
%s
%s
%s
""" % (_statafile_processing_params1, _statafile_processing_params2,
_encoding_params, _chunksize_params)
@Appender(_read_stata_doc)
def read_stata(filepath_or_buffer, convert_dates=True,
convert_categoricals=True, encoding=None, index=None,
convert_missing=False, preserve_dtypes=True, columns=None,
order_categoricals=True, chunksize=None, iterator=False):
reader = StataReader(filepath_or_buffer,
convert_dates=convert_dates,
convert_categoricals=convert_categoricals,
index=index, convert_missing=convert_missing,
preserve_dtypes=preserve_dtypes,
columns=columns,
order_categoricals=order_categoricals,
chunksize=chunksize, encoding=encoding)
if iterator or chunksize:
data = reader
else:
try:
data = reader.read()
finally:
reader.close()
return data
_date_formats = ["%tc", "%tC", "%td", "%d", "%tw", "%tm", "%tq", "%th", "%ty"]
stata_epoch = datetime.datetime(1960, 1, 1)
def _stata_elapsed_date_to_datetime_vec(dates, fmt):
"""
Convert from SIF to datetime. http://www.stata.com/help.cgi?datetime
Parameters
----------
dates : Series
The Stata Internal Format date to convert to datetime according to fmt
fmt : str
The format to convert to. Can be, tc, td, tw, tm, tq, th, ty
    Returns
-------
converted : Series
The converted dates
Examples
--------
>>> import pandas as pd
>>> dates = pd.Series([52])
>>> _stata_elapsed_date_to_datetime_vec(dates , "%tw")
0 1961-01-01
dtype: datetime64[ns]
Notes
-----
datetime/c - tc
milliseconds since 01jan1960 00:00:00.000, assuming 86,400 s/day
datetime/C - tC - NOT IMPLEMENTED
milliseconds since 01jan1960 00:00:00.000, adjusted for leap seconds
date - td
days since 01jan1960 (01jan1960 = 0)
weekly date - tw
weeks since 1960w1
This assumes 52 weeks in a year, then adds 7 * remainder of the weeks.
The datetime value is the start of the week in terms of days in the
year, not ISO calendar weeks.
monthly date - tm
months since 1960m1
quarterly date - tq
quarters since 1960q1
half-yearly date - th
        half-years since 1960h1
    yearly date - ty
years since 0000
If you don't have pandas with datetime support, then you can't do
milliseconds accurately.
"""
MIN_YEAR, MAX_YEAR = Timestamp.min.year, Timestamp.max.year
MAX_DAY_DELTA = (Timestamp.max - datetime.datetime(1960, 1, 1)).days
MIN_DAY_DELTA = (Timestamp.min - datetime.datetime(1960, 1, 1)).days
MIN_MS_DELTA = MIN_DAY_DELTA * 24 * 3600 * 1000
MAX_MS_DELTA = MAX_DAY_DELTA * 24 * 3600 * 1000
def convert_year_month_safe(year, month):
"""
Convert year and month to datetimes, using pandas vectorized versions
        when the date range falls within the range supported by pandas.
        Otherwise it falls back to a slower but more robust method using datetime.
"""
if year.max() < MAX_YEAR and year.min() > MIN_YEAR:
return to_datetime(100 * year + month, format='%Y%m')
else:
index = getattr(year, 'index', None)
return Series(
[datetime.datetime(y, m, 1) for y, m in zip(year, month)],
index=index)
def convert_year_days_safe(year, days):
"""
Converts year (e.g. 1999) and days since the start of the year to a
datetime or datetime64 Series
"""
if year.max() < (MAX_YEAR - 1) and year.min() > MIN_YEAR:
return (to_datetime(year, format='%Y') +
to_timedelta(days, unit='d'))
else:
index = getattr(year, 'index', None)
value = [datetime.datetime(y, 1, 1) + relativedelta(days=int(d))
for y, d in zip(year, days)]
return Series(value, index=index)
def convert_delta_safe(base, deltas, unit):
"""
Convert base dates and deltas to datetimes, using pandas vectorized
versions if the deltas satisfy restrictions required to be expressed
as dates in pandas.
"""
index = getattr(deltas, 'index', None)
if unit == 'd':
if deltas.max() > MAX_DAY_DELTA or deltas.min() < MIN_DAY_DELTA:
values = [base + relativedelta(days=int(d)) for d in deltas]
return Series(values, index=index)
elif unit == 'ms':
if deltas.max() > MAX_MS_DELTA or deltas.min() < MIN_MS_DELTA:
values = [base + relativedelta(microseconds=(int(d) * 1000))
for d in deltas]
return Series(values, index=index)
else:
raise ValueError('format not understood')
base = to_datetime(base)
deltas = to_timedelta(deltas, unit=unit)
return base + deltas
# TODO: If/when pandas supports more than datetime64[ns], this should be
# improved to use correct range, e.g. datetime[Y] for yearly
bad_locs = np.isnan(dates)
has_bad_values = False
if bad_locs.any():
has_bad_values = True
data_col = Series(dates)
data_col[bad_locs] = 1.0 # Replace with NaT
dates = dates.astype(np.int64)
if fmt in ["%tc", "tc"]: # Delta ms relative to base
base = stata_epoch
ms = dates
conv_dates = convert_delta_safe(base, ms, 'ms')
elif fmt in ["%tC", "tC"]:
from warnings import warn
warn("Encountered %tC format. Leaving in Stata Internal Format.")
conv_dates = Series(dates, dtype=np.object)
if has_bad_values:
conv_dates[bad_locs] = pd.NaT
return conv_dates
elif fmt in ["%td", "td", "%d", "d"]: # Delta days relative to base
base = stata_epoch
days = dates
conv_dates = convert_delta_safe(base, days, 'd')
elif fmt in ["%tw", "tw"]: # does not count leap days - 7 days is a week
year = stata_epoch.year + dates // 52
days = (dates % 52) * 7
conv_dates = convert_year_days_safe(year, days)
elif fmt in ["%tm", "tm"]: # Delta months relative to base
year = stata_epoch.year + dates // 12
month = (dates % 12) + 1
conv_dates = convert_year_month_safe(year, month)
elif fmt in ["%tq", "tq"]: # Delta quarters relative to base
year = stata_epoch.year + dates // 4
month = (dates % 4) * 3 + 1
conv_dates = convert_year_month_safe(year, month)
elif fmt in ["%th", "th"]: # Delta half-years relative to base
year = stata_epoch.year + dates // 2
month = (dates % 2) * 6 + 1
conv_dates = convert_year_month_safe(year, month)
elif fmt in ["%ty", "ty"]: # Years -- not delta
year = dates
month = np.ones_like(dates)
conv_dates = convert_year_month_safe(year, month)
else:
raise ValueError("Date fmt %s not understood" % fmt)
if has_bad_values: # Restore NaT for bad values
conv_dates[bad_locs] = NaT
return conv_dates
def _datetime_to_stata_elapsed_vec(dates, fmt):
"""
Convert from datetime to SIF. http://www.stata.com/help.cgi?datetime
Parameters
----------
dates : Series
Series or array containing datetime.datetime or datetime64[ns] to
convert to the Stata Internal Format given by fmt
fmt : str
The format to convert to. Can be, tc, td, tw, tm, tq, th, ty
"""
index = dates.index
NS_PER_DAY = 24 * 3600 * 1000 * 1000 * 1000
US_PER_DAY = NS_PER_DAY / 1000
def parse_dates_safe(dates, delta=False, year=False, days=False):
d = {}
if is_datetime64_dtype(dates.values):
if delta:
delta = dates - stata_epoch
d['delta'] = delta.values.astype(
np.int64) // 1000 # microseconds
if days or year:
dates = DatetimeIndex(dates)
d['year'], d['month'] = dates.year, dates.month
if days:
days = (dates.astype(np.int64) -
to_datetime(d['year'], format='%Y').astype(np.int64))
d['days'] = days // NS_PER_DAY
elif infer_dtype(dates) == 'datetime':
if delta:
delta = dates.values - stata_epoch
f = lambda x: \
US_PER_DAY * x.days + 1000000 * x.seconds + x.microseconds
v = np.vectorize(f)
d['delta'] = v(delta)
if year:
year_month = dates.apply(lambda x: 100 * x.year + x.month)
d['year'] = year_month.values // 100
d['month'] = (year_month.values - d['year'] * 100)
if days:
f = lambda x: (x - datetime.datetime(x.year, 1, 1)).days
v = np.vectorize(f)
d['days'] = v(dates)
else:
raise ValueError('Columns containing dates must contain either '
'datetime64, datetime.datetime or null values.')
return DataFrame(d, index=index)
bad_loc = isnull(dates)
index = dates.index
if bad_loc.any():
dates = Series(dates)
if is_datetime64_dtype(dates):
dates[bad_loc] = to_datetime(stata_epoch)
else:
dates[bad_loc] = stata_epoch
if fmt in ["%tc", "tc"]:
d = parse_dates_safe(dates, delta=True)
conv_dates = d.delta / 1000
elif fmt in ["%tC", "tC"]:
from warnings import warn
warn("Stata Internal Format tC not supported.")
conv_dates = dates
elif fmt in ["%td", "td"]:
d = parse_dates_safe(dates, delta=True)
conv_dates = d.delta // US_PER_DAY
elif fmt in ["%tw", "tw"]:
d = parse_dates_safe(dates, year=True, days=True)
conv_dates = (52 * (d.year - stata_epoch.year) + d.days // 7)
elif fmt in ["%tm", "tm"]:
d = parse_dates_safe(dates, year=True)
conv_dates = (12 * (d.year - stata_epoch.year) + d.month - 1)
elif fmt in ["%tq", "tq"]:
d = parse_dates_safe(dates, year=True)
conv_dates = 4 * (d.year - stata_epoch.year) + (d.month - 1) // 3
elif fmt in ["%th", "th"]:
d = parse_dates_safe(dates, year=True)
conv_dates = 2 * (d.year - stata_epoch.year) + \
(d.month > 6).astype(np.int)
elif fmt in ["%ty", "ty"]:
d = parse_dates_safe(dates, year=True)
conv_dates = d.year
else:
raise ValueError("Format %s is not a known Stata date format" % fmt)
conv_dates = Series(conv_dates, dtype=np.float64)
missing_value = struct.unpack('<d', b'\x00\x00\x00\x00\x00\x00\xe0\x7f')[0]
conv_dates[bad_loc] = missing_value
return Series(conv_dates, index=index)
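# Rough round-trip sketch for the %tm branch above (illustrative only):
#   >>> s = pd.Series(pd.to_datetime(['1961-02-15']))
#   >>> _datetime_to_stata_elapsed_vec(s, '%tm')   # 12*(1961 - 1960) + (2 - 1)
#   0    13.0
#   dtype: float64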
excessive_string_length_error = """
Fixed width strings in Stata .dta files are limited to 244 (or fewer)
characters. Column '%s' does not satisfy this restriction.
"""
class PossiblePrecisionLoss(Warning):
pass
precision_loss_doc = """
Column converted from %s to %s, and some data are outside of the lossless
conversion range. This may result in a loss of precision in the saved data.
"""
class ValueLabelTypeMismatch(Warning):
pass
value_label_mismatch_doc = """
Stata value labels (pandas categories) must be strings. Column {0} contains
non-string labels which will be converted to strings. Please check that the
Stata data file created has not lost information due to duplicate labels.
"""
class InvalidColumnName(Warning):
pass
invalid_name_doc = """
Not all pandas column names were valid Stata variable names.
The following replacements have been made:
{0}
If this is not what you expect, please make sure you have Stata-compliant
column names in your DataFrame (strings only, max 32 characters, only
alphanumerics and underscores, no Stata reserved words)
"""
def _cast_to_stata_types(data):
"""Checks the dtypes of the columns of a pandas DataFrame for
compatibility with the data types and ranges supported by Stata, and
converts if necessary.
Parameters
----------
data : DataFrame
The DataFrame to check and convert
Notes
-----
Numeric columns in Stata must be one of int8, int16, int32, float32 or
float64, with some additional value restrictions. int8 and int16 columns
are checked for violations of the value restrictions and upcast if needed.
int64 data is not usable in Stata, and so it is downcast to int32 whenever
    the values are in the int32 range, and cast to float64 when larger than
this range. If the int64 values are outside of the range of those
perfectly representable as float64 values, a warning is raised.
    bool columns are cast to int8. uint columns are converted to int of the
    same size if there is no loss in precision, otherwise they are upcast to a
    larger type. uint64 is currently not supported since it is converted to
object in a DataFrame.
"""
ws = ''
# original, if small, if large
conversion_data = ((np.bool, np.int8, np.int8),
(np.uint8, np.int8, np.int16),
(np.uint16, np.int16, np.int32),
(np.uint32, np.int32, np.int64))
float32_max = struct.unpack('<f', b'\xff\xff\xff\x7e')[0]
float64_max = struct.unpack('<d', b'\xff\xff\xff\xff\xff\xff\xdf\x7f')[0]
for col in data:
dtype = data[col].dtype
# Cast from unsupported types to supported types
for c_data in conversion_data:
if dtype == c_data[0]:
if data[col].max() <= np.iinfo(c_data[1]).max:
dtype = c_data[1]
else:
dtype = c_data[2]
if c_data[2] == np.float64: # Warn if necessary
if data[col].max() >= 2 ** 53:
ws = precision_loss_doc % ('uint64', 'float64')
data[col] = data[col].astype(dtype)
# Check values and upcast if necessary
if dtype == np.int8:
if data[col].max() > 100 or data[col].min() < -127:
data[col] = data[col].astype(np.int16)
elif dtype == np.int16:
if data[col].max() > 32740 or data[col].min() < -32767:
data[col] = data[col].astype(np.int32)
elif dtype == np.int64:
if (data[col].max() <= 2147483620 and
data[col].min() >= -2147483647):
data[col] = data[col].astype(np.int32)
else:
data[col] = data[col].astype(np.float64)
if data[col].max() >= 2 ** 53 or data[col].min() <= -2 ** 53:
ws = precision_loss_doc % ('int64', 'float64')
elif dtype in (np.float32, np.float64):
value = data[col].max()
if np.isinf(value):
msg = 'Column {0} has a maximum value of infinity which is ' \
'outside the range supported by Stata.'
raise ValueError(msg.format(col))
if dtype == np.float32 and value > float32_max:
data[col] = data[col].astype(np.float64)
elif dtype == np.float64:
if value > float64_max:
msg = 'Column {0} has a maximum value ({1}) outside the ' \
                              'range supported by Stata ({2})'
raise ValueError(msg.format(col, value, float64_max))
if ws:
import warnings
warnings.warn(ws, PossiblePrecisionLoss)
return data
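# A small sketch of the rules above (illustrative, not a pandas example):
#   >>> df = pd.DataFrame({'flag': [True, False],
#   ...                    'big': np.array([1, 2], dtype=np.int64)})
#   >>> out = _cast_to_stata_types(df)
#   >>> out['flag'].dtype, out['big'].dtype   # bool -> int8, in-range int64 -> int32
#   (dtype('int8'), dtype('int32'))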
class StataValueLabel(object):
"""
Parse a categorical column and prepare formatted output
    Parameters
    ----------
    catarray : Series with a categorical dtype
        Categorical column whose categories become the Stata value labels
    Attributes
    ----------
    labname : str
        Name of the value label set (taken from the column name)
    value_labels : list of (int, str) tuples
        Category codes paired with their label text, sorted by code
Methods
-------
generate_value_label
"""
def __init__(self, catarray):
self.labname = catarray.name
categories = catarray.cat.categories
self.value_labels = list(zip(np.arange(len(categories)), categories))
self.value_labels.sort(key=lambda x: x[0])
self.text_len = np.int32(0)
self.off = []
self.val = []
self.txt = []
self.n = 0
# Compute lengths and setup lists of offsets and labels
for vl in self.value_labels:
category = vl[1]
if not isinstance(category, string_types):
category = str(category)
import warnings
warnings.warn(value_label_mismatch_doc.format(catarray.name),
ValueLabelTypeMismatch)
self.off.append(self.text_len)
self.text_len += len(category) + 1 # +1 for the padding
self.val.append(vl[0])
self.txt.append(category)
self.n += 1
if self.text_len > 32000:
raise ValueError('Stata value labels for a single variable must '
'have a combined length less than 32,000 '
'characters.')
# Ensure int32
self.off = np.array(self.off, dtype=np.int32)
self.val = np.array(self.val, dtype=np.int32)
# Total length
self.len = 4 + 4 + 4 * self.n + 4 * self.n + self.text_len
def _encode(self, s):
"""
        Python 3 compatibility shim
"""
if compat.PY3:
return s.encode(self._encoding)
else:
return s
def generate_value_label(self, byteorder, encoding):
"""
Parameters
----------
byteorder : str
Byte order of the output
encoding : str
File encoding
Returns
-------
value_label : bytes
Bytes containing the formatted value label
"""
self._encoding = encoding
bio = BytesIO()
null_string = '\x00'
null_byte = b'\x00'
# len
bio.write(struct.pack(byteorder + 'i', self.len))
# labname
labname = self._encode(_pad_bytes(self.labname[:32], 33))
bio.write(labname)
# padding - 3 bytes
for i in range(3):
bio.write(struct.pack('c', null_byte))
# value_label_table
# n - int32
bio.write(struct.pack(byteorder + 'i', self.n))
# textlen - int32
bio.write(struct.pack(byteorder + 'i', self.text_len))
# off - int32 array (n elements)
for offset in self.off:
bio.write(struct.pack(byteorder + 'i', offset))
# val - int32 array (n elements)
for value in self.val:
bio.write(struct.pack(byteorder + 'i', value))
# txt - Text labels, null terminated
for text in self.txt:
bio.write(self._encode(text + null_string))
bio.seek(0)
return bio.read()
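# Illustrative sketch of the bookkeeping above (assumes a categorical Series):
#   >>> s = pd.Series(pd.Categorical(['low', 'high', 'low']), name='grade')
#   >>> svl = StataValueLabel(s)
#   >>> svl.val, svl.off, svl.txt    # codes, text offsets, label texts
#   (array([0, 1], dtype=int32), array([0, 5], dtype=int32), ['high', 'low'])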
class StataMissingValue(StringMixin):
"""
An observation's missing value.
Parameters
-----------
value : int8, int16, int32, float32 or float64
The Stata missing value code
Attributes
----------
string : string
String representation of the Stata missing value
value : int8, int16, int32, float32 or float64
The original encoded missing value
Notes
-----
More information: <http://www.stata.com/help.cgi?missing>
    Integer missing values map the codes '.', '.a', ..., '.z' to the ranges
101 ... 127 (for int8), 32741 ... 32767 (for int16) and 2147483621 ...
2147483647 (for int32). Missing values for floating point data types are
more complex but the pattern is simple to discern from the following table.
np.float32 missing values (float in Stata)
0000007f .
0008007f .a
0010007f .b
...
00c0007f .x
00c8007f .y
00d0007f .z
np.float64 missing values (double in Stata)
000000000000e07f .
000000000001e07f .a
000000000002e07f .b
...
000000000018e07f .x
000000000019e07f .y
00000000001ae07f .z
"""
# Construct a dictionary of missing values
MISSING_VALUES = {}
bases = (101, 32741, 2147483621)
for b in bases:
# Conversion to long to avoid hash issues on 32 bit platforms #8968
MISSING_VALUES[compat.long(b)] = '.'
for i in range(1, 27):
MISSING_VALUES[compat.long(i + b)] = '.' + chr(96 + i)
float32_base = b'\x00\x00\x00\x7f'
increment = struct.unpack('<i', b'\x00\x08\x00\x00')[0]
for i in range(27):
value = struct.unpack('<f', float32_base)[0]
MISSING_VALUES[value] = '.'
if i > 0:
MISSING_VALUES[value] += chr(96 + i)
int_value = struct.unpack('<i', struct.pack('<f', value))[
0] + increment
float32_base = struct.pack('<i', int_value)
float64_base = b'\x00\x00\x00\x00\x00\x00\xe0\x7f'
increment = struct.unpack('q', b'\x00\x00\x00\x00\x00\x01\x00\x00')[0]
for i in range(27):
value = struct.unpack('<d', float64_base)[0]
MISSING_VALUES[value] = '.'
if i > 0:
MISSING_VALUES[value] += chr(96 + i)
int_value = struct.unpack('q', struct.pack('<d', value))[0] + increment
float64_base = struct.pack('q', int_value)
BASE_MISSING_VALUES = {'int8': 101,
'int16': 32741,
'int32': 2147483621,
'float32': struct.unpack('<f', float32_base)[0],
'float64': struct.unpack('<d', float64_base)[0]}
def __init__(self, value):
self._value = value
# Conversion to long to avoid hash issues on 32 bit platforms #8968
value = compat.long(value) if value < 2147483648 else float(value)
self._str = self.MISSING_VALUES[value]
string = property(lambda self: self._str,
doc="The Stata representation of the missing value: "
"'.', '.a'..'.z'")
value = property(lambda self: self._value,
doc='The binary representation of the missing value.')
def __unicode__(self):
return self.string
def __repr__(self):
# not perfect :-/
return "%s(%s)" % (self.__class__, self)
def __eq__(self, other):
return (isinstance(other, self.__class__) and
self.string == other.string and self.value == other.value)
@classmethod
def get_base_missing_value(cls, dtype):
if dtype == np.int8:
value = cls.BASE_MISSING_VALUES['int8']
elif dtype == np.int16:
value = cls.BASE_MISSING_VALUES['int16']
elif dtype == np.int32:
value = cls.BASE_MISSING_VALUES['int32']
elif dtype == np.float32:
value = cls.BASE_MISSING_VALUES['float32']
elif dtype == np.float64:
value = cls.BASE_MISSING_VALUES['float64']
else:
raise ValueError('Unsupported dtype')
return value
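# Quick sketch of the code mapping documented above (illustrative):
#   >>> StataMissingValue(101).string    # generic '.' for int8
#   '.'
#   >>> StataMissingValue(102).string    # first extended missing code
#   '.a'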
class StataParser(object):
_default_encoding = 'latin-1'
def __init__(self, encoding):
if encoding is not None:
if encoding not in VALID_ENCODINGS:
raise ValueError('Unknown encoding. Only latin-1 and ascii '
'supported.')
self._encoding = encoding
# type code.
# --------------------
# str1 1 = 0x01
# str2 2 = 0x02
# ...
# str244 244 = 0xf4
# byte 251 = 0xfb (sic)
# int 252 = 0xfc
# long 253 = 0xfd
# float 254 = 0xfe
# double 255 = 0xff
# --------------------
# NOTE: the byte type seems to be reserved for categorical variables
# with a label, but the underlying variable is -127 to 100
# we're going to drop the label and cast to int
self.DTYPE_MAP = \
dict(
lzip(range(1, 245), ['a' + str(i) for i in range(1, 245)]) +
[
(251, np.int8),
(252, np.int16),
(253, np.int32),
(254, np.float32),
(255, np.float64)
]
)
self.DTYPE_MAP_XML = \
dict(
[
(32768, np.uint8), # Keys to GSO
(65526, np.float64),
(65527, np.float32),
(65528, np.int32),
(65529, np.int16),
(65530, np.int8)
]
)
self.TYPE_MAP = lrange(251) + list('bhlfd')
self.TYPE_MAP_XML = \
dict(
[
# Not really a Q, unclear how to handle byteswap
(32768, 'Q'),
(65526, 'd'),
(65527, 'f'),
(65528, 'l'),
(65529, 'h'),
(65530, 'b')
]
)
# NOTE: technically, some of these are wrong. there are more numbers
# that can be represented. it's the 27 ABOVE and BELOW the max listed
# numeric data type in [U] 12.2.2 of the 11.2 manual
float32_min = b'\xff\xff\xff\xfe'
float32_max = b'\xff\xff\xff\x7e'
float64_min = b'\xff\xff\xff\xff\xff\xff\xef\xff'
float64_max = b'\xff\xff\xff\xff\xff\xff\xdf\x7f'
self.VALID_RANGE = {
'b': (-127, 100),
'h': (-32767, 32740),
'l': (-2147483647, 2147483620),
'f': (np.float32(struct.unpack('<f', float32_min)[0]),
np.float32(struct.unpack('<f', float32_max)[0])),
'd': (np.float64(struct.unpack('<d', float64_min)[0]),
np.float64(struct.unpack('<d', float64_max)[0]))
}
self.OLD_TYPE_MAPPING = {
98: 251, # byte
105: 252, # int
108: 253, # long
102: 254 # float
# don't know old code for double
}
# These missing values are the generic '.' in Stata, and are used
# to replace nans
self.MISSING_VALUES = {
'b': 101,
'h': 32741,
'l': 2147483621,
'f': np.float32(struct.unpack('<f', b'\x00\x00\x00\x7f')[0]),
'd': np.float64(
struct.unpack('<d', b'\x00\x00\x00\x00\x00\x00\xe0\x7f')[0])
}
self.NUMPY_TYPE_MAP = {
'b': 'i1',
'h': 'i2',
'l': 'i4',
'f': 'f4',
'd': 'f8',
'Q': 'u8'
}
# Reserved words cannot be used as variable names
self.RESERVED_WORDS = ('aggregate', 'array', 'boolean', 'break',
'byte', 'case', 'catch', 'class', 'colvector',
'complex', 'const', 'continue', 'default',
'delegate', 'delete', 'do', 'double', 'else',
'eltypedef', 'end', 'enum', 'explicit',
'export', 'external', 'float', 'for', 'friend',
'function', 'global', 'goto', 'if', 'inline',
'int', 'local', 'long', 'NULL', 'pragma',
'protected', 'quad', 'rowvector', 'short',
'typedef', 'typename', 'virtual')
class StataReader(StataParser, BaseIterator):
__doc__ = _stata_reader_doc
def __init__(self, path_or_buf, convert_dates=True,
convert_categoricals=True, index=None,
convert_missing=False, preserve_dtypes=True,
columns=None, order_categoricals=True,
encoding='latin-1', chunksize=None):
super(StataReader, self).__init__(encoding)
self.col_sizes = ()
# Arguments to the reader (can be temporarily overridden in
# calls to read).
self._convert_dates = convert_dates
self._convert_categoricals = convert_categoricals
self._index = index
self._convert_missing = convert_missing
self._preserve_dtypes = preserve_dtypes
self._columns = columns
self._order_categoricals = order_categoricals
if encoding is not None:
if encoding not in VALID_ENCODINGS:
raise ValueError('Unknown encoding. Only latin-1 and ascii '
'supported.')
self._encoding = encoding
self._chunksize = chunksize
# State variables for the file
self._has_string_data = False
self._missing_values = False
self._can_read_value_labels = False
self._column_selector_set = False
self._value_labels_read = False
self._data_read = False
self._dtype = None
self._lines_read = 0
self._native_byteorder = _set_endianness(sys.byteorder)
if isinstance(path_or_buf, str):
path_or_buf, encoding, _ = get_filepath_or_buffer(
path_or_buf, encoding=self._default_encoding
)
if isinstance(path_or_buf, (str, compat.text_type, bytes)):
self.path_or_buf = open(path_or_buf, 'rb')
else:
# Copy to BytesIO, and ensure no encoding
contents = path_or_buf.read()
try:
contents = contents.encode(self._default_encoding)
except:
pass
self.path_or_buf = BytesIO(contents)
self._read_header()
def __enter__(self):
""" enter context manager """
return self
def __exit__(self, exc_type, exc_value, traceback):
""" exit context manager """
self.close()
def close(self):
""" close the handle if its open """
try:
self.path_or_buf.close()
except IOError:
pass
def _read_header(self):
first_char = self.path_or_buf.read(1)
if struct.unpack('c', first_char)[0] == b'<':
self._read_new_header(first_char)
else:
self._read_old_header(first_char)
self.has_string_data = len([x for x in self.typlist
if type(x) is int]) > 0
# calculate size of a data record
self.col_sizes = lmap(lambda x: self._calcsize(x), self.typlist)
# remove format details from %td
self.fmtlist = ["%td" if x.startswith("%td") else x
for x in self.fmtlist]
def _read_new_header(self, first_char):
# The first part of the header is common to 117 and 118.
self.path_or_buf.read(27) # stata_dta><header><release>
self.format_version = int(self.path_or_buf.read(3))
if self.format_version not in [117, 118]:
raise ValueError(_version_error)
self.path_or_buf.read(21) # </release><byteorder>
self.byteorder = self.path_or_buf.read(3) == "MSF" and '>' or '<'
self.path_or_buf.read(15) # </byteorder><K>
self.nvar = struct.unpack(self.byteorder + 'H',
self.path_or_buf.read(2))[0]
self.path_or_buf.read(7) # </K><N>
self.nobs = self._get_nobs()
self.path_or_buf.read(11) # </N><label>
self.data_label = self._get_data_label()
self.path_or_buf.read(19) # </label><timestamp>
self.time_stamp = self._get_time_stamp()
self.path_or_buf.read(26) # </timestamp></header><map>
self.path_or_buf.read(8) # 0x0000000000000000
self.path_or_buf.read(8) # position of <map>
self._seek_vartypes = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 16
self._seek_varnames = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 10
self._seek_sortlist = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 10
self._seek_formats = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 9
self._seek_value_label_names = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 19
# Requires version-specific treatment
self._seek_variable_labels = self._get_seek_variable_labels()
self.path_or_buf.read(8) # <characteristics>
self.data_location = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 6
self.seek_strls = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 7
self.seek_value_labels = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 14
self.typlist, self.dtyplist = self._get_dtypes(self._seek_vartypes)
self.path_or_buf.seek(self._seek_varnames)
self.varlist = self._get_varlist()
self.path_or_buf.seek(self._seek_sortlist)
self.srtlist = struct.unpack(
self.byteorder + ('h' * (self.nvar + 1)),
self.path_or_buf.read(2 * (self.nvar + 1))
)[:-1]
self.path_or_buf.seek(self._seek_formats)
self.fmtlist = self._get_fmtlist()
self.path_or_buf.seek(self._seek_value_label_names)
self.lbllist = self._get_lbllist()
self.path_or_buf.seek(self._seek_variable_labels)
self._variable_labels = self._get_variable_labels()
# Get data type information, works for versions 117-118.
def _get_dtypes(self, seek_vartypes):
self.path_or_buf.seek(seek_vartypes)
raw_typlist = [struct.unpack(self.byteorder + 'H',
self.path_or_buf.read(2))[0]
for i in range(self.nvar)]
def f(typ):
if typ <= 2045:
return typ
try:
return self.TYPE_MAP_XML[typ]
except KeyError:
raise ValueError("cannot convert stata types [{0}]".
format(typ))
typlist = [f(x) for x in raw_typlist]
def f(typ):
if typ <= 2045:
return str(typ)
try:
return self.DTYPE_MAP_XML[typ]
except KeyError:
raise ValueError("cannot convert stata dtype [{0}]"
.format(typ))
dtyplist = [f(x) for x in raw_typlist]
return typlist, dtyplist
def _get_varlist(self):
if self.format_version == 117:
b = 33
elif self.format_version == 118:
b = 129
return [self._null_terminate(self.path_or_buf.read(b))
for i in range(self.nvar)]
# Returns the format list
def _get_fmtlist(self):
if self.format_version == 118:
b = 57
elif self.format_version > 113:
b = 49
elif self.format_version > 104:
b = 12
else:
b = 7
return [self._null_terminate(self.path_or_buf.read(b))
for i in range(self.nvar)]
# Returns the label list
def _get_lbllist(self):
if self.format_version >= 118:
b = 129
elif self.format_version > 108:
b = 33
else:
b = 9
return [self._null_terminate(self.path_or_buf.read(b))
for i in range(self.nvar)]
def _get_variable_labels(self):
if self.format_version == 118:
vlblist = [self._decode(self.path_or_buf.read(321))
for i in range(self.nvar)]
elif self.format_version > 105:
vlblist = [self._null_terminate(self.path_or_buf.read(81))
for i in range(self.nvar)]
else:
vlblist = [self._null_terminate(self.path_or_buf.read(32))
for i in range(self.nvar)]
return vlblist
def _get_nobs(self):
if self.format_version == 118:
return struct.unpack(self.byteorder + 'Q',
self.path_or_buf.read(8))[0]
else:
return struct.unpack(self.byteorder + 'I',
self.path_or_buf.read(4))[0]
def _get_data_label(self):
if self.format_version == 118:
strlen = struct.unpack(self.byteorder + 'H',
self.path_or_buf.read(2))[0]
return self._decode(self.path_or_buf.read(strlen))
elif self.format_version == 117:
strlen = struct.unpack('b', self.path_or_buf.read(1))[0]
return self._null_terminate(self.path_or_buf.read(strlen))
elif self.format_version > 105:
return self._null_terminate(self.path_or_buf.read(81))
else:
return self._null_terminate(self.path_or_buf.read(32))
def _get_time_stamp(self):
if self.format_version == 118:
strlen = struct.unpack('b', self.path_or_buf.read(1))[0]
return self.path_or_buf.read(strlen).decode("utf-8")
elif self.format_version == 117:
strlen = struct.unpack('b', self.path_or_buf.read(1))[0]
return self._null_terminate(self.path_or_buf.read(strlen))
elif self.format_version > 104:
return self._null_terminate(self.path_or_buf.read(18))
else:
raise ValueError()
def _get_seek_variable_labels(self):
if self.format_version == 117:
            self.path_or_buf.read(8)  # <variable_labels>, throw away
# Stata 117 data files do not follow the described format. This is
# a work around that uses the previous label, 33 bytes for each
# variable, 20 for the closing tag and 17 for the opening tag
return self._seek_value_label_names + (33 * self.nvar) + 20 + 17
elif self.format_version == 118:
return struct.unpack(self.byteorder + 'q',
self.path_or_buf.read(8))[0] + 17
else:
raise ValueError()
def _read_old_header(self, first_char):
self.format_version = struct.unpack('b', first_char)[0]
if self.format_version not in [104, 105, 108, 111, 113, 114, 115]:
raise ValueError(_version_error)
self.byteorder = struct.unpack('b', self.path_or_buf.read(1))[
0] == 0x1 and '>' or '<'
self.filetype = struct.unpack('b', self.path_or_buf.read(1))[0]
self.path_or_buf.read(1) # unused
self.nvar = struct.unpack(self.byteorder + 'H',
self.path_or_buf.read(2))[0]
self.nobs = self._get_nobs()
self.data_label = self._get_data_label()
self.time_stamp = self._get_time_stamp()
# descriptors
if self.format_version > 108:
typlist = [ord(self.path_or_buf.read(1))
for i in range(self.nvar)]
else:
buf = self.path_or_buf.read(self.nvar)
typlistb = np.frombuffer(buf, dtype=np.uint8)
typlist = []
for tp in typlistb:
if tp in self.OLD_TYPE_MAPPING:
typlist.append(self.OLD_TYPE_MAPPING[tp])
else:
typlist.append(tp - 127) # py2 string, py3 bytes
try:
self.typlist = [self.TYPE_MAP[typ] for typ in typlist]
except:
raise ValueError("cannot convert stata types [{0}]"
.format(','.join(str(x) for x in typlist)))
try:
self.dtyplist = [self.DTYPE_MAP[typ] for typ in typlist]
except:
raise ValueError("cannot convert stata dtypes [{0}]"
.format(','.join(str(x) for x in typlist)))
if self.format_version > 108:
self.varlist = [self._null_terminate(self.path_or_buf.read(33))
for i in range(self.nvar)]
else:
self.varlist = [self._null_terminate(self.path_or_buf.read(9))
for i in range(self.nvar)]
self.srtlist = struct.unpack(
self.byteorder + ('h' * (self.nvar + 1)),
self.path_or_buf.read(2 * (self.nvar + 1))
)[:-1]
self.fmtlist = self._get_fmtlist()
self.lbllist = self._get_lbllist()
self._variable_labels = self._get_variable_labels()
# ignore expansion fields (Format 105 and later)
# When reading, read five bytes; the last four bytes now tell you
# the size of the next read, which you discard. You then continue
# like this until you read 5 bytes of zeros.
if self.format_version > 104:
while True:
data_type = struct.unpack(self.byteorder + 'b',
self.path_or_buf.read(1))[0]
if self.format_version > 108:
data_len = struct.unpack(self.byteorder + 'i',
self.path_or_buf.read(4))[0]
else:
data_len = struct.unpack(self.byteorder + 'h',
self.path_or_buf.read(2))[0]
if data_type == 0:
break
self.path_or_buf.read(data_len)
# necessary data to continue parsing
self.data_location = self.path_or_buf.tell()
def _calcsize(self, fmt):
return (type(fmt) is int and fmt or
struct.calcsize(self.byteorder + fmt))
def _decode(self, s):
s = s.partition(b"\0")[0]
return s.decode('utf-8')
def _null_terminate(self, s):
if compat.PY3 or self._encoding is not None:
# have bytes not strings, so must decode
s = s.partition(b"\0")[0]
return s.decode(self._encoding or self._default_encoding)
else:
null_byte = "\0"
try:
return s.lstrip(null_byte)[:s.index(null_byte)]
except:
return s
def _read_value_labels(self):
if self.format_version <= 108:
# Value labels are not supported in version 108 and earlier.
return
if self._value_labels_read:
# Don't read twice
return
if self.format_version >= 117:
self.path_or_buf.seek(self.seek_value_labels)
else:
offset = self.nobs * self._dtype.itemsize
self.path_or_buf.seek(self.data_location + offset)
self._value_labels_read = True
self.value_label_dict = dict()
while True:
if self.format_version >= 117:
if self.path_or_buf.read(5) == b'</val': # <lbl>
break # end of value label table
slength = self.path_or_buf.read(4)
if not slength:
break # end of value label table (format < 117)
if self.format_version <= 117:
labname = self._null_terminate(self.path_or_buf.read(33))
else:
labname = self._decode(self.path_or_buf.read(129))
self.path_or_buf.read(3) # padding
n = struct.unpack(self.byteorder + 'I',
self.path_or_buf.read(4))[0]
txtlen = struct.unpack(self.byteorder + 'I',
self.path_or_buf.read(4))[0]
off = np.frombuffer(self.path_or_buf.read(4 * n),
dtype=self.byteorder + "i4",
count=n)
val = np.frombuffer(self.path_or_buf.read(4 * n),
dtype=self.byteorder + "i4",
count=n)
ii = np.argsort(off)
off = off[ii]
val = val[ii]
txt = self.path_or_buf.read(txtlen)
self.value_label_dict[labname] = dict()
for i in range(n):
end = off[i + 1] if i < n - 1 else txtlen
if self.format_version <= 117:
self.value_label_dict[labname][val[i]] = (
self._null_terminate(txt[off[i]:end]))
else:
self.value_label_dict[labname][val[i]] = (
self._decode(txt[off[i]:end]))
if self.format_version >= 117:
self.path_or_buf.read(6) # </lbl>
self._value_labels_read = True
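    # Layout recap (editor's addition), as read above: each value-label entry
    # holds a label name (33 bytes, 129 in format 118), 3 bytes of padding, an
    # int32 count n, an int32 txtlen, n int32 text offsets, n int32 values and
    # txtlen bytes of label text that is sliced using those offsets.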
def _read_strls(self):
self.path_or_buf.seek(self.seek_strls)
# Wrap v_o in a string to allow uint64 values as keys on 32bit OS
self.GSO = {'0': ''}
while True:
if self.path_or_buf.read(3) != b'GSO':
break
if self.format_version == 117:
v_o = struct.unpack(self.byteorder + 'Q',
self.path_or_buf.read(8))[0]
else:
buf = self.path_or_buf.read(12)
# Only tested on little endian file on little endian machine.
if self.byteorder == '<':
buf = buf[0:2] + buf[4:10]
else:
buf = buf[0:2] + buf[6:]
v_o = struct.unpack('Q', buf)[0]
typ = struct.unpack('B', self.path_or_buf.read(1))[0]
length = struct.unpack(self.byteorder + 'I',
self.path_or_buf.read(4))[0]
va = self.path_or_buf.read(length)
if typ == 130:
encoding = 'utf-8'
if self.format_version == 117:
encoding = self._encoding or self._default_encoding
va = va[0:-1].decode(encoding)
# Wrap v_o in a string to allow uint64 values as keys on 32bit OS
self.GSO[str(v_o)] = va
# legacy
@Appender('DEPRECATED: ' + _data_method_doc)
def data(self, **kwargs):
import warnings
warnings.warn("'data' is deprecated, use 'read' instead")
if self._data_read:
raise Exception("Data has already been read.")
self._data_read = True
return self.read(None, **kwargs)
def __next__(self):
return self.read(nrows=self._chunksize or 1)
def get_chunk(self, size=None):
"""
Reads lines from Stata file and returns as dataframe
Parameters
----------
size : int, defaults to None
Number of lines to read. If None, reads whole file.
Returns
-------
DataFrame
"""
if size is None:
size = self._chunksize
return self.read(nrows=size)
@Appender(_read_method_doc)
def read(self, nrows=None, convert_dates=None,
convert_categoricals=None, index=None,
convert_missing=None, preserve_dtypes=None,
columns=None, order_categoricals=None):
# Handle empty file or chunk. If reading incrementally raise
# StopIteration. If reading the whole thing return an empty
# data frame.
if (self.nobs == 0) and (nrows is None):
self._can_read_value_labels = True
self._data_read = True
self.close()
return DataFrame(columns=self.varlist)
# Handle options
if convert_dates is None:
convert_dates = self._convert_dates
if convert_categoricals is None:
convert_categoricals = self._convert_categoricals
if convert_missing is None:
convert_missing = self._convert_missing
if preserve_dtypes is None:
preserve_dtypes = self._preserve_dtypes
if columns is None:
columns = self._columns
if order_categoricals is None:
order_categoricals = self._order_categoricals
if nrows is None:
nrows = self.nobs
if (self.format_version >= 117) and (self._dtype is None):
self._can_read_value_labels = True
self._read_strls()
# Setup the dtype.
if self._dtype is None:
dtype = [] # Convert struct data types to numpy data type
for i, typ in enumerate(self.typlist):
if typ in self.NUMPY_TYPE_MAP:
dtype.append(('s' + str(i), self.byteorder +
self.NUMPY_TYPE_MAP[typ]))
else:
dtype.append(('s' + str(i), 'S' + str(typ)))
dtype = np.dtype(dtype)
self._dtype = dtype
# Read data
dtype = self._dtype
max_read_len = (self.nobs - self._lines_read) * dtype.itemsize
read_len = nrows * dtype.itemsize
read_len = min(read_len, max_read_len)
if read_len <= 0:
# Iterator has finished, should never be here unless
# we are reading the file incrementally
if convert_categoricals:
self._read_value_labels()
self.close()
raise StopIteration
offset = self._lines_read * dtype.itemsize
self.path_or_buf.seek(self.data_location + offset)
read_lines = min(nrows, self.nobs - self._lines_read)
data = np.frombuffer(self.path_or_buf.read(read_len), dtype=dtype,
count=read_lines)
self._lines_read += read_lines
if self._lines_read == self.nobs:
self._can_read_value_labels = True
self._data_read = True
# if necessary, swap the byte order to native here
if self.byteorder != self._native_byteorder:
data = data.byteswap().newbyteorder()
if convert_categoricals:
self._read_value_labels()
if len(data) == 0:
data = DataFrame(columns=self.varlist, index=index)
else:
data = DataFrame.from_records(data, index=index)
data.columns = self.varlist
# If index is not specified, use actual row number rather than
# restarting at 0 for each chunk.
if index is None:
ix = np.arange(self._lines_read - read_lines, self._lines_read)
data = data.set_index(ix)
if columns is not None:
try:
data = self._do_select_columns(data, columns)
except ValueError:
self.close()
raise
# Decode strings
for col, typ in zip(data, self.typlist):
if type(typ) is int:
data[col] = data[col].apply(
self._null_terminate, convert_dtype=True)
data = self._insert_strls(data)
cols_ = np.where(self.dtyplist)[0]
# Convert columns (if needed) to match input type
index = data.index
requires_type_conversion = False
data_formatted = []
for i in cols_:
if self.dtyplist[i] is not None:
col = data.columns[i]
dtype = data[col].dtype
if dtype != np.dtype(object) and dtype != self.dtyplist[i]:
requires_type_conversion = True
data_formatted.append(
(col, Series(data[col], index, self.dtyplist[i])))
else:
data_formatted.append((col, data[col]))
if requires_type_conversion:
data = DataFrame.from_items(data_formatted)
del data_formatted
self._do_convert_missing(data, convert_missing)
if convert_dates:
cols = np.where(lmap(lambda x: x in _date_formats,
self.fmtlist))[0]
for i in cols:
col = data.columns[i]
try:
data[col] = _stata_elapsed_date_to_datetime_vec(
data[col],
self.fmtlist[i])
except ValueError:
self.close()
raise
if convert_categoricals and self.format_version > 108:
data = self._do_convert_categoricals(data,
self.value_label_dict,
self.lbllist,
order_categoricals)
if not preserve_dtypes:
retyped_data = []
convert = False
for col in data:
dtype = data[col].dtype
if dtype in (np.float16, np.float32):
dtype = np.float64
convert = True
elif dtype in (np.int8, np.int16, np.int32):
dtype = np.int64
convert = True
retyped_data.append((col, data[col].astype(dtype)))
if convert:
data = DataFrame.from_items(retyped_data)
return data
def _do_convert_missing(self, data, convert_missing):
# Check for missing values, and replace if found
for i, colname in enumerate(data):
fmt = self.typlist[i]
if fmt not in self.VALID_RANGE:
continue
nmin, nmax = self.VALID_RANGE[fmt]
series = data[colname]
missing = np.logical_or(series < nmin, series > nmax)
if not missing.any():
continue
if convert_missing: # Replacement follows Stata notation
missing_loc = np.argwhere(missing)
umissing, umissing_loc = np.unique(series[missing],
return_inverse=True)
replacement = Series(series, dtype=np.object)
for j, um in enumerate(umissing):
missing_value = StataMissingValue(um)
loc = missing_loc[umissing_loc == j]
replacement.iloc[loc] = missing_value
else: # All replacements are identical
dtype = series.dtype
if dtype not in (np.float32, np.float64):
dtype = np.float64
replacement = Series(series, dtype=dtype)
replacement[missing] = np.nan
data[colname] = replacement
def _insert_strls(self, data):
if not hasattr(self, 'GSO') or len(self.GSO) == 0:
return data
for i, typ in enumerate(self.typlist):
if typ != 'Q':
continue
# Wrap v_o in a string to allow uint64 values as keys on 32bit OS
data.iloc[:, i] = [self.GSO[str(k)] for k in data.iloc[:, i]]
return data
def _do_select_columns(self, data, columns):
if not self._column_selector_set:
column_set = set(columns)
if len(column_set) != len(columns):
raise ValueError('columns contains duplicate entries')
unmatched = column_set.difference(data.columns)
if unmatched:
raise ValueError('The following columns were not found in the '
'Stata data set: ' +
', '.join(list(unmatched)))
# Copy information for retained columns for later processing
dtyplist = []
typlist = []
fmtlist = []
lbllist = []
for col in columns:
i = data.columns.get_loc(col)
dtyplist.append(self.dtyplist[i])
typlist.append(self.typlist[i])
fmtlist.append(self.fmtlist[i])
lbllist.append(self.lbllist[i])
self.dtyplist = dtyplist
self.typlist = typlist
self.fmtlist = fmtlist
self.lbllist = lbllist
self._column_selector_set = True
return data[columns]
def _do_convert_categoricals(self, data, value_label_dict, lbllist,
order_categoricals):
"""
Converts categorical columns to Categorical type.
"""
value_labels = list(compat.iterkeys(value_label_dict))
cat_converted_data = []
for col, label in zip(data, lbllist):
if label in value_labels:
# Explicit call with ordered=True
cat_data = Categorical(data[col], ordered=order_categoricals)
categories = []
for category in cat_data.categories:
if category in value_label_dict[label]:
categories.append(value_label_dict[label][category])
else:
categories.append(category) # Partially labeled
try:
cat_data.categories = categories
except ValueError:
vc = Series(categories).value_counts()
repeats = list(vc.index[vc > 1])
                    repeats = '\n' + '-' * 80 + '\n' + '\n'.join(repeats)
msg = 'Value labels for column {0} are not unique. The ' \
'repeated labels are:\n{1}'.format(col, repeats)
raise ValueError(msg)
# TODO: is the next line needed above in the data(...) method?
cat_data = Series(cat_data, index=data.index)
cat_converted_data.append((col, cat_data))
else:
cat_converted_data.append((col, data[col]))
data = DataFrame.from_items(cat_converted_data)
return data
def data_label(self):
"""Returns data label of Stata file"""
return self.data_label
def variable_labels(self):
"""Returns variable labels as a dict, associating each variable name
with corresponding label
"""
return dict(zip(self.varlist, self._variable_labels))
def value_labels(self):
"""Returns a dict, associating each variable name a dict, associating
each value its corresponding label
"""
if not self._value_labels_read:
self._read_value_labels()
return self.value_label_dict
def _open_file_binary_write(fname, encoding):
if hasattr(fname, 'write'):
# if 'b' not in fname.mode:
return fname
return open(fname, "wb")
def _set_endianness(endianness):
if endianness.lower() in ["<", "little"]:
return "<"
elif endianness.lower() in [">", "big"]:
return ">"
else: # pragma : no cover
raise ValueError("Endianness %s not understood" % endianness)
def _pad_bytes(name, length):
"""
    Takes a char string and pads it with null bytes until it is `length` chars
"""
return name + "\x00" * (length - len(name))
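# Illustrative example (editor's addition):
#     _pad_bytes("var", 5) -> "var\x00\x00"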
def _convert_datetime_to_stata_type(fmt):
"""
Converts from one of the stata date formats to a type in TYPE_MAP
"""
if fmt in ["tc", "%tc", "td", "%td", "tw", "%tw", "tm", "%tm", "tq",
"%tq", "th", "%th", "ty", "%ty"]:
return np.float64 # Stata expects doubles for SIFs
else:
raise NotImplementedError("Format %s not implemented" % fmt)
def _maybe_convert_to_int_keys(convert_dates, varlist):
new_dict = {}
for key in convert_dates:
if not convert_dates[key].startswith("%"): # make sure proper fmts
convert_dates[key] = "%" + convert_dates[key]
if key in varlist:
new_dict.update({varlist.index(key): convert_dates[key]})
else:
if not isinstance(key, int):
raise ValueError("convert_dates key must be a "
"column or an integer")
new_dict.update({key: convert_dates[key]})
return new_dict
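# Illustrative example (editor's addition): with varlist = ['x', 'date'] and
# convert_dates = {'date': 'td'}, the missing '%' is prepended and the column
# name key is replaced by its position, giving {1: '%td'}.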
def _dtype_to_stata_type(dtype, column):
"""
    Converts a numpy dtype to a Stata type, returned as a one-character byte.
See TYPE_MAP and comments for an explanation. This is also explained in
the dta spec.
1 - 244 are strings of this length
Pandas Stata
251 - chr(251) - for int8 byte
252 - chr(252) - for int16 int
253 - chr(253) - for int32 long
254 - chr(254) - for float32 float
255 - chr(255) - for double double
If there are dates to convert, then dtype will already have the correct
type inserted.
"""
# TODO: expand to handle datetime to integer conversion
if dtype.type == np.string_:
return chr(dtype.itemsize)
elif dtype.type == np.object_: # try to coerce it to the biggest string
# not memory efficient, what else could we
# do?
itemsize = max_len_string_array(_ensure_object(column.values))
return chr(max(itemsize, 1))
elif dtype == np.float64:
return chr(255)
elif dtype == np.float32:
return chr(254)
elif dtype == np.int32:
return chr(253)
elif dtype == np.int16:
return chr(252)
elif dtype == np.int8:
return chr(251)
else: # pragma : no cover
raise NotImplementedError("Data type %s not supported." % dtype)
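# Illustrative examples (editor's addition): a float64 column maps to
# chr(255), an int8 column to chr(251), and an object column whose longest
# string has 10 characters maps to chr(10), i.e. a str10 Stata type.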
def _dtype_to_default_stata_fmt(dtype, column):
"""
Maps numpy dtype to stata's default format for this type. Not terribly
important since users can change this in Stata. Semantics are
object -> "%DDs" where DD is the length of the string. If not a string,
raise ValueError
float64 -> "%10.0g"
float32 -> "%9.0g"
int64 -> "%9.0g"
int32 -> "%12.0g"
int16 -> "%8.0g"
int8 -> "%8.0g"
"""
# TODO: Refactor to combine type with format
# TODO: expand this to handle a default datetime format?
if dtype.type == np.object_:
inferred_dtype = infer_dtype(column.dropna())
if not (inferred_dtype in ('string', 'unicode') or
len(column) == 0):
raise ValueError('Writing general object arrays is not supported')
itemsize = max_len_string_array(_ensure_object(column.values))
if itemsize > 244:
raise ValueError(excessive_string_length_error % column.name)
return "%" + str(max(itemsize, 1)) + "s"
elif dtype == np.float64:
return "%10.0g"
elif dtype == np.float32:
return "%9.0g"
elif dtype == np.int32:
return "%12.0g"
elif dtype == np.int8 or dtype == np.int16:
return "%8.0g"
else: # pragma : no cover
raise NotImplementedError("Data type %s not supported." % dtype)
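# Illustrative examples (editor's addition): float64 -> "%10.0g", int16 ->
# "%8.0g", and an object column of strings with maximum length 5 -> "%5s";
# non-string object columns raise ValueError.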
class StataWriter(StataParser):
"""
A class for writing Stata binary dta files
Parameters
----------
fname : str or buffer
String path of file-like object
data : DataFrame
Input to save
convert_dates : dict
Dictionary mapping columns containing datetime types to stata internal
        format to use when writing the dates. Options are 'tc', 'td', 'tm',
'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name.
Datetime columns that do not have a conversion type specified will be
converted to 'tc'. Raises NotImplementedError if a datetime column has
timezone information
write_index : bool
Write the index to Stata dataset.
encoding : str
Default is latin-1. Only latin-1 and ascii are supported.
byteorder : str
        Can be ">", "<", "little", or "big". Default is `sys.byteorder`
time_stamp : datetime
A datetime to use as file creation date. Default is the current time
dataset_label : str
A label for the data set. Must be 80 characters or smaller.
variable_labels : dict
Dictionary containing columns as keys and variable labels as values.
Each label must be 80 characters or smaller.
.. versionadded:: 0.19.0
Returns
-------
writer : StataWriter instance
The StataWriter instance has a write_file method, which will
write the file to the given `fname`.
Raises
------
NotImplementedError
* If datetimes contain timezone information
ValueError
        * Columns listed in convert_dates are neither datetime64[ns]
            nor datetime.datetime
* Column dtype is not representable in Stata
* Column listed in convert_dates is not in DataFrame
* Categorical label contains more than 32,000 characters
Examples
--------
>>> import pandas as pd
>>> data = pd.DataFrame([[1.0, 1]], columns=['a', 'b'])
>>> writer = StataWriter('./data_file.dta', data)
>>> writer.write_file()
Or with dates
>>> from datetime import datetime
>>> data = pd.DataFrame([[datetime(2000,1,1)]], columns=['date'])
>>> writer = StataWriter('./date_data_file.dta', data, {'date' : 'tw'})
>>> writer.write_file()
"""
def __init__(self, fname, data, convert_dates=None, write_index=True,
encoding="latin-1", byteorder=None, time_stamp=None,
data_label=None, variable_labels=None):
super(StataWriter, self).__init__(encoding)
self._convert_dates = {} if convert_dates is None else convert_dates
self._write_index = write_index
self._time_stamp = time_stamp
self._data_label = data_label
self._variable_labels = variable_labels
# attach nobs, nvars, data, varlist, typlist
self._prepare_pandas(data)
if byteorder is None:
byteorder = sys.byteorder
self._byteorder = _set_endianness(byteorder)
self._fname = fname
self.type_converters = {253: np.int32, 252: np.int16, 251: np.int8}
def _write(self, to_write):
"""
Helper to call encode before writing to file for Python 3 compat.
"""
if compat.PY3:
self._file.write(to_write.encode(self._encoding or
self._default_encoding))
else:
self._file.write(to_write)
def _prepare_categoricals(self, data):
"""Check for categorical columns, retain categorical information for
Stata file and convert categorical data to int"""
is_cat = [is_categorical_dtype(data[col]) for col in data]
self._is_col_cat = is_cat
self._value_labels = []
if not any(is_cat):
return data
get_base_missing_value = StataMissingValue.get_base_missing_value
index = data.index
data_formatted = []
for col, col_is_cat in zip(data, is_cat):
if col_is_cat:
self._value_labels.append(StataValueLabel(data[col]))
dtype = data[col].cat.codes.dtype
if dtype == np.int64:
raise ValueError('It is not possible to export '
'int64-based categorical data to Stata.')
values = data[col].cat.codes.values.copy()
# Upcast if needed so that correct missing values can be set
if values.max() >= get_base_missing_value(dtype):
if dtype == np.int8:
dtype = np.int16
elif dtype == np.int16:
dtype = np.int32
else:
dtype = np.float64
values = np.array(values, dtype=dtype)
# Replace missing values with Stata missing value for type
values[values == -1] = get_base_missing_value(dtype)
data_formatted.append((col, values, index))
else:
data_formatted.append((col, data[col]))
return DataFrame.from_items(data_formatted)
def _replace_nans(self, data):
# return data
"""Checks floating point data columns for nans, and replaces these with
        the generic Stata missing value (.)"""
for c in data:
dtype = data[c].dtype
if dtype in (np.float32, np.float64):
if dtype == np.float32:
replacement = self.MISSING_VALUES['f']
else:
replacement = self.MISSING_VALUES['d']
data[c] = data[c].fillna(replacement)
return data
def _check_column_names(self, data):
"""
Checks column names to ensure that they are valid Stata column names.
This includes checks for:
* Non-string names
* Stata keywords
* Variables that start with numbers
* Variables with names that are too long
When an illegal variable name is detected, it is converted, and if
dates are exported, the variable name is propagated to the date
conversion dictionary
"""
converted_names = []
columns = list(data.columns)
original_columns = columns[:]
duplicate_var_id = 0
for j, name in enumerate(columns):
orig_name = name
if not isinstance(name, string_types):
name = text_type(name)
for c in name:
if (c < 'A' or c > 'Z') and (c < 'a' or c > 'z') and \
(c < '0' or c > '9') and c != '_':
name = name.replace(c, '_')
# Variable name must not be a reserved word
if name in self.RESERVED_WORDS:
name = '_' + name
# Variable name may not start with a number
if name[0] >= '0' and name[0] <= '9':
name = '_' + name
name = name[:min(len(name), 32)]
if not name == orig_name:
# check for duplicates
while columns.count(name) > 0:
# prepend ascending number to avoid duplicates
name = '_' + str(duplicate_var_id) + name
name = name[:min(len(name), 32)]
duplicate_var_id += 1
# need to possibly encode the orig name if its unicode
try:
orig_name = orig_name.encode('utf-8')
except:
pass
converted_names.append(
'{0} -> {1}'.format(orig_name, name))
columns[j] = name
data.columns = columns
# Check date conversion, and fix key if needed
if self._convert_dates:
for c, o in zip(columns, original_columns):
if c != o:
self._convert_dates[c] = self._convert_dates[o]
del self._convert_dates[o]
if converted_names:
import warnings
ws = invalid_name_doc.format('\n '.join(converted_names))
warnings.warn(ws, InvalidColumnName)
return data
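    # Illustrative example (editor's addition): a column named "1st value"
    # first has its space replaced, giving "1st_value", and, since it starts
    # with a digit, is then prefixed to "_1st_value"; names longer than 32
    # characters are truncated.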
def _prepare_pandas(self, data):
# NOTE: we might need a different API / class for pandas objects so
# we can set different semantics - handle this with a PR to pandas.io
data = data.copy()
if self._write_index:
data = data.reset_index()
# Ensure column names are strings
data = self._check_column_names(data)
# Check columns for compatibility with stata, upcast if necessary
# Raise if outside the supported range
data = _cast_to_stata_types(data)
# Replace NaNs with Stata missing values
data = self._replace_nans(data)
# Convert categoricals to int data, and strip labels
data = self._prepare_categoricals(data)
self.nobs, self.nvar = data.shape
self.data = data
self.varlist = data.columns.tolist()
dtypes = data.dtypes
# Ensure all date columns are converted
for col in data:
if col in self._convert_dates:
continue
if is_datetime64_dtype(data[col]):
self._convert_dates[col] = 'tc'
self._convert_dates = _maybe_convert_to_int_keys(self._convert_dates,
self.varlist)
for key in self._convert_dates:
new_type = _convert_datetime_to_stata_type(
self._convert_dates[key]
)
dtypes[key] = np.dtype(new_type)
self.typlist = []
self.fmtlist = []
for col, dtype in dtypes.iteritems():
self.fmtlist.append(_dtype_to_default_stata_fmt(dtype, data[col]))
self.typlist.append(_dtype_to_stata_type(dtype, data[col]))
# set the given format for the datetime cols
if self._convert_dates is not None:
for key in self._convert_dates:
self.fmtlist[key] = self._convert_dates[key]
def write_file(self):
self._file = _open_file_binary_write(
self._fname, self._encoding or self._default_encoding
)
try:
self._write_header(time_stamp=self._time_stamp,
data_label=self._data_label)
self._write_descriptors()
self._write_variable_labels()
# write 5 zeros for expansion fields
self._write(_pad_bytes("", 5))
self._prepare_data()
self._write_data()
self._write_value_labels()
finally:
self._file.close()
def _write_value_labels(self):
for vl in self._value_labels:
self._file.write(vl.generate_value_label(self._byteorder,
self._encoding))
def _write_header(self, data_label=None, time_stamp=None):
byteorder = self._byteorder
# ds_format - just use 114
self._file.write(struct.pack("b", 114))
# byteorder
self._write(byteorder == ">" and "\x01" or "\x02")
# filetype
self._write("\x01")
# unused
self._write("\x00")
# number of vars, 2 bytes
self._file.write(struct.pack(byteorder + "h", self.nvar)[:2])
# number of obs, 4 bytes
self._file.write(struct.pack(byteorder + "i", self.nobs)[:4])
# data label 81 bytes, char, null terminated
if data_label is None:
self._file.write(self._null_terminate(_pad_bytes("", 80)))
else:
self._file.write(
self._null_terminate(_pad_bytes(data_label[:80], 80))
)
# time stamp, 18 bytes, char, null terminated
# format dd Mon yyyy hh:mm
if time_stamp is None:
time_stamp = datetime.datetime.now()
elif not isinstance(time_stamp, datetime.datetime):
raise ValueError("time_stamp should be datetime type")
# GH #13856
# Avoid locale-specific month conversion
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug',
'Sep', 'Oct', 'Nov', 'Dec']
month_lookup = {i + 1: month for i, month in enumerate(months)}
ts = (time_stamp.strftime("%d ") +
month_lookup[time_stamp.month] +
time_stamp.strftime(" %Y %H:%M"))
self._file.write(self._null_terminate(ts))
def _write_descriptors(self, typlist=None, varlist=None, srtlist=None,
fmtlist=None, lbllist=None):
nvar = self.nvar
# typlist, length nvar, format byte array
for typ in self.typlist:
self._write(typ)
# varlist names are checked by _check_column_names
# varlist, requires null terminated
for name in self.varlist:
name = self._null_terminate(name, True)
name = _pad_bytes(name[:32], 33)
self._write(name)
# srtlist, 2*(nvar+1), int array, encoded by byteorder
srtlist = _pad_bytes("", 2 * (nvar + 1))
self._write(srtlist)
# fmtlist, 49*nvar, char array
for fmt in self.fmtlist:
self._write(_pad_bytes(fmt, 49))
# lbllist, 33*nvar, char array
for i in range(nvar):
# Use variable name when categorical
if self._is_col_cat[i]:
name = self.varlist[i]
name = self._null_terminate(name, True)
name = _pad_bytes(name[:32], 33)
self._write(name)
else: # Default is empty label
self._write(_pad_bytes("", 33))
def _write_variable_labels(self):
# Missing labels are 80 blank characters plus null termination
blank = _pad_bytes('', 81)
if self._variable_labels is None:
for i in range(self.nvar):
self._write(blank)
return
for col in self.data:
if col in self._variable_labels:
label = self._variable_labels[col]
if len(label) > 80:
raise ValueError('Variable labels must be 80 characters '
'or fewer')
is_latin1 = all(ord(c) < 256 for c in label)
if not is_latin1:
raise ValueError('Variable labels must contain only '
'characters that can be encoded in '
'Latin-1')
self._write(_pad_bytes(label, 81))
else:
self._write(blank)
def _prepare_data(self):
data = self.data
typlist = self.typlist
convert_dates = self._convert_dates
# 1. Convert dates
if self._convert_dates is not None:
for i, col in enumerate(data):
if i in convert_dates:
data[col] = _datetime_to_stata_elapsed_vec(data[col],
self.fmtlist[i])
# 2. Convert bad string data to '' and pad to correct length
dtype = []
data_cols = []
has_strings = False
for i, col in enumerate(data):
typ = ord(typlist[i])
if typ <= 244:
has_strings = True
data[col] = data[col].fillna('').apply(_pad_bytes, args=(typ,))
stype = 'S%d' % typ
dtype.append(('c' + str(i), stype))
string = data[col].str.encode(self._encoding)
data_cols.append(string.values.astype(stype))
else:
dtype.append(('c' + str(i), data[col].dtype))
data_cols.append(data[col].values)
dtype = np.dtype(dtype)
if has_strings:
self.data = np.fromiter(zip(*data_cols), dtype=dtype)
else:
self.data = data.to_records(index=False)
def _write_data(self):
data = self.data
data.tofile(self._file)
def _null_terminate(self, s, as_string=False):
null_byte = '\x00'
if compat.PY3 and not as_string:
s += null_byte
return s.encode(self._encoding)
else:
s += null_byte
return s
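# Editor's illustrative sketch (not part of the original module): a minimal
# round trip through StataWriter and pandas.read_stata, both assumed to be
# available in this version of pandas; the file name is hypothetical.
def _example_stata_round_trip(path='example.dta'):
    import pandas as pd
    # Write a small frame without the index, then read it back as a DataFrame.
    df = pd.DataFrame({'x': [1.0, 2.0], 'name': ['a', 'b']})
    StataWriter(path, df, write_index=False).write_file()
    return pd.read_stata(path)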
| mit |
ElDeveloper/scikit-learn | sklearn/metrics/cluster/tests/test_bicluster.py | 394 | 1770 | """Testing for bicluster metrics module"""
import numpy as np
from sklearn.utils.testing import assert_equal, assert_almost_equal
from sklearn.metrics.cluster.bicluster import _jaccard
from sklearn.metrics import consensus_score
def test_jaccard():
a1 = np.array([True, True, False, False])
a2 = np.array([True, True, True, True])
a3 = np.array([False, True, True, False])
a4 = np.array([False, False, True, True])
assert_equal(_jaccard(a1, a1, a1, a1), 1)
assert_equal(_jaccard(a1, a1, a2, a2), 0.25)
assert_equal(_jaccard(a1, a1, a3, a3), 1.0 / 7)
assert_equal(_jaccard(a1, a1, a4, a4), 0)
def test_consensus_score():
a = [[True, True, False, False],
[False, False, True, True]]
b = a[::-1]
assert_equal(consensus_score((a, a), (a, a)), 1)
assert_equal(consensus_score((a, a), (b, b)), 1)
assert_equal(consensus_score((a, b), (a, b)), 1)
assert_equal(consensus_score((a, b), (b, a)), 1)
assert_equal(consensus_score((a, a), (b, a)), 0)
assert_equal(consensus_score((a, a), (a, b)), 0)
assert_equal(consensus_score((b, b), (a, b)), 0)
assert_equal(consensus_score((b, b), (b, a)), 0)
def test_consensus_score_issue2445():
''' Different number of biclusters in A and B'''
a_rows = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
a_cols = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
idx = [0, 2]
s = consensus_score((a_rows, a_cols), (a_rows[idx], a_cols[idx]))
# B contains 2 of the 3 biclusters in A, so score should be 2/3
assert_almost_equal(s, 2.0/3.0)
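# Editor's illustrative sketch, mirroring the test above (not an original
# scikit-learn test): keeping only 1 of the 3 biclusters of A should give 1/3.
def test_consensus_score_single_bicluster_subset():
    a_rows = np.array([[True, True, False, False],
                       [False, False, True, True],
                       [False, False, False, True]])
    a_cols = np.array([[True, True, False, False],
                       [False, False, True, True],
                       [False, False, False, True]])
    idx = [0]
    s = consensus_score((a_rows, a_cols), (a_rows[idx], a_cols[idx]))
    # B contains 1 of the 3 biclusters in A, so score should be 1/3
    assert_almost_equal(s, 1.0 / 3.0)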
| bsd-3-clause |
freeman-lab/altair | altair/tests/test_utils.py | 5 | 1293 | import pytest
import warnings
import numpy as np
import pandas as pd
from ..utils import parse_shorthand, infer_vegalite_type
def test_parse_shorthand():
def check(s, **kwargs):
assert parse_shorthand(s) == kwargs
check('')
check('foobar', name='foobar')
check('foobar:nominal', type='N', name='foobar')
check('foobar:O', type='O', name='foobar')
check('avg(foobar)', name='foobar', aggregate='avg')
check('min(foobar):time', type='T', name='foobar', aggregate='min')
check('sum(foobar):Q', type='Q', name='foobar', aggregate='sum')
def test_infer_vegalite_type():
def _check(arr, typ):
assert infer_vegalite_type(arr) == typ
_check(np.arange(5, dtype=float), 'Q')
_check(np.arange(5, dtype=int), 'Q')
_check(np.zeros(5, dtype=bool), 'N')
_check(pd.date_range('2012', '2013'), 'T')
_check(pd.timedelta_range(365, periods=12), 'T')
nulled = pd.Series(np.random.randint(10, size=10))
nulled[0] = None
_check(nulled, 'Q')
_check(['a', 'b', 'c'], 'N')
if hasattr(pytest, 'warns'): # added in pytest 2.8
with pytest.warns(UserWarning):
_check([], 'N')
else:
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
_check([], 'N')
| bsd-3-clause |
YinongLong/scikit-learn | sklearn/linear_model/tests/test_randomized_l1.py | 57 | 4736 | # Authors: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.linear_model.randomized_l1 import (lasso_stability_path,
RandomizedLasso,
RandomizedLogisticRegression)
from sklearn.datasets import load_diabetes, load_iris
from sklearn.feature_selection import f_regression, f_classif
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model.base import _preprocess_data
diabetes = load_diabetes()
X = diabetes.data
y = diabetes.target
X = StandardScaler().fit_transform(X)
X = X[:, [2, 3, 6, 7, 8]]
# F-scores of the features, used below to check that the best features get
# the highest feature scores
F, _ = f_regression(X, y)
def test_lasso_stability_path():
# Check lasso stability path
# Load diabetes data and add noisy features
scaling = 0.3
coef_grid, scores_path = lasso_stability_path(X, y, scaling=scaling,
random_state=42,
n_resampling=30)
assert_array_equal(np.argsort(F)[-3:],
np.argsort(np.sum(scores_path, axis=1))[-3:])
def test_randomized_lasso():
# Check randomized lasso
scaling = 0.3
selection_threshold = 0.5
# or with 1 alpha
clf = RandomizedLasso(verbose=False, alpha=1, random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
# or with many alphas
clf = RandomizedLasso(verbose=False, alpha=[1, 0.8], random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_equal(clf.all_scores_.shape, (X.shape[1], 2))
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
X_r = clf.transform(X)
X_full = clf.inverse_transform(X_r)
assert_equal(X_r.shape[1], np.sum(feature_scores > selection_threshold))
assert_equal(X_full.shape, X.shape)
clf = RandomizedLasso(verbose=False, alpha='aic', random_state=42,
scaling=scaling)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(feature_scores, X.shape[1] * [1.])
clf = RandomizedLasso(verbose=False, scaling=-0.1)
assert_raises(ValueError, clf.fit, X, y)
clf = RandomizedLasso(verbose=False, scaling=1.1)
assert_raises(ValueError, clf.fit, X, y)
def test_randomized_logistic():
# Check randomized sparse logistic regression
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
X_orig = X.copy()
feature_scores = clf.fit(X, y).scores_
assert_array_equal(X, X_orig) # fit does not modify X
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
clf = RandomizedLogisticRegression(verbose=False, C=[1., 0.5],
random_state=42, scaling=scaling,
n_resampling=50, tol=1e-3)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
def test_randomized_logistic_sparse():
# Check randomized sparse logistic regression on sparse data
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
# center here because sparse matrices are usually not centered
# labels should not be centered
X, _, _, _, _ = _preprocess_data(X, y, True, True)
X_sp = sparse.csr_matrix(X)
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores = clf.fit(X, y).scores_
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores_sp = clf.fit(X_sp, y).scores_
assert_array_equal(feature_scores, feature_scores_sp)
| bsd-3-clause |
JosmanPS/scikit-learn | sklearn/cross_validation.py | 8 | 58526 | """
The :mod:`sklearn.cross_validation` module includes utilities for cross-
validation and performance evaluation.
"""
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>,
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from math import ceil, floor, factorial
import numbers
import time
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy.sparse as sp
from .base import is_classifier, clone
from .utils import indexable, check_random_state, safe_indexing
from .utils.validation import (_is_arraylike, _num_samples,
check_array, column_or_1d)
from .utils.multiclass import type_of_target
from .externals.joblib import Parallel, delayed, logger
from .externals.six import with_metaclass
from .externals.six.moves import zip
from .metrics.scorer import check_scoring
from .utils.fixes import bincount
__all__ = ['KFold',
'LeaveOneLabelOut',
'LeaveOneOut',
'LeavePLabelOut',
'LeavePOut',
'ShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'check_cv',
'cross_val_score',
'cross_val_predict',
'permutation_test_score',
'train_test_split']
class _PartitionIterator(with_metaclass(ABCMeta)):
"""Base class for CV iterators where train_mask = ~test_mask
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
Parameters
----------
n : int
Total number of elements in dataset.
"""
def __init__(self, n):
if abs(n - int(n)) >= np.finfo('f').eps:
raise ValueError("n must be an integer")
self.n = int(n)
def __iter__(self):
ind = np.arange(self.n)
for test_index in self._iter_test_masks():
train_index = np.logical_not(test_index)
train_index = ind[train_index]
test_index = ind[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices()
"""
for test_index in self._iter_test_indices():
test_mask = self._empty_mask()
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
def _empty_mask(self):
return np.zeros(self.n, dtype=np.bool)
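# Editor's illustrative sketch (not part of scikit-learn): _PartitionIterator
# only requires subclasses to provide _iter_test_indices (or _iter_test_masks);
# train indices are derived as the complement of each test set. The class name
# below is hypothetical.
class _EveryOtherSampleSplit(_PartitionIterator):
    """Toy splitter: one fold tests the even indices, the other the odd ones."""
    def _iter_test_indices(self):
        yield np.arange(0, self.n, 2)
        yield np.arange(1, self.n, 2)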
class LeaveOneOut(_PartitionIterator):
"""Leave-One-Out cross validation iterator.
Provides train/test indices to split data in train test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut(n)`` is equivalent to ``KFold(n, n_folds=n)`` and
``LeavePOut(n, p=1)``.
Due to the high number of test sets (which is the same as the
number of samples) this cross validation method can be very costly.
For large datasets one should favor KFold, StratifiedKFold or
ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = cross_validation.LeaveOneOut(2)
>>> len(loo)
2
>>> print(loo)
sklearn.cross_validation.LeaveOneOut(n=2)
>>> for train_index, test_index in loo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def _iter_test_indices(self):
return range(self.n)
def __repr__(self):
return '%s.%s(n=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
)
def __len__(self):
return self.n
class LeavePOut(_PartitionIterator):
"""Leave-P-Out cross validation iterator
Provides train/test indices to split data in train test sets. This results
in testing on all distinct samples of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(n, p)`` is NOT equivalent to ``KFold(n, n_folds=n // p)``
which creates non-overlapping test sets.
    Due to the high number of iterations which grows combinatorially with the
number of samples this cross validation method can be very costly. For
large datasets one should favor KFold, StratifiedKFold or ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
p : int
Size of the test sets.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = cross_validation.LeavePOut(4, 2)
>>> len(lpo)
6
>>> print(lpo)
sklearn.cross_validation.LeavePOut(n=4, p=2)
>>> for train_index, test_index in lpo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, n, p):
super(LeavePOut, self).__init__(n)
self.p = p
def _iter_test_indices(self):
for comb in combinations(range(self.n), self.p):
yield np.array(comb)
def __repr__(self):
return '%s.%s(n=%i, p=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.p,
)
def __len__(self):
return int(factorial(self.n) / factorial(self.n - self.p)
/ factorial(self.p))
class _BaseKFold(with_metaclass(ABCMeta, _PartitionIterator)):
"""Base class to validate KFold approaches"""
@abstractmethod
def __init__(self, n, n_folds, shuffle, random_state):
super(_BaseKFold, self).__init__(n)
if abs(n_folds - int(n_folds)) >= np.finfo('f').eps:
raise ValueError("n_folds must be an integer")
self.n_folds = n_folds = int(n_folds)
if n_folds <= 1:
raise ValueError(
"k-fold cross validation requires at least one"
" train / test split by setting n_folds=2 or more,"
" got n_folds={0}.".format(n_folds))
if n_folds > self.n:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of samples: {1}.").format(n_folds, n))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.shuffle = shuffle
self.random_state = random_state
class KFold(_BaseKFold):
"""K-Folds cross validation iterator.
Provides train/test indices to split data in train test sets. Split
dataset into k consecutive folds (without shuffling).
    Each fold is then used as a validation set once while the k - 1 remaining
    folds form the training set.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
Pseudo-random number generator state used for random
sampling. If None, use default numpy RNG for shuffling
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = cross_validation.KFold(4, n_folds=2)
>>> len(kf)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.KFold(n=4, n_folds=2, shuffle=False,
random_state=None)
>>> for train_index, test_index in kf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first n % n_folds folds have size n // n_folds + 1, other folds have
size n // n_folds.
See also
--------
StratifiedKFold: take label information into account to avoid building
folds with imbalanced class distributions (for binary or multiclass
classification tasks).
"""
def __init__(self, n, n_folds=3, shuffle=False,
random_state=None):
super(KFold, self).__init__(n, n_folds, shuffle, random_state)
self.idxs = np.arange(n)
if shuffle:
rng = check_random_state(self.random_state)
rng.shuffle(self.idxs)
def _iter_test_indices(self):
n = self.n
n_folds = self.n_folds
fold_sizes = (n // n_folds) * np.ones(n_folds, dtype=np.int)
fold_sizes[:n % n_folds] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield self.idxs[start:stop]
current = stop
def __repr__(self):
return '%s.%s(n=%i, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a variation of KFold that
returns stratified folds. The folds are made by preserving
the percentage of samples for each class.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array-like, [n_samples]
Samples to split in K folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : None, int or RandomState
Pseudo-random number generator state used for random
sampling. If None, use default numpy RNG for shuffling
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = cross_validation.StratifiedKFold(y, n_folds=2)
>>> len(skf)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.StratifiedKFold(labels=[0 0 1 1], n_folds=2,
shuffle=False, random_state=None)
>>> for train_index, test_index in skf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
    All the folds have size trunc(n_samples / n_folds); the last one has the
    complementary size.
"""
def __init__(self, y, n_folds=3, shuffle=False,
random_state=None):
super(StratifiedKFold, self).__init__(
len(y), n_folds, shuffle, random_state)
y = np.asarray(y)
n_samples = y.shape[0]
unique_labels, y_inversed = np.unique(y, return_inverse=True)
label_counts = bincount(y_inversed)
min_labels = np.min(label_counts)
if self.n_folds > min_labels:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of labels for any class cannot"
" be less than n_folds=%d."
% (min_labels, self.n_folds)), Warning)
# don't want to use the same seed in each label's shuffle
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each label so as to respect the
# balance of labels
per_label_cvs = [
KFold(max(c, self.n_folds), self.n_folds, shuffle=self.shuffle,
random_state=rng) for c in label_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_idx, per_label_splits in enumerate(zip(*per_label_cvs)):
for label, (_, test_split) in zip(unique_labels, per_label_splits):
label_test_folds = test_folds[y == label]
# the test split can be too big because we used
# KFold(max(c, self.n_folds), self.n_folds) instead of
# KFold(c, self.n_folds) to make it possible to not crash even
# if the data is not 100% stratifiable for all the labels
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(label_test_folds)]
label_test_folds[test_split] = test_fold_idx
test_folds[y == label] = label_test_folds
self.test_folds = test_folds
self.y = y
def _iter_test_masks(self):
for i in range(self.n_folds):
yield self.test_folds == i
def __repr__(self):
return '%s.%s(labels=%s, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.y,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class LeaveOneLabelOut(_PartitionIterator):
"""Leave-One-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> labels = np.array([1, 1, 2, 2])
>>> lol = cross_validation.LeaveOneLabelOut(labels)
>>> len(lol)
2
>>> print(lol)
sklearn.cross_validation.LeaveOneLabelOut(labels=[1 1 2 2])
>>> for train_index, test_index in lol:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
"""
def __init__(self, labels):
super(LeaveOneLabelOut, self).__init__(len(labels))
# We make a copy of labels to avoid side-effects during iteration
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
def _iter_test_masks(self):
for i in self.unique_labels:
yield self.labels == i
def __repr__(self):
return '%s.%s(labels=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
)
def __len__(self):
return self.n_unique_labels
class LeavePLabelOut(_PartitionIterator):
"""Leave-P-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LeaveOneLabelOut is that
the former builds the test sets with all the samples assigned to
``p`` different values of the labels while the latter uses samples
all assigned the same labels.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
p : int
Number of samples to leave out in the test split.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> labels = np.array([1, 2, 3])
>>> lpl = cross_validation.LeavePLabelOut(labels, p=2)
>>> len(lpl)
3
>>> print(lpl)
sklearn.cross_validation.LeavePLabelOut(labels=[1 2 3], p=2)
>>> for train_index, test_index in lpl:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
"""
def __init__(self, labels, p):
# We make a copy of labels to avoid side-effects during iteration
super(LeavePLabelOut, self).__init__(len(labels))
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
self.p = p
def _iter_test_masks(self):
comb = combinations(range(self.n_unique_labels), self.p)
for idx in comb:
test_index = self._empty_mask()
idx = np.array(idx)
for l in self.unique_labels[idx]:
test_index[self.labels == l] = True
yield test_index
def __repr__(self):
return '%s.%s(labels=%s, p=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
self.p,
)
def __len__(self):
return int(factorial(self.n_unique_labels) /
factorial(self.n_unique_labels - self.p) /
factorial(self.p))
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
self.n = n
self.n_iter = n_iter
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
self.n_train, self.n_test = _validate_shuffle_split(n, test_size,
train_size)
def __iter__(self):
for train, test in self._iter_indices():
yield train, test
return
@abstractmethod
def _iter_indices(self):
"""Generate (train, test) indices"""
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validation iterator.
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in the dataset.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn import cross_validation
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... test_size=.25, random_state=0)
>>> len(rs)
3
>>> print(rs)
... # doctest: +ELLIPSIS
ShuffleSplit(4, n_iter=3, test_size=0.25, ...)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... train_size=0.5, test_size=.25, random_state=0)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
"""
def _iter_indices(self):
rng = check_random_state(self.random_state)
for i in range(self.n_iter):
# random partition
permutation = rng.permutation(self.n)
ind_test = permutation[:self.n_test]
ind_train = permutation[self.n_test:self.n_test + self.n_train]
yield ind_train, ind_test
def __repr__(self):
return ('%s(%d, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.n,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _validate_shuffle_split(n, test_size, train_size):
if test_size is None and train_size is None:
raise ValueError(
'test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind == 'i':
if test_size >= n:
raise ValueError(
'test_size=%d should be smaller '
'than the number of samples %d' % (test_size, n))
else:
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif np.asarray(test_size).dtype.kind == 'f' and \
train_size + test_size > 1.:
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind == 'i':
if train_size >= n:
raise ValueError("train_size=%d should be smaller "
"than the number of samples %d" %
(train_size, n))
else:
raise ValueError("Invalid value for train_size: %r" % train_size)
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n - n_test
else:
if np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n)
else:
n_train = float(train_size)
if test_size is None:
n_test = n - n_train
if n_train + n_test > n:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n))
return int(n_train), int(n_test)
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array, [n_samples]
Labels of samples.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.cross_validation import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(y, 3, test_size=0.5, random_state=0)
>>> len(sss)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(labels=[0 0 1 1], n_iter=3, ...)
>>> for train_index, test_index in sss:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, y, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
super(StratifiedShuffleSplit, self).__init__(
len(y), n_iter, test_size, train_size, random_state)
self.y = np.array(y)
self.classes, self.y_indices = np.unique(y, return_inverse=True)
n_cls = self.classes.shape[0]
if np.min(bincount(self.y_indices)) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of labels for any class cannot"
" be less than 2.")
if self.n_train < n_cls:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_train, n_cls))
if self.n_test < n_cls:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_test, n_cls))
def _iter_indices(self):
rng = check_random_state(self.random_state)
cls_count = bincount(self.y_indices)
p_i = cls_count / float(self.n)
n_i = np.round(self.n_train * p_i).astype(int)
t_i = np.minimum(cls_count - n_i,
np.round(self.n_test * p_i).astype(int))
for n in range(self.n_iter):
train = []
test = []
for i, cls in enumerate(self.classes):
permutation = rng.permutation(cls_count[i])
cls_i = np.where((self.y == cls))[0][permutation]
train.extend(cls_i[:n_i[i]])
test.extend(cls_i[n_i[i]:n_i[i] + t_i[i]])
            # Because of rounding issues (n_train and n_test do not, in
            # general, split evenly across the classes), we may end up
            # here with fewer samples in train and test than asked for.
if len(train) < self.n_train or len(test) < self.n_test:
                # We complete the folds by randomly assigning the missing indices
missing_idx = np.where(bincount(train + test,
minlength=len(self.y)) == 0,
)[0]
missing_idx = rng.permutation(missing_idx)
train.extend(missing_idx[:(self.n_train - len(train))])
test.extend(missing_idx[-(self.n_test - len(test)):])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.y,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
class PredefinedSplit(_PartitionIterator):
"""Predefined split cross validation iterator
Splits the data into training/test set folds according to a predefined
scheme. Each sample can be assigned to at most one test set fold, as
specified by the user through the ``test_fold`` parameter.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
test_fold : "array-like, shape (n_samples,)
test_fold[i] gives the test set fold of sample i. A value of -1
indicates that the corresponding sample is not part of any test set
folds, but will instead always be put into the training fold.
Examples
--------
>>> from sklearn.cross_validation import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> ps = PredefinedSplit(test_fold=[0, 1, -1, 1])
>>> len(ps)
2
>>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
sklearn.cross_validation.PredefinedSplit(test_fold=[ 0 1 -1 1])
>>> for train_index, test_index in ps:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2 3] TEST: [0]
TRAIN: [0 2] TEST: [1 3]
"""
def __init__(self, test_fold):
super(PredefinedSplit, self).__init__(len(test_fold))
self.test_fold = np.array(test_fold, dtype=np.int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def _iter_test_indices(self):
for f in self.unique_folds:
yield np.where(self.test_fold == f)[0]
def __repr__(self):
return '%s.%s(test_fold=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.test_fold)
def __len__(self):
return len(self.unique_folds)
##############################################################################
def _index_param_value(X, v, indices):
"""Private helper function for parameter value indexing."""
if not _is_arraylike(v) or _num_samples(v) != _num_samples(X):
# pass through: skip indexing
return v
if sp.issparse(v):
v = v.tocsr()
return safe_indexing(v, indices)
def cross_val_predict(estimator, X, y=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Generate cross-validated estimates for each input data point
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
cv : cross-validation generator or int, optional, default: None
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if y is binary
or multiclass and estimator is a classifier, or the number
of folds in KFold otherwise. If None, it is equivalent to cv=3.
This generator must include all elements in the test set exactly once.
Otherwise, a ValueError is raised.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
preds : ndarray
This is the result of calling 'predict'
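    Examples
    --------
    A minimal sketch of typical usage; the estimator and dataset below are
    illustrative choices, not requirements of this function:
    >>> from sklearn import datasets, linear_model
    >>> from sklearn.cross_validation import cross_val_predict
    >>> diabetes = datasets.load_diabetes()
    >>> X, y = diabetes.data, diabetes.target
    >>> lasso = linear_model.Lasso()
    >>> y_pred = cross_val_predict(lasso, X, y, cv=3)
    >>> y_pred.shape == y.shape
    True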
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
preds_blocks = parallel(delayed(_fit_and_predict)(clone(estimator), X, y,
train, test, verbose,
fit_params)
for train, test in cv)
preds = [p for p, _ in preds_blocks]
locs = np.concatenate([loc for _, loc in preds_blocks])
if not _check_is_partition(locs, _num_samples(X)):
raise ValueError('cross_val_predict only works for partitions')
inv_locs = np.empty(len(locs), dtype=int)
inv_locs[locs] = np.arange(len(locs))
# Check for sparse predictions
if sp.issparse(preds[0]):
preds = sp.vstack(preds, format=preds[0].format)
else:
preds = np.concatenate(preds)
return preds[inv_locs]
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params):
"""Fit estimator and predict values for a given dataset split.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
Returns
-------
preds : sequence
Result of calling 'estimator.predict'
test : array-like
This is the value of the test parameter
"""
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, _ = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
preds = estimator.predict(X_test)
return preds, test
def _check_is_partition(locs, n):
"""Check whether locs is a reordering of the array np.arange(n)
Parameters
----------
locs : ndarray
integer array to test
n : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(locs) is range(n)
"""
if len(locs) != n:
return False
hit = np.zeros(n, bool)
hit[locs] = True
if not np.all(hit):
return False
return True
def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : cross-validation generator or int, optional, default: None
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if y is binary
or multiclass and estimator is a classifier, or the number
of folds in KFold otherwise. If None, it is equivalent to cv=3.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
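    Examples
    --------
    A minimal sketch; the dataset and estimator here are only illustrative:
    >>> from sklearn import datasets, svm
    >>> from sklearn.cross_validation import cross_val_score
    >>> iris = datasets.load_iris()
    >>> clf = svm.SVC(kernel='linear', C=1)
    >>> scores = cross_val_score(clf, iris.data, iris.target, cv=5)
    >>> scores.shape
    (5,)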
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, None,
fit_params)
for train, test in cv)
return np.array(scores)[:, 0]
class FitFailedWarning(RuntimeWarning):
pass
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scorer : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
        Return the parameters that have been used for the estimator.
Returns
-------
train_score : float, optional
Score on training set, returned only if `return_train_score` is `True`.
test_score : float
Score on test set.
n_test_samples : int
Number of test samples.
scoring_time : float
Time spent for fitting and scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
if verbose > 1:
if parameters is None:
msg = "no parameters to be set"
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
test_score = error_score
if return_train_score:
train_score = error_score
warnings.warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e), FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)"
)
else:
test_score = _score(estimator, X_test, y_test, scorer)
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer)
scoring_time = time.time() - start_time
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score] if return_train_score else []
ret.extend([test_score, _num_samples(X_test), scoring_time])
if return_parameters:
ret.append(parameters)
return ret
def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels."""
if hasattr(estimator, 'kernel') and callable(estimator.kernel):
# cannot compute the kernel values with custom function
raise ValueError("Cannot use a custom kernel function. "
"Precompute the kernel matrix instead.")
if not hasattr(X, "shape"):
if getattr(estimator, "_pairwise", False):
raise ValueError("Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices.")
X_subset = [X[idx] for idx in indices]
else:
if getattr(estimator, "_pairwise", False):
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
if train_indices is None:
X_subset = X[np.ix_(indices, indices)]
else:
X_subset = X[np.ix_(indices, train_indices)]
else:
X_subset = safe_indexing(X, indices)
if y is not None:
y_subset = safe_indexing(y, indices)
else:
y_subset = None
return X_subset, y_subset
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def _permutation_test_score(estimator, X, y, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv:
estimator.fit(X[train], y[train])
avg_score.append(scorer(estimator, X[test], y[test]))
return np.mean(avg_score)
def _shuffle(y, labels, random_state):
"""Return a shuffled copy of y eventually shuffle among same labels."""
if labels is None:
ind = random_state.permutation(len(y))
else:
ind = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
ind[this_mask] = random_state.permutation(ind[this_mask])
return y[ind]
def check_cv(cv, X=None, y=None, classifier=False):
"""Input checker utility for building a CV in a user friendly way.
Parameters
----------
cv : int, a cv generator instance, or None
The input specifying which cv generator to use. It can be an
integer, in which case it is the number of folds in a KFold,
        None, in which case 3-fold cross-validation is used, or another
        object that will then be used as a cv generator.
will then be used as a cv generator.
X : array-like
The data the cross-val object will be applied on.
y : array-like
The target variable for a supervised learning problem.
classifier : boolean optional
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv: a cross-validation generator instance.
The return value is guaranteed to be a cv generator instance, whatever
the input type.
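    Examples
    --------
    A small sketch of the integer-input case (the toy labels below are an
    illustrative assumption):
    >>> import numpy as np
    >>> from sklearn.cross_validation import check_cv
    >>> y = np.array([0, 0, 0, 1, 1, 1])
    >>> cv = check_cv(3, X=np.zeros((6, 2)), y=y, classifier=True)
    >>> len(cv)
    3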
"""
is_sparse = sp.issparse(X)
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if classifier:
if type_of_target(y) in ['binary', 'multiclass']:
cv = StratifiedKFold(y, cv)
else:
cv = KFold(_num_samples(y), cv)
else:
if not is_sparse:
n_samples = len(X)
else:
n_samples = X.shape[0]
cv = KFold(n_samples, cv)
return cv
def permutation_test_score(estimator, X, y, cv=None,
n_permutations=100, n_jobs=1, labels=None,
random_state=0, verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
n_permutations : integer, optional
Number of times to permute ``y``.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
labels : array-like of shape [n_samples] (optional)
Labels constrain the permutation among groups of samples with
a same label.
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
verbose : integer, optional
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape (n_permutations,)
        The scores obtained for each permutation.
pvalue : float
The returned value equals p-value if `scoring` returns bigger
numbers for better scores (e.g., accuracy_score). If `scoring` is
rather a loss function (i.e. when lower is better such as with
`mean_squared_error`) then this is actually the complement of the
p-value: 1 - p-value.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
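    Examples
    --------
    A hedged sketch of a typical call; the estimator and data are
    illustrative, and the run is skipped because the permutation scores
    depend on the random permutations:
    >>> from sklearn import datasets, svm
    >>> from sklearn.cross_validation import permutation_test_score
    >>> iris = datasets.load_iris()
    >>> svc = svm.SVC(kernel='linear')
    >>> score, perm_scores, pvalue = permutation_test_score(
    ...     svc, iris.data, iris.target, cv=5, n_permutations=30)  # doctest: +SKIP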
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, labels, random_state), cv,
scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
permutation_test_score.__test__ = False # to avoid a pb with nosetests
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
Quick utility that wraps input validation and
``next(iter(ShuffleSplit(n_samples)))`` and application to input
data into a single call for splitting (and optionally subsampling)
    data in a one-liner.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
*arrays : sequence of arrays or scipy.sparse matrices with same shape[0]
Python lists or tuples occurring in arrays are converted to 1D numpy
arrays.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
stratify : array-like or None (default is None)
If not None, data is split in a stratified fashion, using this as
the labels array.
Returns
-------
splitting : list of arrays, length=2 * len(arrays)
List containing train-test split of input array.
Examples
--------
>>> import numpy as np
>>> from sklearn.cross_validation import train_test_split
>>> X, y = np.arange(10).reshape((5, 2)), range(5)
>>> X
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(y)
[0, 1, 2, 3, 4]
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> y_train
[2, 0, 3]
>>> X_test
array([[2, 3],
[8, 9]])
>>> y_test
[1, 4]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
dtype = options.pop('dtype', None)
if dtype is not None:
warnings.warn("dtype option is ignored and will be removed in 0.18.",
DeprecationWarning)
allow_nd = options.pop('allow_nd', None)
allow_lists = options.pop('allow_lists', None)
stratify = options.pop('stratify', None)
if allow_lists is not None:
warnings.warn("The allow_lists option is deprecated and will be "
"assumed True in 0.18 and removed.", DeprecationWarning)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if allow_nd is not None:
warnings.warn("The allow_nd option is deprecated and will be "
"assumed True in 0.18 and removed.", DeprecationWarning)
if allow_lists is False or allow_nd is False:
arrays = [check_array(x, 'csr', allow_nd=allow_nd,
force_all_finite=False, ensure_2d=False)
if x is not None else x
for x in arrays]
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
if stratify is not None:
cv = StratifiedShuffleSplit(stratify, test_size=test_size,
train_size=train_size,
random_state=random_state)
else:
n_samples = _num_samples(arrays[0])
cv = ShuffleSplit(n_samples, test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(iter(cv))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False # to avoid a pb with nosetests
| bsd-3-clause |
galaxyproject/tools-iuc | tools/table_compute/scripts/safety.py | 17 | 9977 | import re
class Safety():
"""
    Class to safely evaluate a mathematical expression on scalar
    values or table data.
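    A minimal usage sketch (the expression and the whitelisted name below
    are illustrative assumptions):
        checker = Safety("np.log(elem) + 1", ref_whitelist=["elem"])
        fun_src = checker.generateFunction()  # source of the vetted function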
"""
__allowed_tokens = (
'(', ')', 'if', 'else', 'or', 'and', 'not', 'in',
'+', '-', '*', '/', '%', ',', '!=', '==', '>', '>=', '<', '<=',
'min', 'max', 'sum',
)
__allowed_ref_types = {
'pd.DataFrame': {
'abs', 'add', 'agg', 'aggregate', 'align', 'all', 'any', 'append',
'apply', 'applymap', 'as_matrix', 'asfreq', 'at', 'axes', 'bool',
'clip', 'clip_lower', 'clip_upper', 'columns', 'combine',
'compound', 'corr', 'count', 'cov', 'cummax', 'cummin', 'cumprod',
'cumsum', 'describe', 'div', 'divide', 'dot', 'drop',
'drop_duplicates', 'droplevel', 'dropna', 'duplicated', 'empty',
'eq', 'equals', 'expanding', 'ffill', 'fillna', 'filter', 'first',
'first_valid_index', 'floordiv', 'ge', 'groupby', 'gt', 'head',
'iat', 'iloc', 'index', 'insert', 'interpolate', 'isin', 'isna',
'isnull', 'items', 'iteritems', 'iterrows', 'itertuples', 'ix',
'join', 'keys', 'kurt', 'kurtosis', 'last', 'last_valid_index',
'le', 'loc', 'lookup', 'lt', 'mad', 'mask', 'max', 'mean',
'median', 'melt', 'merge', 'min', 'mod', 'mode', 'mul', 'multiply',
'ndim', 'ne', 'nlargest', 'notna', 'notnull', 'nsmallest',
'nunique', 'pct_change', 'pivot', 'pivot_table', 'pop', 'pow',
'prod', 'product', 'quantile', 'radd', 'rank', 'rdiv', 'replace',
'resample', 'rfloordiv', 'rmod', 'rmul', 'rolling', 'round',
'rpow', 'rsub', 'rtruediv', 'sample', 'select',
'sem', 'shape', 'shift', 'size', 'skew', 'slice_shift',
'squeeze', 'stack', 'std', 'sub', 'subtract', 'sum', 'swapaxes',
'swaplevel', 'T', 'tail', 'take', 'transform', 'transpose',
'truediv', 'truncate', 'tshift', 'unstack', 'var', 'where',
},
'pd.Series': {
'abs', 'add', 'agg', 'aggregate', 'align', 'all', 'any', 'append',
'apply', 'argsort', 'as_matrix', 'asfreq', 'asof', 'astype', 'at',
'at_time', 'autocorr', 'axes', 'between', 'between_time', 'bfill',
'bool', 'cat', 'clip', 'clip_lower', 'clip_upper', 'combine',
'combine_first', 'compound', 'corr', 'count', 'cov', 'cummax',
'cummin', 'cumprod', 'cumsum', 'describe', 'diff', 'div', 'divide',
'divmod', 'dot', 'drop', 'drop_duplicates', 'droplevel', 'dropna',
'dt', 'dtype', 'dtypes', 'duplicated', 'empty', 'eq', 'equals',
'ewm', 'expanding', 'factorize', 'ffill', 'fillna', 'filter',
'first', 'first_valid_index', 'flags', 'floordiv', 'ge', 'groupby',
'gt', 'hasnans', 'head', 'iat', 'idxmax', 'idxmin', 'iloc', 'imag',
'index', 'interpolate', 'is_monotonic', 'is_monotonic_decreasing',
'is_monotonic_increasing', 'is_unique', 'isin', 'isna', 'isnull',
'item', 'items', 'iteritems', 'ix', 'keys', 'kurt', 'kurtosis',
'last', 'last_valid_index', 'le', 'loc', 'lt', 'mad', 'map',
'mask', 'max', 'mean', 'median', 'min', 'mod', 'mode', 'mul',
'multiply', 'name', 'ndim', 'ne', 'nlargest', 'nonzero', 'notna',
'notnull', 'nsmallest', 'nunique', 'pct_change', 'pop', 'pow',
'prod', 'product', 'ptp', 'quantile', 'radd', 'rank', 'rdiv',
'rdivmod', 'real', 'repeat', 'replace', 'resample', 'rfloordiv',
'rmod', 'rmul', 'rolling', 'round', 'rpow', 'rsub', 'rtruediv',
'sample', 'searchsorted', 'select', 'sem', 'shape', 'shift',
'size', 'skew', 'slice_shift', 'sort_index', 'sort_values',
'squeeze', 'std', 'sub', 'subtract', 'sum', 'swapaxes',
'swaplevel', 'T', 'tail', 'take', 'transform', 'transpose',
'truediv', 'truncate', 'tshift', 'unique', 'unstack',
'value_counts', 'var', 'where', 'xs',
},
}
__allowed_qualified = {
# allowed numpy functionality
'np': {
'abs', 'add', 'all', 'any', 'append', 'array', 'bool', 'ceil',
'complex', 'cos', 'cosh', 'cov', 'cumprod', 'cumsum', 'degrees',
'divide', 'divmod', 'dot', 'e', 'empty', 'exp', 'float', 'floor',
'hypot', 'inf', 'int', 'isfinite', 'isin', 'isinf', 'isnan', 'log',
'log10', 'log2', 'max', 'mean', 'median', 'min', 'mod', 'multiply',
'nan', 'ndim', 'pi', 'product', 'quantile', 'radians', 'rank',
'remainder', 'round', 'sin', 'sinh', 'size', 'sqrt', 'squeeze',
'stack', 'std', 'str', 'subtract', 'sum', 'swapaxes', 'take',
'tan', 'tanh', 'transpose', 'unique', 'var', 'where',
},
# allowed math functionality
'math': {
'acos', 'acosh', 'asin', 'asinh', 'atan', 'atan2', 'atanh', 'ceil',
'copysign', 'cos', 'cosh', 'degrees', 'e', 'erf', 'erfc', 'exp',
'expm1', 'fabs', 'factorial', 'floor', 'fmod', 'frexp', 'fsum',
'gamma', 'gcd', 'hypot', 'inf', 'isclose', 'isfinite', 'isinf',
'isnan', 'ldexp', 'lgamma', 'log', 'log10', 'log1p', 'log2',
'modf', 'nan', 'pi', 'pow', 'radians', 'remainder', 'sin', 'sinh',
'sqrt', 'tan', 'tanh', 'tau', 'trunc',
},
# allowed pd functionality
'pd': {
'DataFrame', 'array', 'concat', 'cut', 'date_range', 'factorize',
'interval_range', 'isna', 'isnull', 'melt', 'merge', 'notna',
'notnull', 'period_range', 'pivot', 'pivot_table', 'unique',
'value_counts', 'wide_to_long',
},
}
def __init__(self, expression,
ref_whitelist=None, ref_type=None,
custom_qualified=None):
self.allowed_qualified = self.__allowed_qualified.copy()
if ref_whitelist is None:
self.these = []
else:
self.these = ref_whitelist
if ref_type is None or ref_type not in self.__allowed_ref_types:
self.allowed_qualified['_this'] = set()
else:
self.allowed_qualified[
'_this'
] = self.__allowed_ref_types[ref_type]
if custom_qualified is not None:
self.allowed_qualified.update(custom_qualified)
self.expr = expression
self.__assertSafe()
def generateFunction(self):
"Generates a function to be evaluated outside the class"
cust_fun = "def fun(%s):\n\treturn(%s)" % (self.these[0], self.expr)
return cust_fun
def __assertSafe(self):
indeed, problematic_token = self.__isSafeStatement()
if not indeed:
self.detailedExcuse(problematic_token)
raise ValueError("Custom Expression is not safe.")
@staticmethod
def detailedExcuse(word):
"Gives a verbose statement for why users should not use some specific operators."
mess = None
if word == "for":
mess = "for loops and comprehensions are not allowed. Use numpy or pandas table operations instead."
elif word == ":":
mess = "Colons are not allowed. Use inline Python if/else statements."
elif word == "=":
mess = "Variable assignment is not allowed. Use object methods to substitute values."
elif word in ("[", "]"):
mess = "Direct indexing of arrays is not allowed. Use numpy or pandas functions/methods to address specific parts of tables."
else:
mess = "Not an allowed token in this operation"
print("( '%s' ) %s" % (word, mess))
def __isSafeStatement(self):
"""
Determines if a user-expression is safe to evaluate.
To be considered safe an expression may contain only:
- standard Python operators and numbers
- inline conditional expressions
- select functions and objects
by default, these come from the math, numpy and pandas
libraries, and must be qualified with the modules' conventional
names math, np, pd; can be overridden at the instance level
- references to a whitelist of objects (pd.DataFrames by default)
and their methods
"""
safe = True
# examples of user-expressions
# '-math.log(1 - elem/4096) * 4096 if elem != 1 else elem - 0.5'
# 'vec.median() + vec.sum()'
# 1. Break expressions into tokens
# e.g.,
# [
# '-', 'math.log', '(', '1', '-', 'elem', '/', '4096', ')', '*',
# '4096', 'if', 'elem', '!=', '1', 'else', 'elem', '-', '0.5'
# ]
# or
# ['vec.median', '(', ')', '+', 'vec.sum', '(', ')']
tokens = [
e for e in re.split(
r'([a-zA-Z0-9_.]+|[^a-zA-Z0-9_.() ]+|[()])', self.expr
) if e.strip()
]
# 2. Subtract allowed standard tokens
rem = [e for e in tokens if e not in self.__allowed_tokens]
# 3. Subtract allowed qualified objects from allowed modules
# and whitelisted references and their attributes
rem2 = []
for e in rem:
parts = e.split('.')
if len(parts) == 1:
if parts[0] in self.these:
continue
if len(parts) == 2:
if parts[0] in self.these:
parts[0] = '_this'
if parts[0] in self.allowed_qualified:
if parts[1] in self.allowed_qualified[parts[0]]:
continue
rem2.append(e)
# 4. Assert that rest are real numbers or strings
e = ''
for e in rem2:
try:
_ = float(e)
except ValueError:
safe = False
break
return safe, e
| mit |
njpayne/euclid | python/clustering.py | 1 | 3241 | from sklearn import decomposition, cluster, feature_selection
import matplotlib.pyplot as plt
import numpy as np
import os
import pylab
data_location = "../Data" # read data from os.path.join(data_location, <filename>)
results_location = "Results" # save results text/graph to os.path.join(results_location, <filename>)
def clean_features(data, header, **kwargs):
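    """Drop features whose variance falls below ``min_feature_variance``
    and return the reduced data together with the matching header entries.
    """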
#extract parameters
min_feature_variance = kwargs.get('min_feature_variance', .8 * (1 - .8))
#remove features with variance below the threshold
feature_selector = feature_selection.VarianceThreshold(threshold=min_feature_variance)
reduced_data = feature_selector.fit_transform(data)
#create a mask of features selected
mask = feature_selector.get_support(indices = True)
#select the same indexes from the header
reduced_header = np.take(header, mask)
return reduced_data, reduced_header
def univariate_selection(features, labels, **kwargs):
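    """Score features against ``labels`` with a univariate test
    (f_regression for regression, chi2 or f_classif for classification),
    save a bar plot of the normalized scores, and return the data reduced
    to the ``n_best`` features together with the raw scores.
    """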
#extract parameters
is_regression = kwargs.get('is_regression', False)
n_best = kwargs.get('n_best', 2)
#select scoring function
#For regression: f_regression
#For classification: chi2 or f_classif
if(is_regression):
scoring_function = feature_selection.f_regression
else:
#chi2 requires non negative features
if(features.min() < 0):
scoring_function = feature_selection.f_classif
else:
scoring_function = feature_selection.chi2
#establish the selection function
selector = feature_selection.SelectKBest(scoring_function, k=n_best)
#train the function
selector.fit(features, labels.flatten())
#get the scores
feature_scores = selector.scores_
#transform the data
    transformed_data = selector.transform(features)
#chart the results
scores = -np.log10(selector.pvalues_)
scores /= scores.max()
X_indices = np.arange(features.shape[-1])
plt.figure(1)
plt.clf()
plt.bar(X_indices - .45, scores, width=.2,
label=r'Univariate score ($-Log(p_{value})$)', color='g')
plt.title("Comparing feature selection")
plt.xlabel('Feature number')
plt.yticks(())
plt.axis('tight')
plt.legend(loc='upper right')
pylab.savefig(os.path.join(results_location, "Univariate Selection %d Features" % n_best))
    return transformed_data, feature_scores
def pca_reduce(data, **kwargs):
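    """Project ``data`` onto its principal components with RandomizedPCA
    (whitened by default) and return the reduced array.
    """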
#extract parameters
n_components = kwargs.get('n_components', 'mle')
copy = kwargs.get('copy', True)
whiten = kwargs.get('whiten', True)
#set up PCA function
pca = decomposition.RandomizedPCA(n_components = n_components, copy = copy, whiten = whiten)
#fit the data
pca.fit(data)
#run the reduction
reduced_data = pca.transform(data)
return reduced_data
def k_means_cluster(data, **kwargs):
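    """Fit a k-means model to ``data`` and return the data transformed to
    cluster-distance space (one column of distances per cluster).
    """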
    #extract parameters
n_clusters = kwargs.get('n_clusters', 10)
n_init = kwargs.get('n_init', 10)
#set up the clustering function
estimator = cluster.KMeans(n_clusters = n_clusters, n_init = n_init)
#fit the data to the training set
estimator.fit(data)
#transform the data
    transformed_data = estimator.transform(data)
return transformed_data | gpl-2.0 |
mojoboss/scikit-learn | examples/ensemble/plot_forest_importances.py | 241 | 1761 | """
=========================================
Feature importances with forests of trees
=========================================
This example shows the use of forests of trees to evaluate the importance of
features on an artificial classification task. The red bars are the feature
importances of the forest, along with their inter-trees variability.
As expected, the plot suggests that 3 features are informative, while the
remaining are not.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
n_classes=2,
random_state=0,
shuffle=False)
# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=250,
random_state=0)
forest.fit(X, y)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(10):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(10), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(10), indices)
plt.xlim([-1, 10])
plt.show()
| bsd-3-clause |
lukauskas/seaborn | seaborn/algorithms.py | 3 | 4233 | """Algorithms to support fitting routines in seaborn plotting functions."""
from __future__ import division
import numpy as np
from scipy import stats
import warnings
from .external.six import string_types
from .external.six.moves import range
def bootstrap(*args, **kwargs):
"""Resample one or more arrays with replacement and store aggregate values.
Positional arguments are a sequence of arrays to bootstrap along the first
axis and pass to a summary function.
Keyword arguments:
n_boot : int, default 10000
Number of iterations
axis : int, default None
Will pass axis to ``func`` as a keyword argument.
units : array, default None
Array of sampling unit IDs. When used the bootstrap resamples units
and then observations within units instead of individual
datapoints.
smooth : bool, default False
If True, performs a smoothed bootstrap (draws samples from a kernel
        density estimate); only works for one-dimensional inputs and cannot
        be used when `units` is present.
func : string or callable, default np.mean
Function to call on the args that are passed in. If string, tries
to use as named method on numpy array.
random_seed : int | None, default None
Seed for the random number generator; useful if you want
reproducible resamples.
Returns
-------
boot_dist: array
array of bootstrapped statistic values
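    Examples
    --------
    A small illustrative sketch (the input data here are made up):
    >>> import numpy as np
    >>> from seaborn.algorithms import bootstrap
    >>> x = np.random.RandomState(0).normal(size=100)
    >>> boot_means = bootstrap(x, n_boot=1000, func=np.mean)
    >>> boot_means.shape
    (1000,)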
"""
# Ensure list of arrays are same length
if len(np.unique(list(map(len, args)))) > 1:
raise ValueError("All input arrays must have the same length")
n = len(args[0])
# Default keyword arguments
n_boot = kwargs.get("n_boot", 10000)
func = kwargs.get("func", np.mean)
axis = kwargs.get("axis", None)
units = kwargs.get("units", None)
smooth = kwargs.get("smooth", False)
random_seed = kwargs.get("random_seed", None)
if axis is None:
func_kwargs = dict()
else:
func_kwargs = dict(axis=axis)
# Initialize the resampler
rs = np.random.RandomState(random_seed)
# Coerce to arrays
args = list(map(np.asarray, args))
if units is not None:
units = np.asarray(units)
# Allow for a function that is the name of a method on an array
if isinstance(func, string_types):
def f(x):
return getattr(x, func)()
else:
f = func
# Do the bootstrap
if smooth:
msg = "Smooth bootstraps are deprecated and will be removed."
warnings.warn(msg)
return _smooth_bootstrap(args, n_boot, f, func_kwargs)
if units is not None:
return _structured_bootstrap(args, n_boot, units, f,
func_kwargs, rs)
boot_dist = []
for i in range(int(n_boot)):
resampler = rs.randint(0, n, n)
sample = [a.take(resampler, axis=0) for a in args]
boot_dist.append(f(*sample, **func_kwargs))
return np.array(boot_dist)
def _structured_bootstrap(args, n_boot, units, func, func_kwargs, rs):
"""Resample units instead of datapoints."""
unique_units = np.unique(units)
n_units = len(unique_units)
args = [[a[units == unit] for unit in unique_units] for a in args]
boot_dist = []
for i in range(int(n_boot)):
resampler = rs.randint(0, n_units, n_units)
sample = [np.take(a, resampler, axis=0) for a in args]
lengths = map(len, sample[0])
resampler = [rs.randint(0, n, n) for n in lengths]
sample = [[c.take(r, axis=0) for c, r in zip(a, resampler)]
for a in sample]
sample = list(map(np.concatenate, sample))
boot_dist.append(func(*sample, **func_kwargs))
return np.array(boot_dist)
def _smooth_bootstrap(args, n_boot, func, func_kwargs):
"""Bootstrap by resampling from a kernel density estimate."""
n = len(args[0])
boot_dist = []
kde = [stats.gaussian_kde(np.transpose(a)) for a in args]
for i in range(int(n_boot)):
sample = [a.resample(n).T for a in kde]
boot_dist.append(func(*sample, **func_kwargs))
return np.array(boot_dist)
| bsd-3-clause |
nickcdryan/rep | rep/report/metrics.py | 3 | 10873 | """
This file contains definitions for useful metrics in specific REP format.
In the general case, metrics follow the standard sklearn convention for **estimators** and provide:
* constructor (you should create an instance of the metric!):
>>> metric = RocAuc(parameter=1)
* fitting, where checks and heavy computations are performed
(this step is needed for ranking metrics, uniformity metrics):
>>> metric.fit(X, y, sample_weight=None)
* computation of metrics by probabilities:
>>> proba = classifier.predict_proba(X)
>>> metrics(proba)
This way metrics can be used in learning curves, for instance. Once fitted,
computation for every stage will be very fast.
Correspondence between physical terms and ML terms
**************************************************
Some notation used below:
* IsSignal (IsS) --- is really signal
* AsSignal (AsS) --- classified as signal
* IsBackgroundAsSignal - background, but classified as signal
... and so on. Cute, right?
There are many ways to denote these things:
* tpr = s = isSasS / isS
* fpr = b = isBasS / isB
Here we used normalized s and b, while physicists usually normalize
them to particular values of the expected amounts of s and b.
* signal efficiency = tpr = s
the following term is used only in HEP:
* background efficiency = fpr = b
"""
from __future__ import division, print_function, absolute_import
import numpy
from sklearn.base import BaseEstimator
from sklearn.metrics import roc_auc_score, roc_curve
from ..utils import check_arrays
from ..utils import check_sample_weight, weighted_percentile
__author__ = 'Alex Rogozhnikov'
class MetricMixin(object):
"""Class with helpful methods for metrics,
metrics are expected (but not obliged) to be derived from it."""
def _prepare(self, X, y, sample_weight):
"""
Preparation
:param pandas.DataFrame X: data shape [n_samples, n_features]
:param y: labels of events - array-like of shape [n_samples]
:param sample_weight: weight of events,
array-like of shape [n_samples] or None if all weights are equal
:return: X, y, sample_weight, indices
"""
assert len(X) == len(y), 'Lengths are different!'
sample_weight = check_sample_weight(y, sample_weight=sample_weight)
self.classes_, indices = numpy.unique(y, return_inverse=True)
self.probabilities_shape = (len(y), len(self.classes_))
return X, y, sample_weight, indices
def fit(self, X, y, sample_weight=None):
"""
Prepare metrics for usage, preprocessing is done in this function.
:param pandas.DataFrame X: data shape [n_samples, n_features]
:param y: labels of events - array-like of shape [n_samples]
:param sample_weight: weight of events,
array-like of shape [n_samples] or None if all weights are equal
:return: self
"""
return self
class RocAuc(BaseEstimator, MetricMixin):
"""
Computes area under the ROC curve.
:param int positive_label: label of class, in case of more then two classes,
will compute ROC AUC for this specific class vs others
"""
def __init__(self, positive_label=1):
self.positive_label = positive_label
def fit(self, X, y, sample_weight=None):
"""
Prepare metrics for usage, preprocessing is done in this function.
:param pandas.DataFrame X: data shape [n_samples, n_features]
:param y: labels of events - array-like of shape [n_samples]
:param sample_weight: weight of events,
array-like of shape [n_samples] or None if all weights are equal
:return: self
"""
X, y, self.sample_weight, _ = self._prepare(X, y, sample_weight=sample_weight)
# computing index of positive label
self.positive_index = self.classes_.tolist().index(self.positive_label)
self.true_class = (numpy.array(y) == self.positive_label)
return self
def __call__(self, y, proba, sample_weight=None):
assert numpy.all(self.classes_ < proba.shape[1])
return roc_auc_score(self.true_class, proba[:, self.positive_index],
sample_weight=self.sample_weight)
class LogLoss(BaseEstimator, MetricMixin):
"""
Log loss,
which is the same as minus log-likelihood,
and the same as logistic loss,
and the same as cross-entropy loss.
"""
def __init__(self, regularization=1e-15):
self.regularization = regularization
def fit(self, X, y, sample_weight=None):
"""
Prepare metrics for usage, preprocessing is done in this function.
:param pandas.DataFrame X: data shape [n_samples, n_features]
:param y: labels of events - array-like of shape [n_samples]
:param sample_weight: weight of events,
array-like of shape [n_samples] or None if all weights are equal
:return: self
"""
X, y, sample_weight, self.class_indices = self._prepare(X, y, sample_weight=sample_weight)
self.sample_weight = sample_weight / sample_weight.sum()
self.samples_indices = numpy.arange(len(X))
return self
def __call__(self, y, proba, sample_weight=None):
# assert proba.shape == self.probabilities_shape, 'Wrong shape of probabilities'
assert numpy.all(self.classes_ < proba.shape[1])
correct_probabilities = proba[self.samples_indices, self.class_indices]
return - (numpy.log(correct_probabilities + self.regularization) * self.sample_weight).sum()
class OptimalMetric(BaseEstimator, MetricMixin):
"""
Class to calculate optimal threshold on predictions using some metric
:param function metric: metrics(s, b) -> float
:param expected_s: float, total weight of signal
:param expected_b: float, total weight of background
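    A hedged usage sketch in the spirit of the module examples above; the
    metric function below is an illustrative assumption:
    >>> def my_significance(s, b):
    ...     return s / (b + 1.) ** 0.5
    >>> metric = OptimalMetric(my_significance, expected_s=100., expected_b=1000.)
    >>> # best_value = metric(y_true, proba), where proba has shape [n_samples, 2]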
"""
def __init__(self, metric, expected_s=1., expected_b=1., signal_label=1):
self.metric = metric
self.expected_s = expected_s
self.expected_b = expected_b
self.signal_label = signal_label
def compute(self, y_true, proba, sample_weight=None):
"""
Compute metric for each possible prediction threshold
:param y_true: array-like true labels
:param proba: array-like of shape [n_samples, 2] with predicted probabilities
:param sample_weight: array-like weight
:rtype: tuple(array, array)
:return: thresholds and corresponding metric values
"""
y_true, proba, sample_weight = check_arrays(y_true, proba, sample_weight)
pred = proba[:, self.signal_label]
b, s, thresholds = roc_curve(y_true == self.signal_label, pred,
sample_weight=sample_weight)
metric_values = self.metric(s * self.expected_s, b * self.expected_b)
thresholds = numpy.clip(thresholds, pred.min() - 1e-6, pred.max() + 1e-6)
return thresholds, metric_values
def plot_vs_cut(self, y_true, proba, sample_weight=None):
"""
Compute metric for each possible prediction threshold
:param y_true: array-like true labels
:param proba: array-like of shape [n_samples, 2] with predicted probabilities
:param sample_weight: array-like weight
:rtype: plotting.FunctionsPlot
"""
from .. import plotting
y_true, proba, sample_weight = check_arrays(y_true, proba, sample_weight)
ordered_proba, metrics_val = self.compute(y_true, proba, sample_weight)
ind = numpy.argmax(metrics_val)
print('Optimal cut=%1.4f, quality=%1.4f' % (ordered_proba[ind], metrics_val[ind]))
plot_fig = plotting.FunctionsPlot({self.metric.__name__: (ordered_proba, metrics_val)})
plot_fig.xlabel = 'cut'
plot_fig.ylabel = 'metrics ' + self.metric.__name__
return plot_fig
def __call__(self, y_true, proba, sample_weight=None):
""" proba is predicted probabilities of shape [n_samples, 2] """
thresholds, metrics_val = self.compute(y_true, proba, sample_weight)
return numpy.max(metrics_val)
def significance(s, b):
"""
Approximate significance of discovery:
s / sqrt(b).
Here we use normalization, so maximal s and b are equal to 1.
"""
return s / numpy.sqrt(b + 1e-6)
class OptimalSignificance(OptimalMetric):
"""
Optimal values of significance:
s / sqrt(b)
:param float expected_s: expected amount of signal
:param float expected_b: expected amount of background
"""
def __init__(self, expected_s=1., expected_b=1.):
OptimalMetric.__init__(self, metric=significance,
expected_s=expected_s,
expected_b=expected_b)
def ams(s, b, br=10.):
"""
Regularized approximate median significance
:param s: amount of signal passed
:param b: amount of background passed
:param br: regularization
"""
radicand = 2 * ((s + b + br) * numpy.log(1.0 + s / (b + br)) - s)
return numpy.sqrt(radicand)
class OptimalAMS(OptimalMetric):
"""
    Optimal values of AMS (approximate median significance).
    Default values of expected_s and expected_b are taken from
    the HiggsML challenge.
:param float expected_s: expected amount of signal
:param float expected_b: expected amount of background
"""
def __init__(self, expected_s=691.988607712, expected_b=410999.847):
OptimalMetric.__init__(self, metric=ams,
expected_s=expected_s,
expected_b=expected_b)
class FPRatTPR(BaseEstimator, MetricMixin):
"""
Fix TPR value on roc curve and return FPR value.
"""
def __init__(self, tpr):
self.tpr = tpr
def __call__(self, y, proba, sample_weight=None):
if sample_weight is None:
sample_weight = numpy.ones(len(proba))
y, proba, sample_weight = check_arrays(y, proba, sample_weight)
threshold = weighted_percentile(proba[y == 1, 1], (1. - self.tpr), sample_weight=sample_weight[y == 1])
return numpy.sum(sample_weight[(y == 0) & (proba[:, 1] >= threshold)]) / sum(sample_weight[y == 0])
class TPRatFPR(BaseEstimator, MetricMixin):
"""
Fix FPR value on roc curve and return TPR value.
"""
def __init__(self, fpr):
self.fpr = fpr
def __call__(self, y, proba, sample_weight=None):
if sample_weight is None:
sample_weight = numpy.ones(len(proba))
y, proba, sample_weight = check_arrays(y, proba, sample_weight)
threshold = weighted_percentile(proba[y == 0, 1], (1 - self.fpr), sample_weight=sample_weight[y == 0])
return numpy.sum(sample_weight[(y == 1) & (proba[:, 1] > threshold)]) / sum(sample_weight[y == 1])
| apache-2.0 |
cancro7/gem5 | stats.py | 1 | 5030 | #!/usr/bin/env python
# This script parses the content of a list of files.
# To be correctly parsed that content should have
# the following format:
# NAME VALUE
# which is the exact format of the Gem5's stats files.
import sys
from os import listdir
from os.path import isdir
import argparse
# GOLDEN instance reference name
GOLDEN = 'GOLDEN.txt'
# Command line arguments
parser = argparse.ArgumentParser(description='Gem5 Stats')
parser.add_argument('-d', '--dir', type=str, dest='directory', required=True,
help='The root directory of the stats files to be parsed')
parser.add_argument('-g', '--graphical', dest='graphicalStats',
action='store_true',
help='It is true if we want to display graphical stats')
parser.set_defaults(graphicalStats=False)
parser.add_argument('-s', '--stats', type=str, dest='stats', required=True,
nargs='+', help='The statistics we want to display')
parser.add_argument('-c', '--csv', dest='csvStats',
action='store_true',
help='If true a CSV file will be generated for the given statistics')
parser.set_defaults(csvStats=False)
args = parser.parse_args()
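# Example invocation (directory and stat names below are only illustrative):
#   ./stats.py -d m5out_runs -s sim_seconds system.cpu.numCycles -g -c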
# This data structure contains key-value fields where
# key is equal to a file name and value is equa to
# its respective parsed Stat object
statInstances = {}
# Print some simple statistics
def printstat(stats, props):
for p in props:
print p, "values"
for inst in stats:
print inst, stats[inst].get(p, 'not found')
# Display a barchart displaying the given statistics
def showgraph(stats, props):
# Iterate over all stats
for p in props:
# Store labels for the horizontal axis
labels = []
        # Store values for the vertical axis
values = []
for inst in stats:
labels.append(inst)
values.append(stats[inst].get(p, 0))
# Put GOLDEN value at first position
idx = labels.index(GOLDEN)
labels[idx], labels[0] = labels[0], labels[idx]
values[idx], values[0] = values[0], values[idx]
# Plot labels and values
fig, ax = plt.subplots()
rects = ax.bar(np.arange(len(labels)), tuple(values),
width=0.3, color='r')
ax.set_ylabel(p)
ax.set_title('Variation of ' + p)
ax.set_xticklabels(tuple(labels))
ax.set_xticks(np.arange(len(labels)) + 0.3 / 2)
autolabel(rects, ax)
plt.show()
# Create a CSV file containing one row per instance and the requested stats
def createcsv(stats, props):
with open('stats.csv', 'w') as csvfile:
swriter = csv.writer(csvfile,
delimiter=",",quotechar="|",quoting=csv.QUOTE_MINIMAL)
# Iterate over all instances
for inst in stats:
row = []
row.append(inst) # Instance name should be the first entry
# Iterate all the requested stats
for p in props:
row.append(stats[inst].get(p, 0))
# Write the CSV entry
swriter.writerow(row)
# Attach a text label above each bar displaying its height
def autolabel(rects, ax):
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2., 1.05*height,
'%d' % int(height),
ha='center', va='bottom')
# List all files in the directory
if __name__ == "__main__":
# Load statistics
    # Start by listing all the files in the directory
for f in listdir(args.directory):
# If the file is a directory skip it
if isdir(f):
continue
# Otherwise start reading the file
with open("/".join([args.directory, f])) as statFile:
# Create the new statistic object
stat = {}
# Read all statistics
for line in statFile:
fields = line.split()
# if the line has less then 2 fields, skip it
if len(fields) < 2:
continue
# If the first character of the first field is '-'
# it means that the line is something written as
# an info message by Gem5
if len(fields[0]) > 0 and fields[0][0] == "-":
continue
# Otherwise get its key-value pair
# And store it into the Stat object
stat[fields[0]] = fields[1]
# Store the stat object
statInstances[f] = stat
# If we are in command line mode just display the requested statistics
if not args.graphicalStats:
printstat(statInstances, args.stats)
# Else show some graphics
else:
# Import the requested libraries
import numpy as np
import matplotlib.pyplot as plt
# Plot stats
showgraph(statInstances, args.stats)
# Create the CSV stat file if required
if args.csvStats:
# Import the requested library
import csv
# Generate CSV file
        createcsv(statInstances, args.stats)
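# For reference, a sketch of the input the parsing loop above expects (an
# assumption inferred from the code, not taken from the repository): each
# stats file holds whitespace-separated key/value lines such as
#
#   sim_seconds    0.000123  # Number of seconds simulated
#
# Only the first two fields are stored, so any trailing comment is ignored.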
| bsd-3-clause |
seehuhn/py-jvplot | examples/demo9/demo9.py | 1 | 2308 | #! /usr/bin/env python3
import numpy as np
import pandas as pd
import jvplot
# load model outputs and ranges from ice core data
data = pd.read_csv('outputs.csv')
data = data.dropna()
ranges = pd.read_csv('ranges.csv')
# select required columns in the correct order (leaving out Renland)
names = ["NEEM", "NGRIP", "GRIP", "GISP2", "camp", "DYE3"]
data = data[names]
ranges = ranges[names]
# convert to numpy arrays
data = np.array(data)
ranges = np.array(ranges)
p = data.shape[1]
rr = [np.min(data), np.max(data)]
def scatter(pl, row, col, rect, x_range, y_range, style):
ax = pl.axes(x_lim=x_range, y_lim=y_range, rect=rect, style=style)
x_low, x_mid, x_high = ranges[:, col]
y_low, y_mid, y_high = ranges[:, row]
S = {
'rect_bg': 'rgba(255,0,0,.2)',
'rect_lw': 0,
}
ax.draw_rectangle([[None, y_low, None, y_high - y_low],
[x_low, None, x_high - x_low, None]],
style=S)
ax.draw_rectangle([x_low, y_low, x_high - x_low, y_high - y_low],
style=S)
S0 = {
'plot_point_col': 'rgba(0,0,0,.3)',
'plot_point_size': '4pt',
}
ax.draw_points(data[:, col], data[:, row], style=S0)
S1 = {
'plot_point_col': 'black',
'plot_point_size': '1pt',
}
ax.draw_points(data[:, col], data[:, row], style=S1)
def label(pl, row, col, rect, x_range, y_range, style):
plain = {
'axis_border_lw': 0,
'axis_ticks': '',
}
name = names[col]
if name == "camp":
name = "Camp\nCentury"
ax = pl.axes(x_lim=[0, 1], y_lim=[0, 1], rect=rect, style=plain)
ax.draw_text(name, .5, .5,
horizontal_align='center', vertical_align='center')
S0 = {
'axis_border_lw': '.8pt',
'axis_col': '#777',
'axis_tick_length': '1.5pt',
'axis_tick_lw': '.5pt',
'axis_tick_spacing_x': '2mm',
'axis_tick_spacing_y': '0.1mm',
'axis_ticks': 'BLTR',
'tick_font_size': '6pt',
'margin_bottom': '3mm',
'margin_left': '3mm',
'margin_top': '3mm',
'margin_right': '3mm',
'padding': '1pt',
}
with jvplot.Plot('demo9.pdf', '5.5in', '5.5in') as pl:
pl.grid_plot([rr]*p, x_names=names,
upper_fn=scatter, diag_fn=label, lower_fn=scatter,
style=S0)
| gpl-3.0 |
WangWenjun559/Weiss | summary/sumy/sklearn/ensemble/partial_dependence.py | 251 | 15097 | """Partial dependence plots for tree ensembles. """
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from itertools import count
import numbers
import numpy as np
from scipy.stats.mstats import mquantiles
from ..utils.extmath import cartesian
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import map, range, zip
from ..utils import check_array
from ..tree._tree import DTYPE
from ._gradient_boosting import _partial_dependence_tree
from .gradient_boosting import BaseGradientBoosting
def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100):
"""Generate a grid of points based on the ``percentiles of ``X``.
The grid is generated by placing ``grid_resolution`` equally
spaced points between the ``percentiles`` of each column
of ``X``.
Parameters
----------
X : ndarray
The data
percentiles : tuple of floats
The percentiles which are used to construct the extreme
values of the grid axes.
grid_resolution : int
The number of equally spaced points that are placed
on the grid.
Returns
-------
grid : ndarray
        All data points on the grid; ``grid.shape[1] == X.shape[1]`` and
        ``grid.shape[0]`` equals the product of the axis lengths (at most
        ``grid_resolution ** X.shape[1]``).
axes : seq of ndarray
The axes with which the grid has been created.
"""
if len(percentiles) != 2:
raise ValueError('percentile must be tuple of len 2')
if not all(0. <= x <= 1. for x in percentiles):
raise ValueError('percentile values must be in [0, 1]')
axes = []
for col in range(X.shape[1]):
uniques = np.unique(X[:, col])
if uniques.shape[0] < grid_resolution:
# feature has low resolution use unique vals
axis = uniques
else:
emp_percentiles = mquantiles(X, prob=percentiles, axis=0)
# create axis based on percentiles and grid resolution
axis = np.linspace(emp_percentiles[0, col],
emp_percentiles[1, col],
num=grid_resolution, endpoint=True)
axes.append(axis)
return cartesian(axes), axes
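# Illustrative sketch of the helper above (comments only, not part of the
# original module). With two continuous columns and grid_resolution=4, each
# axis gets 4 equally spaced points between the requested percentiles and the
# returned grid is their cartesian product:
#
#   >>> X = np.random.RandomState(0).rand(50, 2)
#   >>> grid, axes = _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=4)
#   >>> grid.shape, len(axes)
#   ((16, 2), 2)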
def partial_dependence(gbrt, target_variables, grid=None, X=None,
percentiles=(0.05, 0.95), grid_resolution=100):
"""Partial dependence of ``target_variables``.
Partial dependence plots show the dependence between the joint values
of the ``target_variables`` and the function represented
by the ``gbrt``.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
target_variables : array-like, dtype=int
        The target features for which the partial dependency should be
computed (size should be smaller than 3 for visual renderings).
grid : array-like, shape=(n_points, len(target_variables))
The grid of ``target_variables`` values for which the
        partial dependency should be evaluated (either ``grid`` or ``X``
must be specified).
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained. It is used to generate
a ``grid`` for the ``target_variables``. The ``grid`` comprises
``grid_resolution`` equally spaced points between the two
``percentiles``.
percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentile used to create the extreme values
for the ``grid``. Only if ``X`` is not None.
grid_resolution : int, default=100
The number of equally spaced points on the ``grid``.
Returns
-------
pdp : array, shape=(n_classes, n_points)
The partial dependence function evaluated on the ``grid``.
For regression and binary classification ``n_classes==1``.
axes : seq of ndarray or None
The axes with which the grid has been created or None if
the grid has been given.
Examples
--------
>>> samples = [[0, 0, 2], [1, 0, 0]]
>>> labels = [0, 1]
>>> from sklearn.ensemble import GradientBoostingClassifier
>>> gb = GradientBoostingClassifier(random_state=0).fit(samples, labels)
>>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2)
>>> partial_dependence(gb, [0], **kwargs) # doctest: +SKIP
(array([[-4.52..., 4.52...]]), [array([ 0., 1.])])
"""
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
if (grid is None and X is None) or (grid is not None and X is not None):
raise ValueError('Either grid or X must be specified')
target_variables = np.asarray(target_variables, dtype=np.int32,
order='C').ravel()
if any([not (0 <= fx < gbrt.n_features) for fx in target_variables]):
raise ValueError('target_variables must be in [0, %d]'
% (gbrt.n_features - 1))
if X is not None:
X = check_array(X, dtype=DTYPE, order='C')
grid, axes = _grid_from_X(X[:, target_variables], percentiles,
grid_resolution)
else:
assert grid is not None
        # don't return axes if grid is given
axes = None
# grid must be 2d
if grid.ndim == 1:
grid = grid[:, np.newaxis]
if grid.ndim != 2:
raise ValueError('grid must be 2d but is %dd' % grid.ndim)
grid = np.asarray(grid, dtype=DTYPE, order='C')
assert grid.shape[1] == target_variables.shape[0]
n_trees_per_stage = gbrt.estimators_.shape[1]
n_estimators = gbrt.estimators_.shape[0]
pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64,
order='C')
for stage in range(n_estimators):
for k in range(n_trees_per_stage):
tree = gbrt.estimators_[stage, k].tree_
_partial_dependence_tree(tree, grid, target_variables,
gbrt.learning_rate, pdp[k])
return pdp, axes
def plot_partial_dependence(gbrt, X, features, feature_names=None,
label=None, n_cols=3, grid_resolution=100,
percentiles=(0.05, 0.95), n_jobs=1,
verbose=0, ax=None, line_kw=None,
contour_kw=None, **fig_kw):
"""Partial dependence plots for ``features``.
The ``len(features)`` plots are arranged in a grid with ``n_cols``
columns. Two-way partial dependence plots are plotted as contour
plots.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained.
features : seq of tuples or ints
If seq[i] is an int or a tuple with one int value, a one-way
PDP is created; if seq[i] is a tuple of two ints, a two-way
PDP is created.
feature_names : seq of str
Name of each feature; feature_names[i] holds
the name of the feature with index i.
label : object
The class label for which the PDPs should be computed.
Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``.
n_cols : int
The number of columns in the grid plot (default: 3).
percentiles : (low, high), default=(0.05, 0.95)
The lower and upper percentile used to create the extreme values
for the PDP axes.
grid_resolution : int, default=100
The number of equally spaced points on the axes.
n_jobs : int
The number of CPUs to use to compute the PDs. -1 means 'all CPUs'.
Defaults to 1.
verbose : int
Verbose output during PD computations. Defaults to 0.
ax : Matplotlib axis object, default None
An axis object onto which the plots will be drawn.
line_kw : dict
Dict with keywords passed to the ``pylab.plot`` call.
For one-way partial dependence plots.
contour_kw : dict
        Dict with keywords passed to the ``pylab.contourf`` call.
For two-way partial dependence plots.
fig_kw : dict
Dict with keywords passed to the figure() call.
Note that all keywords not recognized above will be automatically
included here.
Returns
-------
fig : figure
The Matplotlib Figure object.
axs : seq of Axis objects
A seq of Axis objects, one for each subplot.
Examples
--------
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> X, y = make_friedman1()
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
>>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
...
"""
import matplotlib.pyplot as plt
from matplotlib import transforms
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import ScalarFormatter
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
# set label_idx for multi-class GBRT
if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2:
if label is None:
raise ValueError('label is not given for multi-class PDP')
label_idx = np.searchsorted(gbrt.classes_, label)
if gbrt.classes_[label_idx] != label:
raise ValueError('label %s not in ``gbrt.classes_``' % str(label))
else:
# regression and binary classification
label_idx = 0
X = check_array(X, dtype=DTYPE, order='C')
if gbrt.n_features != X.shape[1]:
raise ValueError('X.shape[1] does not match gbrt.n_features')
if line_kw is None:
line_kw = {'color': 'green'}
if contour_kw is None:
contour_kw = {}
# convert feature_names to list
if feature_names is None:
# if not feature_names use fx indices as name
feature_names = [str(i) for i in range(gbrt.n_features)]
elif isinstance(feature_names, np.ndarray):
feature_names = feature_names.tolist()
def convert_feature(fx):
if isinstance(fx, six.string_types):
try:
fx = feature_names.index(fx)
except ValueError:
raise ValueError('Feature %s not in feature_names' % fx)
return fx
# convert features into a seq of int tuples
tmp_features = []
for fxs in features:
if isinstance(fxs, (numbers.Integral,) + six.string_types):
fxs = (fxs,)
try:
fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32)
except TypeError:
raise ValueError('features must be either int, str, or tuple '
'of int/str')
if not (1 <= np.size(fxs) <= 2):
raise ValueError('target features must be either one or two')
tmp_features.append(fxs)
features = tmp_features
names = []
try:
for fxs in features:
l = []
# explicit loop so "i" is bound for exception below
for i in fxs:
l.append(feature_names[i])
names.append(l)
except IndexError:
raise ValueError('features[i] must be in [0, n_features) '
'but was %d' % i)
# compute PD functions
pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(partial_dependence)(gbrt, fxs, X=X,
grid_resolution=grid_resolution,
percentiles=percentiles)
for fxs in features)
# get global min and max values of PD grouped by plot type
pdp_lim = {}
for pdp, axes in pd_result:
min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max()
n_fx = len(axes)
old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
min_pd = min(min_pd, old_min_pd)
max_pd = max(max_pd, old_max_pd)
pdp_lim[n_fx] = (min_pd, max_pd)
# create contour levels for two-way plots
if 2 in pdp_lim:
Z_level = np.linspace(*pdp_lim[2], num=8)
if ax is None:
fig = plt.figure(**fig_kw)
else:
fig = ax.get_figure()
fig.clear()
n_cols = min(n_cols, len(features))
n_rows = int(np.ceil(len(features) / float(n_cols)))
axs = []
for i, fx, name, (pdp, axes) in zip(count(), features, names,
pd_result):
ax = fig.add_subplot(n_rows, n_cols, i + 1)
if len(axes) == 1:
ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw)
else:
# make contour plot
assert len(axes) == 2
XX, YY = np.meshgrid(axes[0], axes[1])
Z = pdp[label_idx].reshape(list(map(np.size, axes))).T
CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5,
colors='k')
ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1],
vmin=Z_level[0], alpha=0.75, **contour_kw)
ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True)
# plot data deciles + axes labels
deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transData,
ax.transAxes)
ylim = ax.get_ylim()
ax.vlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_xlabel(name[0])
ax.set_ylim(ylim)
# prevent x-axis ticks from overlapping
ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))
tick_formatter = ScalarFormatter()
tick_formatter.set_powerlimits((-3, 4))
ax.xaxis.set_major_formatter(tick_formatter)
if len(axes) > 1:
# two-way PDP - y-axis deciles + labels
deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transAxes,
ax.transData)
xlim = ax.get_xlim()
ax.hlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_ylabel(name[1])
# hline erases xlim
ax.set_xlim(xlim)
else:
ax.set_ylabel('Partial dependence')
if len(axes) == 1:
ax.set_ylim(pdp_lim[1])
axs.append(ax)
fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4,
hspace=0.3)
return fig, axs
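# A minimal end-to-end sketch (comments only, not part of the original module);
# the feature names below are made up for illustration:
#
#   >>> from sklearn.datasets import make_friedman1
#   >>> from sklearn.ensemble import GradientBoostingRegressor
#   >>> X, y = make_friedman1(random_state=0)
#   >>> est = GradientBoostingRegressor(n_estimators=10).fit(X, y)
#   >>> names = ['f%d' % i for i in range(X.shape[1])]
#   >>> fig, axs = plot_partial_dependence(est, X, ['f0', ('f0', 'f1')],
#   ...                                    feature_names=names)  # doctest: +SKIP
#
# A single feature yields a one-way line plot; a tuple of two features yields a
# two-way contour plot in the same grid of subplots.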
| apache-2.0 |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/matplotlib/colors.py | 1 | 71098 | """
A module for converting numbers or color arguments to *RGB* or *RGBA*
*RGB* and *RGBA* are sequences of, respectively, 3 or 4 floats in the
range 0-1.
This module includes functions and classes for color specification
conversions, and for mapping numbers to colors in a 1-D array of colors called
a colormap. Colormapping typically involves two steps: a data array is first
mapped onto the range 0-1 using an instance of :class:`Normalize` or of a
subclass; then this number in the 0-1 range is mapped to a color using an
instance of a subclass of :class:`Colormap`. Two are provided here:
:class:`LinearSegmentedColormap`, which is used to generate all the built-in
colormap instances, but is also useful for making custom colormaps, and
:class:`ListedColormap`, which is used for generating a custom colormap from a
list of color specifications.
The module also provides a single instance, *colorConverter*, of the
:class:`ColorConverter` class providing methods for converting single color
specifications or sequences of them to *RGB* or *RGBA*.
Commands which take color arguments can use several formats to specify
the colors. For the basic built-in colors, you can use a single letter
- b: blue
- g: green
- r: red
- c: cyan
- m: magenta
- y: yellow
- k: black
- w: white
Gray shades can be given as a string encoding a float in the 0-1 range, e.g.::
color = '0.75'
For a greater range of colors, you have two options. You can specify the
color using an html hex string, as in::
color = '#eeefff'
or you can pass an *R* , *G* , *B* tuple, where each of *R* , *G* , *B* are in
the range [0,1].
Finally, legal html names for colors, like 'red', 'burlywood' and 'chartreuse'
are supported.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import zip
import warnings
import re
import numpy as np
from numpy import ma
import matplotlib.cbook as cbook
cnames = {
'aliceblue': '#F0F8FF',
'antiquewhite': '#FAEBD7',
'aqua': '#00FFFF',
'aquamarine': '#7FFFD4',
'azure': '#F0FFFF',
'beige': '#F5F5DC',
'bisque': '#FFE4C4',
'black': '#000000',
'blanchedalmond': '#FFEBCD',
'blue': '#0000FF',
'blueviolet': '#8A2BE2',
'brown': '#A52A2A',
'burlywood': '#DEB887',
'cadetblue': '#5F9EA0',
'chartreuse': '#7FFF00',
'chocolate': '#D2691E',
'coral': '#FF7F50',
'cornflowerblue': '#6495ED',
'cornsilk': '#FFF8DC',
'crimson': '#DC143C',
'cyan': '#00FFFF',
'darkblue': '#00008B',
'darkcyan': '#008B8B',
'darkgoldenrod': '#B8860B',
'darkgray': '#A9A9A9',
'darkgreen': '#006400',
'darkkhaki': '#BDB76B',
'darkmagenta': '#8B008B',
'darkolivegreen': '#556B2F',
'darkorange': '#FF8C00',
'darkorchid': '#9932CC',
'darkred': '#8B0000',
'darksage': '#598556',
'darksalmon': '#E9967A',
'darkseagreen': '#8FBC8F',
'darkslateblue': '#483D8B',
'darkslategray': '#2F4F4F',
'darkturquoise': '#00CED1',
'darkviolet': '#9400D3',
'deeppink': '#FF1493',
'deepskyblue': '#00BFFF',
'dimgray': '#696969',
'dodgerblue': '#1E90FF',
'firebrick': '#B22222',
'floralwhite': '#FFFAF0',
'forestgreen': '#228B22',
'fuchsia': '#FF00FF',
'gainsboro': '#DCDCDC',
'ghostwhite': '#F8F8FF',
'gold': '#FFD700',
'goldenrod': '#DAA520',
'gray': '#808080',
'green': '#008000',
'greenyellow': '#ADFF2F',
'honeydew': '#F0FFF0',
'hotpink': '#FF69B4',
'indianred': '#CD5C5C',
'indigo': '#4B0082',
'ivory': '#FFFFF0',
'khaki': '#F0E68C',
'lavender': '#E6E6FA',
'lavenderblush': '#FFF0F5',
'lawngreen': '#7CFC00',
'lemonchiffon': '#FFFACD',
'lightblue': '#ADD8E6',
'lightcoral': '#F08080',
'lightcyan': '#E0FFFF',
'lightgoldenrodyellow': '#FAFAD2',
'lightgreen': '#90EE90',
'lightgray': '#D3D3D3',
'lightpink': '#FFB6C1',
'lightsage': '#BCECAC',
'lightsalmon': '#FFA07A',
'lightseagreen': '#20B2AA',
'lightskyblue': '#87CEFA',
'lightslategray': '#778899',
'lightsteelblue': '#B0C4DE',
'lightyellow': '#FFFFE0',
'lime': '#00FF00',
'limegreen': '#32CD32',
'linen': '#FAF0E6',
'magenta': '#FF00FF',
'maroon': '#800000',
'mediumaquamarine': '#66CDAA',
'mediumblue': '#0000CD',
'mediumorchid': '#BA55D3',
'mediumpurple': '#9370DB',
'mediumseagreen': '#3CB371',
'mediumslateblue': '#7B68EE',
'mediumspringgreen': '#00FA9A',
'mediumturquoise': '#48D1CC',
'mediumvioletred': '#C71585',
'midnightblue': '#191970',
'mintcream': '#F5FFFA',
'mistyrose': '#FFE4E1',
'moccasin': '#FFE4B5',
'navajowhite': '#FFDEAD',
'navy': '#000080',
'oldlace': '#FDF5E6',
'olive': '#808000',
'olivedrab': '#6B8E23',
'orange': '#FFA500',
'orangered': '#FF4500',
'orchid': '#DA70D6',
'palegoldenrod': '#EEE8AA',
'palegreen': '#98FB98',
'paleturquoise': '#AFEEEE',
'palevioletred': '#DB7093',
'papayawhip': '#FFEFD5',
'peachpuff': '#FFDAB9',
'peru': '#CD853F',
'pink': '#FFC0CB',
'plum': '#DDA0DD',
'powderblue': '#B0E0E6',
'purple': '#800080',
'red': '#FF0000',
'rosybrown': '#BC8F8F',
'royalblue': '#4169E1',
'saddlebrown': '#8B4513',
'salmon': '#FA8072',
'sage': '#87AE73',
'sandybrown': '#FAA460',
'seagreen': '#2E8B57',
'seashell': '#FFF5EE',
'sienna': '#A0522D',
'silver': '#C0C0C0',
'skyblue': '#87CEEB',
'slateblue': '#6A5ACD',
'slategray': '#708090',
'snow': '#FFFAFA',
'springgreen': '#00FF7F',
'steelblue': '#4682B4',
'tan': '#D2B48C',
'teal': '#008080',
'thistle': '#D8BFD8',
'tomato': '#FF6347',
'turquoise': '#40E0D0',
'violet': '#EE82EE',
'wheat': '#F5DEB3',
'white': '#FFFFFF',
'whitesmoke': '#F5F5F5',
'yellow': '#FFFF00',
'yellowgreen': '#9ACD32'}
# add british equivs
for k, v in list(six.iteritems(cnames)):
if k.find('gray') >= 0:
k = k.replace('gray', 'grey')
cnames[k] = v
def is_color_like(c):
'Return *True* if *c* can be converted to *RGB*'
try:
colorConverter.to_rgb(c)
return True
except ValueError:
return False
def rgb2hex(rgb):
'Given an rgb or rgba sequence of 0-1 floats, return the hex string'
a = '#%02x%02x%02x' % tuple([int(np.round(val * 255)) for val in rgb[:3]])
return a
hexColorPattern = re.compile("\A#[a-fA-F0-9]{6}\Z")
def hex2color(s):
"""
Take a hex string *s* and return the corresponding rgb 3-tuple
Example: #efefef -> (0.93725, 0.93725, 0.93725)
"""
if not isinstance(s, six.string_types):
raise TypeError('hex2color requires a string argument')
if hexColorPattern.match(s) is None:
raise ValueError('invalid hex color string "%s"' % s)
return tuple([int(n, 16) / 255.0 for n in (s[1:3], s[3:5], s[5:7])])
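# Illustrative round trip for the two helpers above (comments only):
#
#   >>> rgb2hex((0.0, 0.5, 1.0))
#   '#0080ff'
#   >>> hex2color('#0080ff')   # approximately
#   (0.0, 0.502..., 1.0)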
class ColorConverter(object):
"""
Provides methods for converting color specifications to *RGB* or *RGBA*
Caching is used for more efficient conversion upon repeated calls
with the same argument.
Ordinarily only the single instance instantiated in this module,
*colorConverter*, is needed.
"""
colors = {
'b': (0.0, 0.0, 1.0),
'g': (0.0, 0.5, 0.0),
'r': (1.0, 0.0, 0.0),
'c': (0.0, 0.75, 0.75),
'm': (0.75, 0, 0.75),
'y': (0.75, 0.75, 0),
'k': (0.0, 0.0, 0.0),
'w': (1.0, 1.0, 1.0), }
cache = {}
def to_rgb(self, arg):
"""
Returns an *RGB* tuple of three floats from 0-1.
*arg* can be an *RGB* or *RGBA* sequence or a string in any of
several forms:
1) a letter from the set 'rgbcmykw'
2) a hex color string, like '#00FFFF'
3) a standard name, like 'aqua'
4) a string representation of a float, like '0.4',
indicating gray on a 0-1 scale
if *arg* is *RGBA*, the *A* will simply be discarded.
"""
# Gray must be a string to distinguish 3-4 grays from RGB or RGBA.
try:
return self.cache[arg]
except KeyError:
pass
except TypeError: # could be unhashable rgb seq
arg = tuple(arg)
try:
return self.cache[arg]
except KeyError:
pass
except TypeError:
raise ValueError(
'to_rgb: arg "%s" is unhashable even inside a tuple'
% (str(arg),))
try:
if cbook.is_string_like(arg):
argl = arg.lower()
color = self.colors.get(argl, None)
if color is None:
str1 = cnames.get(argl, argl)
if str1.startswith('#'):
color = hex2color(str1)
else:
fl = float(argl)
if fl < 0 or fl > 1:
raise ValueError(
'gray (string) must be in range 0-1')
color = (fl,)*3
elif cbook.iterable(arg):
if len(arg) > 4 or len(arg) < 3:
raise ValueError(
'sequence length is %d; must be 3 or 4' % len(arg))
color = tuple(arg[:3])
if [x for x in color if (float(x) < 0) or (x > 1)]:
# This will raise TypeError if x is not a number.
raise ValueError(
                        'number in rgb sequence outside 0-1 range')
else:
raise ValueError(
'cannot convert argument to rgb sequence')
self.cache[arg] = color
except (KeyError, ValueError, TypeError) as exc:
raise ValueError(
'to_rgb: Invalid rgb arg "%s"\n%s' % (str(arg), exc))
# Error messages could be improved by handling TypeError
# separately; but this should be rare and not too hard
# for the user to figure out as-is.
return color
def to_rgba(self, arg, alpha=None):
"""
Returns an *RGBA* tuple of four floats from 0-1.
For acceptable values of *arg*, see :meth:`to_rgb`.
In addition, if *arg* is "none" (case-insensitive),
then (0,0,0,0) will be returned.
If *arg* is an *RGBA* sequence and *alpha* is not *None*,
*alpha* will replace the original *A*.
"""
try:
if arg.lower() == 'none':
return (0.0, 0.0, 0.0, 0.0)
except AttributeError:
pass
try:
if not cbook.is_string_like(arg) and cbook.iterable(arg):
if len(arg) == 4:
if any(float(x) < 0 or x > 1 for x in arg):
raise ValueError(
                            'number in rgba sequence outside 0-1 range')
if alpha is None:
return tuple(arg)
if alpha < 0.0 or alpha > 1.0:
raise ValueError("alpha must be in range 0-1")
return arg[0], arg[1], arg[2], alpha
if len(arg) == 3:
r, g, b = arg
if any(float(x) < 0 or x > 1 for x in arg):
raise ValueError(
                            'number in rgb sequence outside 0-1 range')
else:
raise ValueError(
'length of rgba sequence should be either 3 or 4')
else:
r, g, b = self.to_rgb(arg)
if alpha is None:
alpha = 1.0
return r, g, b, alpha
except (TypeError, ValueError) as exc:
raise ValueError(
'to_rgba: Invalid rgba arg "%s"\n%s' % (str(arg), exc))
def to_rgba_array(self, c, alpha=None):
"""
Returns a numpy array of *RGBA* tuples.
Accepts a single mpl color spec or a sequence of specs.
Special case to handle "no color": if *c* is "none" (case-insensitive),
then an empty array will be returned. Same for an empty list.
"""
try:
nc = len(c)
except TypeError:
raise ValueError(
"Cannot convert argument type %s to rgba array" % type(c))
try:
if nc == 0 or c.lower() == 'none':
return np.zeros((0, 4), dtype=np.float)
except AttributeError:
pass
try:
# Single value? Put it in an array with a single row.
return np.array([self.to_rgba(c, alpha)], dtype=np.float)
except ValueError:
if isinstance(c, np.ndarray):
if c.ndim != 2 and c.dtype.kind not in 'SU':
raise ValueError("Color array must be two-dimensional")
if (c.ndim == 2 and c.shape[1] == 4 and c.dtype.kind == 'f'):
if (c.ravel() > 1).any() or (c.ravel() < 0).any():
raise ValueError(
"number in rgba sequence is outside 0-1 range")
result = np.asarray(c, np.float)
if alpha is not None:
if alpha > 1 or alpha < 0:
raise ValueError("alpha must be in 0-1 range")
result[:, 3] = alpha
return result
# This alpha operation above is new, and depends
# on higher levels to refrain from setting alpha
# to values other than None unless there is
# intent to override any existing alpha values.
# It must be some other sequence of color specs.
result = np.zeros((nc, 4), dtype=np.float)
for i, cc in enumerate(c):
result[i] = self.to_rgba(cc, alpha)
return result
colorConverter = ColorConverter()
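# Quick illustrative calls on the module-level converter defined above
# (comments only, not part of the original module):
#
#   >>> colorConverter.to_rgb('g')
#   (0.0, 0.5, 0.0)
#   >>> colorConverter.to_rgba('0.25', alpha=0.5)   # gray string + explicit alpha
#   (0.25, 0.25, 0.25, 0.5)
#   >>> colorConverter.to_rgba('none')              # "no color" special case
#   (0.0, 0.0, 0.0, 0.0)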
def makeMappingArray(N, data, gamma=1.0):
"""Create an *N* -element 1-d lookup table
*data* represented by a list of x,y0,y1 mapping correspondences.
Each element in this list represents how a value between 0 and 1
(inclusive) represented by x is mapped to a corresponding value
    between 0 and 1 (inclusive). The two values of y allow for
    discontinuous mapping functions (as might be found in a sawtooth),
    where y0 is the value of y for values of x less than or equal to the
    given x, and y1 is the value to be used for x greater than the given
    x. The list must start with x=0, end with x=1, and
all values of x must be in increasing order. Values between
the given mapping points are determined by simple linear interpolation.
Alternatively, data can be a function mapping values between 0 - 1
to 0 - 1.
The function returns an array "result" where ``result[x*(N-1)]``
gives the closest value for values of x between 0 and 1.
"""
if six.callable(data):
xind = np.linspace(0, 1, N) ** gamma
lut = np.clip(np.array(data(xind), dtype=np.float), 0, 1)
return lut
try:
adata = np.array(data)
except:
raise TypeError("data must be convertable to an array")
shape = adata.shape
if len(shape) != 2 or shape[1] != 3:
raise ValueError("data must be nx3 format")
x = adata[:, 0]
y0 = adata[:, 1]
y1 = adata[:, 2]
if x[0] != 0. or x[-1] != 1.0:
raise ValueError(
"data mapping points must start with x=0. and end with x=1")
if np.sometrue(np.sort(x) - x):
raise ValueError(
"data mapping points must have x in increasing order")
# begin generation of lookup table
x = x * (N - 1)
lut = np.zeros((N,), np.float)
xind = (N - 1) * np.linspace(0, 1, N) ** gamma
ind = np.searchsorted(x, xind)[1:-1]
distance = (xind[1:-1] - x[ind - 1]) / (x[ind] - x[ind - 1])
lut[1:-1] = distance * (y0[ind] - y1[ind - 1]) + y1[ind - 1]
lut[0] = y1[0]
lut[-1] = y0[-1]
# ensure that the lut is confined to values between 0 and 1 by clipping it
return np.clip(lut, 0.0, 1.0)
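# Illustrative call for the helper above (comments only): a single linear
# segment from 0 to 1 sampled at N=5 points gives evenly spaced values.
#
#   >>> makeMappingArray(5, [(0.0, 0.0, 0.0), (1.0, 1.0, 1.0)])
#   array([ 0.  ,  0.25,  0.5 ,  0.75,  1.  ])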
class Colormap(object):
"""
Baseclass for all scalar to RGBA mappings.
Typically Colormap instances are used to convert data values (floats) from
the interval ``[0, 1]`` to the RGBA color that the respective Colormap
represents. For scaling of data into the ``[0, 1]`` interval see
:class:`matplotlib.colors.Normalize`. It is worth noting that
:class:`matplotlib.cm.ScalarMappable` subclasses make heavy use of this
``data->normalize->map-to-color`` processing chain.
"""
def __init__(self, name, N=256):
r"""
Parameters
----------
name : str
The name of the colormap.
N : int
The number of rgb quantization levels.
"""
self.name = name
self.N = int(N) # ensure that N is always int
self._rgba_bad = (0.0, 0.0, 0.0, 0.0) # If bad, don't paint anything.
self._rgba_under = None
self._rgba_over = None
self._i_under = self.N
self._i_over = self.N + 1
self._i_bad = self.N + 2
self._isinit = False
#: When this colormap exists on a scalar mappable and colorbar_extend
#: is not False, colorbar creation will pick up ``colorbar_extend`` as
#: the default value for the ``extend`` keyword in the
#: :class:`matplotlib.colorbar.Colorbar` constructor.
self.colorbar_extend = False
def __call__(self, X, alpha=None, bytes=False):
"""
Parameters
----------
X : scalar, ndarray
The data value(s) to convert to RGBA.
For floats, X should be in the interval ``[0.0, 1.0]`` to
return the RGBA values ``X*100`` percent along the Colormap line.
For integers, X should be in the interval ``[0, Colormap.N)`` to
return RGBA values *indexed* from the Colormap with index ``X``.
alpha : float, None
Alpha must be a scalar between 0 and 1, or None.
bytes : bool
If False (default), the returned RGBA values will be floats in the
interval ``[0, 1]`` otherwise they will be uint8s in the interval
``[0, 255]``.
Returns
-------
        Tuple of RGBA values if X is scalar, otherwise an array of
RGBA values with a shape of ``X.shape + (4, )``.
"""
# See class docstring for arg/kwarg documentation.
if not self._isinit:
self._init()
mask_bad = None
if not cbook.iterable(X):
vtype = 'scalar'
xa = np.array([X])
else:
vtype = 'array'
xma = ma.array(X, copy=True) # Copy here to avoid side effects.
mask_bad = xma.mask # Mask will be used below.
xa = xma.filled() # Fill to avoid infs, etc.
del xma
# Calculations with native byteorder are faster, and avoid a
# bug that otherwise can occur with putmask when the last
# argument is a numpy scalar.
if not xa.dtype.isnative:
xa = xa.byteswap().newbyteorder()
if xa.dtype.kind == "f":
# Treat 1.0 as slightly less than 1.
vals = np.array([1, 0], dtype=xa.dtype)
almost_one = np.nextafter(*vals)
cbook._putmask(xa, xa == 1.0, almost_one)
# The following clip is fast, and prevents possible
# conversion of large positive values to negative integers.
xa *= self.N
np.clip(xa, -1, self.N, out=xa)
# ensure that all 'under' values will still have negative
# value after casting to int
cbook._putmask(xa, xa < 0.0, -1)
xa = xa.astype(int)
# Set the over-range indices before the under-range;
# otherwise the under-range values get converted to over-range.
cbook._putmask(xa, xa > self.N - 1, self._i_over)
cbook._putmask(xa, xa < 0, self._i_under)
if mask_bad is not None:
if mask_bad.shape == xa.shape:
cbook._putmask(xa, mask_bad, self._i_bad)
elif mask_bad:
xa.fill(self._i_bad)
if bytes:
lut = (self._lut * 255).astype(np.uint8)
else:
lut = self._lut.copy() # Don't let alpha modify original _lut.
if alpha is not None:
alpha = min(alpha, 1.0) # alpha must be between 0 and 1
alpha = max(alpha, 0.0)
if bytes:
alpha = int(alpha * 255)
if (lut[-1] == 0).all():
lut[:-1, -1] = alpha
# All zeros is taken as a flag for the default bad
# color, which is no color--fully transparent. We
# don't want to override this.
else:
lut[:, -1] = alpha
# If the bad value is set to have a color, then we
# override its alpha just as for any other value.
rgba = np.empty(shape=xa.shape + (4,), dtype=lut.dtype)
lut.take(xa, axis=0, mode='clip', out=rgba)
if vtype == 'scalar':
rgba = tuple(rgba[0, :])
return rgba
def set_bad(self, color='k', alpha=None):
"""Set color to be used for masked values.
"""
self._rgba_bad = colorConverter.to_rgba(color, alpha)
if self._isinit:
self._set_extremes()
def set_under(self, color='k', alpha=None):
"""Set color to be used for low out-of-range values.
Requires norm.clip = False
"""
self._rgba_under = colorConverter.to_rgba(color, alpha)
if self._isinit:
self._set_extremes()
def set_over(self, color='k', alpha=None):
"""Set color to be used for high out-of-range values.
Requires norm.clip = False
"""
self._rgba_over = colorConverter.to_rgba(color, alpha)
if self._isinit:
self._set_extremes()
def _set_extremes(self):
if self._rgba_under:
self._lut[self._i_under] = self._rgba_under
else:
self._lut[self._i_under] = self._lut[0]
if self._rgba_over:
self._lut[self._i_over] = self._rgba_over
else:
self._lut[self._i_over] = self._lut[self.N - 1]
self._lut[self._i_bad] = self._rgba_bad
def _init(self):
"""Generate the lookup table, self._lut"""
raise NotImplementedError("Abstract class only")
def is_gray(self):
if not self._isinit:
self._init()
return (np.alltrue(self._lut[:, 0] == self._lut[:, 1]) and
np.alltrue(self._lut[:, 0] == self._lut[:, 2]))
def _resample(self, lutsize):
"""
Return a new color map with *lutsize* entries.
"""
raise NotImplementedError()
class LinearSegmentedColormap(Colormap):
"""Colormap objects based on lookup tables using linear segments.
The lookup table is generated using linear interpolation for each
primary color, with the 0-1 domain divided into any number of
segments.
"""
def __init__(self, name, segmentdata, N=256, gamma=1.0):
"""Create color map from linear mapping segments
segmentdata argument is a dictionary with a red, green and blue
entries. Each entry should be a list of *x*, *y0*, *y1* tuples,
forming rows in a table. Entries for alpha are optional.
Example: suppose you want red to increase from 0 to 1 over
the bottom half, green to do the same over the middle half,
and blue over the top half. Then you would use::
cdict = {'red': [(0.0, 0.0, 0.0),
(0.5, 1.0, 1.0),
(1.0, 1.0, 1.0)],
'green': [(0.0, 0.0, 0.0),
(0.25, 0.0, 0.0),
(0.75, 1.0, 1.0),
(1.0, 1.0, 1.0)],
'blue': [(0.0, 0.0, 0.0),
(0.5, 0.0, 0.0),
(1.0, 1.0, 1.0)]}
Each row in the table for a given color is a sequence of
*x*, *y0*, *y1* tuples. In each sequence, *x* must increase
monotonically from 0 to 1. For any input value *z* falling
between *x[i]* and *x[i+1]*, the output value of a given color
will be linearly interpolated between *y1[i]* and *y0[i+1]*::
row i: x y0 y1
/
/
row i+1: x y0 y1
Hence y0 in the first row and y1 in the last row are never used.
.. seealso::
:meth:`LinearSegmentedColormap.from_list`
Static method; factory function for generating a
smoothly-varying LinearSegmentedColormap.
:func:`makeMappingArray`
For information about making a mapping array.
"""
# True only if all colors in map are identical; needed for contouring.
self.monochrome = False
Colormap.__init__(self, name, N)
self._segmentdata = segmentdata
self._gamma = gamma
def _init(self):
self._lut = np.ones((self.N + 3, 4), np.float)
self._lut[:-3, 0] = makeMappingArray(
self.N, self._segmentdata['red'], self._gamma)
self._lut[:-3, 1] = makeMappingArray(
self.N, self._segmentdata['green'], self._gamma)
self._lut[:-3, 2] = makeMappingArray(
self.N, self._segmentdata['blue'], self._gamma)
if 'alpha' in self._segmentdata:
self._lut[:-3, 3] = makeMappingArray(
self.N, self._segmentdata['alpha'], 1)
self._isinit = True
self._set_extremes()
def set_gamma(self, gamma):
"""
Set a new gamma value and regenerate color map.
"""
self._gamma = gamma
self._init()
@staticmethod
def from_list(name, colors, N=256, gamma=1.0):
"""
Make a linear segmented colormap with *name* from a sequence
of *colors* which evenly transitions from colors[0] at val=0
to colors[-1] at val=1. *N* is the number of rgb quantization
levels.
Alternatively, a list of (value, color) tuples can be given
to divide the range unevenly.
"""
if not cbook.iterable(colors):
raise ValueError('colors must be iterable')
if cbook.iterable(colors[0]) and len(colors[0]) == 2 and \
not cbook.is_string_like(colors[0]):
# List of value, color pairs
vals, colors = list(zip(*colors))
else:
vals = np.linspace(0., 1., len(colors))
cdict = dict(red=[], green=[], blue=[], alpha=[])
for val, color in zip(vals, colors):
r, g, b, a = colorConverter.to_rgba(color)
cdict['red'].append((val, r, r))
cdict['green'].append((val, g, g))
cdict['blue'].append((val, b, b))
cdict['alpha'].append((val, a, a))
return LinearSegmentedColormap(name, cdict, N, gamma)
def _resample(self, lutsize):
"""
Return a new color map with *lutsize* entries.
"""
return LinearSegmentedColormap(self.name, self._segmentdata, lutsize)
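# Illustrative use of the from_list factory above (comments only, not part of
# the original module): a two-color map interpolating from red to white.
#
#   >>> cmap = LinearSegmentedColormap.from_list('red_white', ['red', 'white'])
#   >>> cmap(0.0)
#   (1.0, 0.0, 0.0, 1.0)
#   >>> cmap(1.0)
#   (1.0, 1.0, 1.0, 1.0)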
class ListedColormap(Colormap):
"""Colormap object generated from a list of colors.
This may be most useful when indexing directly into a colormap,
but it can also be used to generate special colormaps for ordinary
mapping.
"""
def __init__(self, colors, name='from_list', N=None):
"""
Make a colormap from a list of colors.
*colors*
a list of matplotlib color specifications,
or an equivalent Nx3 or Nx4 floating point array
(*N* rgb or rgba values)
*name*
a string to identify the colormap
*N*
the number of entries in the map. The default is *None*,
in which case there is one colormap entry for each
element in the list of colors. If::
N < len(colors)
the list will be truncated at *N*. If::
N > len(colors)
the list will be extended by repetition.
"""
self.colors = colors
self.monochrome = False # True only if all colors in map are
# identical; needed for contouring.
if N is None:
N = len(self.colors)
else:
if cbook.is_string_like(self.colors):
self.colors = [self.colors] * N
self.monochrome = True
elif cbook.iterable(self.colors):
self.colors = list(self.colors) # in case it was a tuple
if len(self.colors) == 1:
self.monochrome = True
if len(self.colors) < N:
self.colors = list(self.colors) * N
del(self.colors[N:])
else:
try:
gray = float(self.colors)
except TypeError:
pass
else:
self.colors = [gray] * N
self.monochrome = True
Colormap.__init__(self, name, N)
def _init(self):
rgba = colorConverter.to_rgba_array(self.colors)
self._lut = np.zeros((self.N + 3, 4), np.float)
self._lut[:-3] = rgba
self._isinit = True
self._set_extremes()
def _resample(self, lutsize):
"""
Return a new color map with *lutsize* entries.
"""
        return ListedColormap(self.colors, self.name, lutsize)
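# Illustrative use of ListedColormap (comments only): integer inputs index the
# color list directly, while floats in [0, 1] are quantized onto it.
#
#   >>> cmap = ListedColormap(['r', 'g', 'b'])
#   >>> cmap(1)
#   (0.0, 0.5, 0.0, 1.0)
#   >>> cmap(0.99)
#   (0.0, 0.0, 1.0, 1.0)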
class Normalize(object):
"""
A class which, when called, can normalize data into
the ``[0.0, 1.0]`` interval.
"""
def __init__(self, vmin=None, vmax=None, clip=False):
"""
If *vmin* or *vmax* is not given, they are initialized from the
minimum and maximum value respectively of the first input
processed. That is, *__call__(A)* calls *autoscale_None(A)*.
If *clip* is *True* and the given value falls outside the range,
the returned value will be 0 or 1, whichever is closer.
Returns 0 if::
vmin==vmax
Works with scalars or arrays, including masked arrays. If
*clip* is *True*, masked values are set to 1; otherwise they
remain masked. Clipping silently defeats the purpose of setting
the over, under, and masked colors in the colormap, so it is
likely to lead to surprises; therefore the default is
*clip* = *False*.
"""
self.vmin = vmin
self.vmax = vmax
self.clip = clip
@staticmethod
def process_value(value):
"""
Homogenize the input *value* for easy and efficient normalization.
*value* can be a scalar or sequence.
Returns *result*, *is_scalar*, where *result* is a
masked array matching *value*. Float dtypes are preserved;
integer types with two bytes or smaller are converted to
np.float32, and larger types are converted to np.float.
Preserving float32 when possible, and using in-place operations,
can greatly improve speed for large arrays.
Experimental; we may want to add an option to force the
use of float32.
"""
if cbook.iterable(value):
is_scalar = False
result = ma.asarray(value)
if result.dtype.kind == 'f':
if isinstance(value, np.ndarray):
result = result.copy()
elif result.dtype.itemsize > 2:
result = result.astype(np.float)
else:
result = result.astype(np.float32)
else:
is_scalar = True
result = ma.array([value]).astype(np.float)
return result, is_scalar
def __call__(self, value, clip=None):
"""
Normalize *value* data in the ``[vmin, vmax]`` interval into
the ``[0.0, 1.0]`` interval and return it. *clip* defaults
to *self.clip* (which defaults to *False*). If not already
initialized, *vmin* and *vmax* are initialized using
*autoscale_None(value)*.
"""
if clip is None:
clip = self.clip
result, is_scalar = self.process_value(value)
self.autoscale_None(result)
vmin, vmax = self.vmin, self.vmax
if vmin == vmax:
result.fill(0) # Or should it be all masked? Or 0.5?
elif vmin > vmax:
raise ValueError("minvalue must be less than or equal to maxvalue")
else:
vmin = float(vmin)
vmax = float(vmax)
if clip:
mask = ma.getmask(result)
result = ma.array(np.clip(result.filled(vmax), vmin, vmax),
mask=mask)
# ma division is very slow; we can take a shortcut
resdat = result.data
resdat -= vmin
resdat /= (vmax - vmin)
result = np.ma.array(resdat, mask=result.mask, copy=False)
if is_scalar:
result = result[0]
return result
def inverse(self, value):
if not self.scaled():
raise ValueError("Not invertible until scaled")
vmin = float(self.vmin)
vmax = float(self.vmax)
if cbook.iterable(value):
val = ma.asarray(value)
return vmin + val * (vmax - vmin)
else:
return vmin + value * (vmax - vmin)
def autoscale(self, A):
"""
Set *vmin*, *vmax* to min, max of *A*.
"""
self.vmin = ma.min(A)
self.vmax = ma.max(A)
def autoscale_None(self, A):
' autoscale only None-valued vmin or vmax'
if self.vmin is None and np.size(A) > 0:
self.vmin = ma.min(A)
if self.vmax is None and np.size(A) > 0:
self.vmax = ma.max(A)
def scaled(self):
'return true if vmin and vmax set'
return (self.vmin is not None and self.vmax is not None)
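# Illustrative normalization (comments only): values are mapped linearly from
# [vmin, vmax] onto [0, 1], and inverse() undoes the mapping.
#
#   >>> norm = Normalize(vmin=0.0, vmax=10.0)
#   >>> norm(5.0)
#   0.5
#   >>> norm.inverse(0.5)
#   5.0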
class LogNorm(Normalize):
"""
Normalize a given value to the 0-1 range on a log scale
"""
def __call__(self, value, clip=None):
if clip is None:
clip = self.clip
result, is_scalar = self.process_value(value)
result = ma.masked_less_equal(result, 0, copy=False)
self.autoscale_None(result)
vmin, vmax = self.vmin, self.vmax
if vmin > vmax:
raise ValueError("minvalue must be less than or equal to maxvalue")
elif vmin <= 0:
raise ValueError("values must all be positive")
elif vmin == vmax:
result.fill(0)
else:
if clip:
mask = ma.getmask(result)
result = ma.array(np.clip(result.filled(vmax), vmin, vmax),
mask=mask)
# in-place equivalent of above can be much faster
resdat = result.data
mask = result.mask
if mask is np.ma.nomask:
mask = (resdat <= 0)
else:
mask |= resdat <= 0
cbook._putmask(resdat, mask, 1)
np.log(resdat, resdat)
resdat -= np.log(vmin)
resdat /= (np.log(vmax) - np.log(vmin))
result = np.ma.array(resdat, mask=mask, copy=False)
if is_scalar:
result = result[0]
return result
def inverse(self, value):
if not self.scaled():
raise ValueError("Not invertible until scaled")
vmin, vmax = self.vmin, self.vmax
if cbook.iterable(value):
val = ma.asarray(value)
return vmin * ma.power((vmax / vmin), val)
else:
return vmin * pow((vmax / vmin), value)
def autoscale(self, A):
"""
Set *vmin*, *vmax* to min, max of *A*.
"""
A = ma.masked_less_equal(A, 0, copy=False)
self.vmin = ma.min(A)
self.vmax = ma.max(A)
def autoscale_None(self, A):
' autoscale only None-valued vmin or vmax'
if self.vmin is not None and self.vmax is not None:
return
A = ma.masked_less_equal(A, 0, copy=False)
if self.vmin is None:
self.vmin = ma.min(A)
if self.vmax is None:
self.vmax = ma.max(A)
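# Illustrative log normalization (comments only): with vmin=1 and vmax=100,
# the geometric midpoint 10 maps to 0.5.
#
#   >>> LogNorm(vmin=1, vmax=100)(10)
#   0.5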
class SymLogNorm(Normalize):
"""
The symmetrical logarithmic scale is logarithmic in both the
positive and negative directions from the origin.
    Since the logarithm of values close to zero tends toward negative
    infinity, there needs to be a range around zero that is linear. The parameter
*linthresh* allows the user to specify the size of this range
(-*linthresh*, *linthresh*).
"""
def __init__(self, linthresh, linscale=1.0,
vmin=None, vmax=None, clip=False):
"""
*linthresh*:
The range within which the plot is linear (to
avoid having the plot go to infinity around zero).
*linscale*:
This allows the linear range (-*linthresh* to *linthresh*)
to be stretched relative to the logarithmic range. Its
value is the number of decades to use for each half of the
linear range. For example, when *linscale* == 1.0 (the
default), the space used for the positive and negative
halves of the linear range will be equal to one decade in
the logarithmic range. Defaults to 1.
"""
Normalize.__init__(self, vmin, vmax, clip)
self.linthresh = float(linthresh)
self._linscale_adj = (linscale / (1.0 - np.e ** -1))
def __call__(self, value, clip=None):
if clip is None:
clip = self.clip
result, is_scalar = self.process_value(value)
self.autoscale_None(result)
vmin, vmax = self.vmin, self.vmax
if vmin > vmax:
raise ValueError("minvalue must be less than or equal to maxvalue")
elif vmin == vmax:
result.fill(0)
else:
if clip:
mask = ma.getmask(result)
result = ma.array(np.clip(result.filled(vmax), vmin, vmax),
mask=mask)
# in-place equivalent of above can be much faster
resdat = self._transform(result.data)
resdat -= self._lower
resdat /= (self._upper - self._lower)
if is_scalar:
result = result[0]
return result
def _transform(self, a):
"""
Inplace transformation.
"""
masked = np.abs(a) > self.linthresh
sign = np.sign(a[masked])
log = (self._linscale_adj + np.log(np.abs(a[masked]) / self.linthresh))
log *= sign * self.linthresh
a[masked] = log
a[~masked] *= self._linscale_adj
return a
def _inv_transform(self, a):
"""
Inverse inplace Transformation.
"""
masked = np.abs(a) > (self.linthresh * self._linscale_adj)
sign = np.sign(a[masked])
exp = np.exp(sign * a[masked] / self.linthresh - self._linscale_adj)
exp *= sign * self.linthresh
a[masked] = exp
a[~masked] /= self._linscale_adj
return a
def _transform_vmin_vmax(self):
"""
Calculates vmin and vmax in the transformed system.
"""
vmin, vmax = self.vmin, self.vmax
arr = np.array([vmax, vmin]).astype(np.float)
self._upper, self._lower = self._transform(arr)
def inverse(self, value):
if not self.scaled():
raise ValueError("Not invertible until scaled")
val = ma.asarray(value)
val = val * (self._upper - self._lower) + self._lower
return self._inv_transform(val)
def autoscale(self, A):
"""
Set *vmin*, *vmax* to min, max of *A*.
"""
self.vmin = ma.min(A)
self.vmax = ma.max(A)
self._transform_vmin_vmax()
def autoscale_None(self, A):
""" autoscale only None-valued vmin or vmax """
if self.vmin is not None and self.vmax is not None:
pass
if self.vmin is None:
self.vmin = ma.min(A)
if self.vmax is None:
self.vmax = ma.max(A)
self._transform_vmin_vmax()
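# Illustrative symmetric-log normalization (comments only): for a range that
# is symmetric about zero, the value 0 maps to the midpoint 0.5.
#
#   >>> SymLogNorm(linthresh=1.0, vmin=-10, vmax=10)(0.0)
#   0.5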
class PowerNorm(Normalize):
"""
Normalize a given value to the ``[0, 1]`` interval with a power-law
scaling. This will clip any negative data points to 0.
"""
def __init__(self, gamma, vmin=None, vmax=None, clip=False):
Normalize.__init__(self, vmin, vmax, clip)
self.gamma = gamma
def __call__(self, value, clip=None):
if clip is None:
clip = self.clip
result, is_scalar = self.process_value(value)
self.autoscale_None(result)
gamma = self.gamma
vmin, vmax = self.vmin, self.vmax
if vmin > vmax:
raise ValueError("minvalue must be less than or equal to maxvalue")
elif vmin == vmax:
result.fill(0)
else:
res_mask = result.data < 0
if clip:
mask = ma.getmask(result)
result = ma.array(np.clip(result.filled(vmax), vmin, vmax),
mask=mask)
resdat = result.data
resdat -= vmin
np.power(resdat, gamma, resdat)
resdat /= (vmax - vmin) ** gamma
result = np.ma.array(resdat, mask=result.mask, copy=False)
result[res_mask] = 0
if is_scalar:
result = result[0]
return result
def inverse(self, value):
if not self.scaled():
raise ValueError("Not invertible until scaled")
gamma = self.gamma
vmin, vmax = self.vmin, self.vmax
if cbook.iterable(value):
val = ma.asarray(value)
return ma.power(val, 1. / gamma) * (vmax - vmin) + vmin
else:
return pow(value, 1. / gamma) * (vmax - vmin) + vmin
def autoscale(self, A):
"""
Set *vmin*, *vmax* to min, max of *A*.
"""
self.vmin = ma.min(A)
if self.vmin < 0:
self.vmin = 0
warnings.warn("Power-law scaling on negative values is "
"ill-defined, clamping to 0.")
self.vmax = ma.max(A)
def autoscale_None(self, A):
' autoscale only None-valued vmin or vmax'
if self.vmin is None and np.size(A) > 0:
self.vmin = ma.min(A)
if self.vmin < 0:
self.vmin = 0
warnings.warn("Power-law scaling on negative values is "
"ill-defined, clamping to 0.")
if self.vmax is None and np.size(A) > 0:
self.vmax = ma.max(A)
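# Illustrative power-law normalization (comments only): with gamma=2 over
# [0, 10], the value 5 maps to (5/10)**2.
#
#   >>> PowerNorm(gamma=2, vmin=0, vmax=10)(5.0)
#   0.25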
class BoundaryNorm(Normalize):
"""
Generate a colormap index based on discrete intervals.
Unlike :class:`Normalize` or :class:`LogNorm`,
:class:`BoundaryNorm` maps values to integers instead of to the
interval 0-1.
Mapping to the 0-1 interval could have been done via
piece-wise linear interpolation, but using integers seems
simpler, and reduces the number of conversions back and forth
between integer and floating point.
"""
def __init__(self, boundaries, ncolors, clip=False):
"""
*boundaries*
a monotonically increasing sequence
*ncolors*
number of colors in the colormap to be used
If::
b[i] <= v < b[i+1]
then v is mapped to color j;
as i varies from 0 to len(boundaries)-2,
j goes from 0 to ncolors-1.
Out-of-range values are mapped
to -1 if low and ncolors if high; these are converted
to valid indices by
:meth:`Colormap.__call__` .
If clip == True, out-of-range values
are mapped to 0 if low and ncolors-1 if high.
"""
self.clip = clip
self.vmin = boundaries[0]
self.vmax = boundaries[-1]
self.boundaries = np.asarray(boundaries)
self.N = len(self.boundaries)
self.Ncmap = ncolors
if self.N - 1 == self.Ncmap:
self._interp = False
else:
self._interp = True
def __call__(self, value, clip=None):
if clip is None:
clip = self.clip
xx, is_scalar = self.process_value(value)
mask = ma.getmaskarray(xx)
xx = np.atleast_1d(xx.filled(self.vmax + 1))
if clip:
np.clip(xx, self.vmin, self.vmax, out=xx)
max_col = self.Ncmap - 1
else:
max_col = self.Ncmap
iret = np.zeros(xx.shape, dtype=np.int16)
for i, b in enumerate(self.boundaries):
iret[xx >= b] = i
if self._interp:
scalefac = float(self.Ncmap - 1) / (self.N - 2)
iret = (iret * scalefac).astype(np.int16)
iret[xx < self.vmin] = -1
iret[xx >= self.vmax] = max_col
ret = ma.array(iret, mask=mask)
if is_scalar:
ret = int(ret[0]) # assume python scalar
return ret
def inverse(self, value):
        raise ValueError("BoundaryNorm is not invertible")
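# Illustrative boundary normalization (comments only): values are mapped to
# the index of the interval they fall into.
#
#   >>> bnorm = BoundaryNorm(boundaries=[0, 1, 2, 3], ncolors=3)
#   >>> bnorm(1.5)
#   1
#   >>> bnorm(2.5)
#   2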
class NoNorm(Normalize):
"""
Dummy replacement for Normalize, for the case where we
want to use indices directly in a
:class:`~matplotlib.cm.ScalarMappable` .
"""
def __call__(self, value, clip=None):
return value
def inverse(self, value):
return value
def rgb_to_hsv(arr):
"""
convert float rgb values (in the range [0, 1]), in a numpy array to hsv
values.
Parameters
----------
arr : (..., 3) array-like
All values must be in the range [0, 1]
Returns
-------
hsv : (..., 3) ndarray
Colors converted to hsv values in range [0, 1]
"""
# make sure it is an ndarray
arr = np.asarray(arr)
# check length of the last dimension, should be _some_ sort of rgb
if arr.shape[-1] != 3:
raise ValueError("Last dimension of input array must be 3; "
"shape {shp} was found.".format(shp=arr.shape))
in_ndim = arr.ndim
if arr.ndim == 1:
arr = np.array(arr, ndmin=2)
# make sure we don't have an int image
if arr.dtype.kind in ('iu'):
arr = arr.astype(np.float32)
out = np.zeros_like(arr)
arr_max = arr.max(-1)
ipos = arr_max > 0
delta = arr.ptp(-1)
s = np.zeros_like(delta)
s[ipos] = delta[ipos] / arr_max[ipos]
ipos = delta > 0
# red is max
idx = (arr[..., 0] == arr_max) & ipos
out[idx, 0] = (arr[idx, 1] - arr[idx, 2]) / delta[idx]
# green is max
idx = (arr[..., 1] == arr_max) & ipos
out[idx, 0] = 2. + (arr[idx, 2] - arr[idx, 0]) / delta[idx]
# blue is max
idx = (arr[..., 2] == arr_max) & ipos
out[idx, 0] = 4. + (arr[idx, 0] - arr[idx, 1]) / delta[idx]
out[..., 0] = (out[..., 0] / 6.0) % 1.0
out[..., 1] = s
out[..., 2] = arr_max
if in_ndim == 1:
out.shape = (3,)
return out
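# Illustrative conversion (comments only): pure red has hue 0 with full
# saturation and full value.
#
#   >>> rgb_to_hsv([1.0, 0.0, 0.0])
#   array([ 0.,  1.,  1.])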
def hsv_to_rgb(hsv):
"""
convert hsv values in a numpy array to rgb values
all values assumed to be in range [0, 1]
Parameters
----------
hsv : (..., 3) array-like
All values assumed to be in range [0, 1]
Returns
-------
rgb : (..., 3) ndarray
Colors converted to RGB values in range [0, 1]
"""
hsv = np.asarray(hsv)
# check length of the last dimension, should be _some_ sort of rgb
if hsv.shape[-1] != 3:
raise ValueError("Last dimension of input array must be 3; "
"shape {shp} was found.".format(shp=hsv.shape))
    # if we got passed a 1D array, try to treat it as
    # a single color and reshape as needed
in_ndim = hsv.ndim
if in_ndim == 1:
hsv = np.array(hsv, ndmin=2)
# make sure we don't have an int image
if hsv.dtype.kind in ('iu'):
hsv = hsv.astype(np.float32)
h = hsv[..., 0]
s = hsv[..., 1]
v = hsv[..., 2]
r = np.empty_like(h)
g = np.empty_like(h)
b = np.empty_like(h)
i = (h * 6.0).astype(np.int)
f = (h * 6.0) - i
p = v * (1.0 - s)
q = v * (1.0 - s * f)
t = v * (1.0 - s * (1.0 - f))
idx = i % 6 == 0
r[idx] = v[idx]
g[idx] = t[idx]
b[idx] = p[idx]
idx = i == 1
r[idx] = q[idx]
g[idx] = v[idx]
b[idx] = p[idx]
idx = i == 2
r[idx] = p[idx]
g[idx] = v[idx]
b[idx] = t[idx]
idx = i == 3
r[idx] = p[idx]
g[idx] = q[idx]
b[idx] = v[idx]
idx = i == 4
r[idx] = t[idx]
g[idx] = p[idx]
b[idx] = v[idx]
idx = i == 5
r[idx] = v[idx]
g[idx] = p[idx]
b[idx] = q[idx]
idx = s == 0
r[idx] = v[idx]
g[idx] = v[idx]
b[idx] = v[idx]
rgb = np.empty_like(hsv)
rgb[..., 0] = r
rgb[..., 1] = g
rgb[..., 2] = b
if in_ndim == 1:
rgb.shape = (3, )
return rgb
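# Illustrative inverse conversion (comments only): hue 0.5 at full saturation
# and value is cyan.
#
#   >>> hsv_to_rgb([0.5, 1.0, 1.0])
#   array([ 0.,  1.,  1.])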
class LightSource(object):
"""
Create a light source coming from the specified azimuth and elevation.
Angles are in degrees, with the azimuth measured
clockwise from north and elevation up from the zero plane of the surface.
    The :meth:`shade` method is used to produce "shaded" rgb values for a
    data array. :meth:`shade_rgb` can be used to combine an rgb image with
    an elevation map, and :meth:`hillshade` produces an illumination map of
    a surface.
"""
def __init__(self, azdeg=315, altdeg=45, hsv_min_val=0, hsv_max_val=1,
hsv_min_sat=1, hsv_max_sat=0):
"""
        Specify the azimuth (measured clockwise from north) and altitude
(measured up from the plane of the surface) of the light source
in degrees.
Parameters
----------
azdeg : number, optional
The azimuth (0-360, degrees clockwise from North) of the light
source. Defaults to 315 degrees (from the northwest).
altdeg : number, optional
The altitude (0-90, degrees up from horizontal) of the light
source. Defaults to 45 degrees from horizontal.
Notes
-----
For backwards compatibility, the parameters *hsv_min_val*,
*hsv_max_val*, *hsv_min_sat*, and *hsv_max_sat* may be supplied at
initialization as well. However, these parameters will only be used if
"blend_mode='hsv'" is passed into :meth:`shade` or :meth:`shade_rgb`.
See the documentation for :meth:`blend_hsv` for more details.
"""
self.azdeg = azdeg
self.altdeg = altdeg
self.hsv_min_val = hsv_min_val
self.hsv_max_val = hsv_max_val
self.hsv_min_sat = hsv_min_sat
self.hsv_max_sat = hsv_max_sat
def hillshade(self, elevation, vert_exag=1, dx=1, dy=1, fraction=1.):
"""
Calculates the illumination intensity for a surface using the defined
azimuth and elevation for the light source.
Imagine an artificial sun placed at infinity in some azimuth and
elevation position illuminating our surface. The parts of the surface
that slope toward the sun should brighten while those sides facing away
should become darker.
Parameters
----------
elevation : array-like
A 2d array (or equivalent) of the height values used to generate an
illumination map
vert_exag : number, optional
The amount to exaggerate the elevation values by when calculating
illumination. This can be used either to correct for differences in
units between the x-y coordinate system and the elevation
coordinate system (e.g. decimal degrees vs meters) or to exaggerate
or de-emphasize topographic effects.
dx : number, optional
The x-spacing (columns) of the input *elevation* grid.
dy : number, optional
The y-spacing (rows) of the input *elevation* grid.
fraction : number, optional
Increases or decreases the contrast of the hillshade. Values
greater than one will cause intermediate values to move closer to
full illumination or shadow (and clipping any values that move
beyond 0 or 1). Note that this is not visually or mathematically
the same as vertical exaggeration.
Returns
-------
intensity : ndarray
A 2d array of illumination values between 0-1, where 0 is
completely in shadow and 1 is completely illuminated.
"""
# Azimuth is in degrees clockwise from North. Convert to radians
# counterclockwise from East (mathematical notation).
az = np.radians(90 - self.azdeg)
alt = np.radians(self.altdeg)
# Because most image and raster GIS data has the first row in the array
# as the "top" of the image, dy is implicitly negative. This is
        # consistent with what `imshow` assumes, as well.
dy = -dy
# Calculate the intensity from the illumination angle
dy, dx = np.gradient(vert_exag * elevation, dy, dx)
# The aspect is defined by the _downhill_ direction, thus the negative
aspect = np.arctan2(-dy, -dx)
slope = 0.5 * np.pi - np.arctan(np.hypot(dx, dy))
intensity = (np.sin(alt) * np.sin(slope) +
np.cos(alt) * np.cos(slope) * np.cos(az - aspect))
# Apply contrast stretch
imin, imax = intensity.min(), intensity.max()
intensity *= fraction
# Rescale to 0-1, keeping range before contrast stretch
# If constant slope, keep relative scaling (i.e. flat should be 0.5,
# fully occluded 0, etc.)
if (imax - imin) > 1e-6:
# Strictly speaking, this is incorrect. Negative values should be
# clipped to 0 because they're fully occluded. However, rescaling
# in this manner is consistent with the previous implementation and
# visually appears better than a "hard" clip.
intensity -= imin
intensity /= (imax - imin)
intensity = np.clip(intensity, 0, 1, intensity)
return intensity
def shade(self, data, cmap, norm=None, blend_mode='hsv', vmin=None,
vmax=None, vert_exag=1, dx=1, dy=1, fraction=1, **kwargs):
"""
Combine colormapped data values with an illumination intensity map
(a.k.a. "hillshade") of the values.
Parameters
----------
data : array-like
A 2d array (or equivalent) of the height values used to generate a
shaded map.
cmap : `~matplotlib.colors.Colormap` instance
The colormap used to color the *data* array. Note that this must be
a `~matplotlib.colors.Colormap` instance. For example, rather than
passing in `cmap='gist_earth'`, use
`cmap=plt.get_cmap('gist_earth')` instead.
norm : `~matplotlib.colors.Normalize` instance, optional
The normalization used to scale values before colormapping. If
None, the input will be linearly scaled between its min and max.
blend_mode : {'hsv', 'overlay', 'soft'} or callable, optional
The type of blending used to combine the colormapped data values
with the illumination intensity. For backwards compatibility, this
defaults to "hsv". Note that for most topographic surfaces,
"overlay" or "soft" appear more visually realistic. If a
user-defined function is supplied, it is expected to combine an
MxNx3 RGB array of floats (ranging 0 to 1) with an MxNx1 hillshade
array (also 0 to 1). (Call signature `func(rgb, illum, **kwargs)`)
Additional kwargs supplied to this function will be passed on to
the *blend_mode* function.
vmin : scalar or None, optional
The minimum value used in colormapping *data*. If *None* the
minimum value in *data* is used. If *norm* is specified, then this
argument will be ignored.
vmax : scalar or None, optional
The maximum value used in colormapping *data*. If *None* the
maximum value in *data* is used. If *norm* is specified, then this
argument will be ignored.
vert_exag : number, optional
The amount to exaggerate the elevation values by when calculating
illumination. This can be used either to correct for differences in
units between the x-y coordinate system and the elevation
coordinate system (e.g. decimal degrees vs meters) or to exaggerate
or de-emphasize topography.
dx : number, optional
The x-spacing (columns) of the input *elevation* grid.
dy : number, optional
The y-spacing (rows) of the input *elevation* grid.
fraction : number, optional
Increases or decreases the contrast of the hillshade. Values
greater than one will cause intermediate values to move closer to
full illumination or shadow (and clipping any values that move
beyond 0 or 1). Note that this is not visually or mathematically
the same as vertical exaggeration.
Additional kwargs are passed on to the *blend_mode* function.
Returns
-------
rgba : ndarray
An MxNx4 array of floats ranging between 0-1.
"""
if vmin is None:
vmin = data.min()
if vmax is None:
vmax = data.max()
if norm is None:
norm = Normalize(vmin=vmin, vmax=vmax)
rgb0 = cmap(norm(data))
rgb1 = self.shade_rgb(rgb0, elevation=data, blend_mode=blend_mode,
vert_exag=vert_exag, dx=dx, dy=dy,
fraction=fraction, **kwargs)
# Don't overwrite the alpha channel, if present.
rgb0[..., :3] = rgb1[..., :3]
return rgb0
def shade_rgb(self, rgb, elevation, fraction=1., blend_mode='hsv',
vert_exag=1, dx=1, dy=1, **kwargs):
"""
        Take the input RGB array (ny*nx*3) and adjust its color values
        to give the impression of a shaded relief map with a
        specified light source, using the elevation (ny*nx).
        A new RGB array (ny*nx*3) is returned.
Parameters
----------
rgb : array-like
An MxNx3 RGB array, assumed to be in the range of 0 to 1.
elevation : array-like
A 2d array (or equivalent) of the height values used to generate a
shaded map.
fraction : number
Increases or decreases the contrast of the hillshade. Values
greater than one will cause intermediate values to move closer to
full illumination or shadow (and clipping any values that move
beyond 0 or 1). Note that this is not visually or mathematically
the same as vertical exaggeration.
blend_mode : {'hsv', 'overlay', 'soft'} or callable, optional
The type of blending used to combine the colormapped data values
with the illumination intensity. For backwards compatibility, this
defaults to "hsv". Note that for most topographic surfaces,
"overlay" or "soft" appear more visually realistic. If a
user-defined function is supplied, it is expected to combine an
MxNx3 RGB array of floats (ranging 0 to 1) with an MxNx1 hillshade
array (also 0 to 1). (Call signature `func(rgb, illum, **kwargs)`)
Additional kwargs supplied to this function will be passed on to
the *blend_mode* function.
vert_exag : number, optional
The amount to exaggerate the elevation values by when calculating
illumination. This can be used either to correct for differences in
units between the x-y coordinate system and the elevation
coordinate system (e.g. decimal degrees vs meters) or to exaggerate
or de-emphasize topography.
dx : number, optional
The x-spacing (columns) of the input *elevation* grid.
dy : number, optional
The y-spacing (rows) of the input *elevation* grid.
Additional kwargs are passed on to the *blend_mode* function.
Returns
-------
shaded_rgb : ndarray
An MxNx3 array of floats ranging between 0-1.
"""
# Calculate the "hillshade" intensity.
intensity = self.hillshade(elevation, vert_exag, dx, dy, fraction)
intensity = intensity[..., np.newaxis]
# Blend the hillshade and rgb data using the specified mode
lookup = {
'hsv': self.blend_hsv,
'soft': self.blend_soft_light,
'overlay': self.blend_overlay,
}
if blend_mode in lookup:
blend = lookup[blend_mode](rgb, intensity, **kwargs)
else:
try:
blend = blend_mode(rgb, intensity, **kwargs)
except TypeError:
msg = '"blend_mode" must be callable or one of {0}'
                raise ValueError(msg.format(list(lookup.keys())))
# Only apply result where hillshade intensity isn't masked
if hasattr(intensity, 'mask'):
mask = intensity.mask[..., 0]
for i in range(3):
blend[..., i][mask] = rgb[..., i][mask]
return blend
def blend_hsv(self, rgb, intensity, hsv_max_sat=None, hsv_max_val=None,
hsv_min_val=None, hsv_min_sat=None):
"""
Take the input data array, convert to HSV values in the given colormap,
then adjust those color values to give the impression of a shaded
relief map with a specified light source. RGBA values are returned,
which can then be used to plot the shaded image with imshow.
The color of the resulting image will be darkened by moving the (s,v)
values (in hsv colorspace) toward (hsv_min_sat, hsv_min_val) in the
        shaded regions, or lightened by sliding (s,v) toward (hsv_max_sat,
        hsv_max_val) in regions that are illuminated. The default extremes are
        chosen so that completely shaded points are nearly black (s = 1, v = 0)
and completely illuminated points are nearly white (s = 0, v = 1).
Parameters
----------
rgb : ndarray
An MxNx3 RGB array of floats ranging from 0 to 1 (color image).
intensity : ndarray
An MxNx1 array of floats ranging from 0 to 1 (grayscale image).
hsv_max_sat : number, optional
The maximum saturation value that the *intensity* map can shift the
output image to. Defaults to 1.
hsv_min_sat : number, optional
The minimum saturation value that the *intensity* map can shift the
output image to. Defaults to 0.
hsv_max_val : number, optional
The maximum value ("v" in "hsv") that the *intensity* map can shift
the output image to. Defaults to 1.
        hsv_min_val : number, optional
The minimum value ("v" in "hsv") that the *intensity* map can shift
the output image to. Defaults to 0.
Returns
-------
rgb : ndarray
An MxNx3 RGB array representing the combined images.
"""
# Backward compatibility...
if hsv_max_sat is None:
hsv_max_sat = self.hsv_max_sat
if hsv_max_val is None:
hsv_max_val = self.hsv_max_val
if hsv_min_sat is None:
hsv_min_sat = self.hsv_min_sat
if hsv_min_val is None:
hsv_min_val = self.hsv_min_val
        # Expects a 2D intensity array scaled between -1 and 1...
intensity = intensity[..., 0]
intensity = 2 * intensity - 1
# convert to rgb, then rgb to hsv
hsv = rgb_to_hsv(rgb[:, :, 0:3])
# modify hsv values to simulate illumination.
hsv[:, :, 1] = np.where(np.logical_and(np.abs(hsv[:, :, 1]) > 1.e-10,
intensity > 0),
((1. - intensity) * hsv[:, :, 1] +
intensity * hsv_max_sat),
hsv[:, :, 1])
hsv[:, :, 2] = np.where(intensity > 0,
((1. - intensity) * hsv[:, :, 2] +
intensity * hsv_max_val),
hsv[:, :, 2])
hsv[:, :, 1] = np.where(np.logical_and(np.abs(hsv[:, :, 1]) > 1.e-10,
intensity < 0),
((1. + intensity) * hsv[:, :, 1] -
intensity * hsv_min_sat),
hsv[:, :, 1])
hsv[:, :, 2] = np.where(intensity < 0,
((1. + intensity) * hsv[:, :, 2] -
intensity * hsv_min_val),
hsv[:, :, 2])
hsv[:, :, 1:] = np.where(hsv[:, :, 1:] < 0., 0, hsv[:, :, 1:])
hsv[:, :, 1:] = np.where(hsv[:, :, 1:] > 1., 1, hsv[:, :, 1:])
# convert modified hsv back to rgb.
return hsv_to_rgb(hsv)
def blend_soft_light(self, rgb, intensity):
"""
Combines an rgb image with an intensity map using "soft light"
blending. Uses the "pegtop" formula.
Parameters
----------
rgb : ndarray
An MxNx3 RGB array of floats ranging from 0 to 1 (color image).
intensity : ndarray
An MxNx1 array of floats ranging from 0 to 1 (grayscale image).
Returns
-------
rgb : ndarray
An MxNx3 RGB array representing the combined images.
"""
return 2 * intensity * rgb + (1 - 2 * intensity) * rgb**2
def blend_overlay(self, rgb, intensity):
"""
Combines an rgb image with an intensity map using "overlay" blending.
Parameters
----------
rgb : ndarray
An MxNx3 RGB array of floats ranging from 0 to 1 (color image).
intensity : ndarray
An MxNx1 array of floats ranging from 0 to 1 (grayscale image).
Returns
-------
rgb : ndarray
An MxNx3 RGB array representing the combined images.
"""
low = 2 * intensity * rgb
high = 1 - 2 * (1 - intensity) * (1 - rgb)
return np.where(rgb <= 0.5, low, high)
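# --- Illustrative usage sketch (not part of the original module) -----------
# A minimal, hedged example of how the LightSource methods above fit
# together: ``hillshade`` turns an elevation grid into an intensity map and
# ``shade`` blends that intensity with colormapped data using one of the
# blend modes ('hsv', 'soft', 'overlay').  The synthetic elevation surface
# and the use of ``plt.get_cmap`` are assumptions made only for this demo.
def _demo_lightsource_shading():
    import matplotlib.pyplot as plt
    # A small synthetic elevation grid (a smooth Gaussian bump).
    y, x = np.mgrid[-2:2:128j, -2:2:128j]
    elevation = np.exp(-(x ** 2 + y ** 2))
    ls = LightSource(azdeg=315, altdeg=45)
    # Intensity map alone: values in [0, 1], roughly 0.5 for flat terrain.
    intensity = ls.hillshade(elevation, vert_exag=10)
    # Full shaded relief using the 'overlay' blend mode.
    rgba = ls.shade(elevation, cmap=plt.get_cmap('gist_earth'),
                    blend_mode='overlay', vert_exag=10)
    return intensity, rgba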
def from_levels_and_colors(levels, colors, extend='neither'):
"""
A helper routine to generate a cmap and a norm instance which
behave similar to contourf's levels and colors arguments.
Parameters
----------
levels : sequence of numbers
The quantization levels used to construct the :class:`BoundaryNorm`.
        Values ``v`` are quantized to level ``i`` if
``lev[i] <= v < lev[i+1]``.
colors : sequence of colors
The fill color to use for each level. If `extend` is "neither" there
must be ``n_level - 1`` colors. For an `extend` of "min" or "max" add
one extra color, and for an `extend` of "both" add two colors.
extend : {'neither', 'min', 'max', 'both'}, optional
The behaviour when a value falls out of range of the given levels.
See :func:`~matplotlib.pyplot.contourf` for details.
Returns
-------
(cmap, norm) : tuple containing a :class:`Colormap` and a \
:class:`Normalize` instance
"""
colors_i0 = 0
colors_i1 = None
if extend == 'both':
colors_i0 = 1
colors_i1 = -1
extra_colors = 2
elif extend == 'min':
colors_i0 = 1
extra_colors = 1
elif extend == 'max':
colors_i1 = -1
extra_colors = 1
elif extend == 'neither':
extra_colors = 0
else:
raise ValueError('Unexpected value for extend: {0!r}'.format(extend))
n_data_colors = len(levels) - 1
n_expected_colors = n_data_colors + extra_colors
if len(colors) != n_expected_colors:
raise ValueError('With extend == {0!r} and n_levels == {1!r} expected'
' n_colors == {2!r}. Got {3!r}.'
''.format(extend, len(levels), n_expected_colors,
len(colors)))
cmap = ListedColormap(colors[colors_i0:colors_i1], N=n_data_colors)
if extend in ['min', 'both']:
cmap.set_under(colors[0])
else:
cmap.set_under('none')
if extend in ['max', 'both']:
cmap.set_over(colors[-1])
else:
cmap.set_over('none')
cmap.colorbar_extend = extend
norm = BoundaryNorm(levels, ncolors=n_data_colors)
return cmap, norm
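# --- Illustrative usage sketch (not part of the original module) -----------
# A minimal, hedged example of how ``from_levels_and_colors`` pairs
# quantization levels with discrete colors.  With extend='min' one extra
# color is supplied for values below the lowest level.  The specific levels
# and color names below are assumptions chosen only for the demonstration.
def _demo_levels_and_colors():
    levels = [0, 1, 2, 4]                      # three data bins
    colors = ['gray', 'red', 'green', 'blue']  # one extra color for 'min'
    cmap, norm = from_levels_and_colors(levels, colors, extend='min')
    # norm maps a value to the bin index that cmap then colors,
    # e.g. 1.5 falls in the [1, 2) bin and gets the 'green' entry.
    return cmap(norm(1.5))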
| mit |
sarahgrogan/scikit-learn | sklearn/decomposition/tests/test_truncated_svd.py | 240 | 6055 | """Test truncated SVD transformer."""
import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_raises, assert_greater,
assert_array_less)
# Make an X that looks somewhat like a small tf-idf matrix.
# XXX newer versions of SciPy have scipy.sparse.rand for this.
shape = 60, 55
n_samples, n_features = shape
rng = check_random_state(42)
X = rng.randint(-100, 20, np.product(shape)).reshape(shape)
X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
X.data[:] = 1 + np.log(X.data)
Xdense = X.A
def test_algorithms():
svd_a = TruncatedSVD(30, algorithm="arpack")
svd_r = TruncatedSVD(30, algorithm="randomized", random_state=42)
Xa = svd_a.fit_transform(X)[:, :6]
Xr = svd_r.fit_transform(X)[:, :6]
assert_array_almost_equal(Xa, Xr)
comp_a = np.abs(svd_a.components_)
comp_r = np.abs(svd_r.components_)
# All elements are equal, but some elements are more equal than others.
assert_array_almost_equal(comp_a[:9], comp_r[:9])
assert_array_almost_equal(comp_a[9:], comp_r[9:], decimal=3)
def test_attributes():
for n_components in (10, 25, 41):
tsvd = TruncatedSVD(n_components).fit(X)
assert_equal(tsvd.n_components, n_components)
assert_equal(tsvd.components_.shape, (n_components, n_features))
def test_too_many_components():
for algorithm in ["arpack", "randomized"]:
for n_components in (n_features, n_features+1):
tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm)
assert_raises(ValueError, tsvd.fit, X)
def test_sparse_formats():
for fmt in ("array", "csr", "csc", "coo", "lil"):
        Xfmt = Xdense if fmt == "array" else getattr(X, "to" + fmt)()
tsvd = TruncatedSVD(n_components=11)
Xtrans = tsvd.fit_transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
Xtrans = tsvd.transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
def test_inverse_transform():
for algo in ("arpack", "randomized"):
# We need a lot of components for the reconstruction to be "almost
# equal" in all positions. XXX Test means or sums instead?
tsvd = TruncatedSVD(n_components=52, random_state=42)
Xt = tsvd.fit_transform(X)
Xinv = tsvd.inverse_transform(Xt)
assert_array_almost_equal(Xinv, Xdense, decimal=1)
def test_integers():
Xint = X.astype(np.int64)
tsvd = TruncatedSVD(n_components=6)
Xtrans = tsvd.fit_transform(Xint)
assert_equal(Xtrans.shape, (n_samples, tsvd.n_components))
def test_explained_variance():
# Test sparse data
svd_a_10_sp = TruncatedSVD(10, algorithm="arpack")
svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_sp = TruncatedSVD(20, algorithm="arpack")
svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_sp = svd_a_10_sp.fit_transform(X)
X_trans_r_10_sp = svd_r_10_sp.fit_transform(X)
X_trans_a_20_sp = svd_a_20_sp.fit_transform(X)
X_trans_r_20_sp = svd_r_20_sp.fit_transform(X)
# Test dense data
svd_a_10_de = TruncatedSVD(10, algorithm="arpack")
svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_de = TruncatedSVD(20, algorithm="arpack")
svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray())
X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray())
X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray())
X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray())
# helper arrays for tests below
svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de,
svd_r_10_de, svd_a_20_de, svd_r_20_de)
svds_trans = (
(svd_a_10_sp, X_trans_a_10_sp),
(svd_r_10_sp, X_trans_r_10_sp),
(svd_a_20_sp, X_trans_a_20_sp),
(svd_r_20_sp, X_trans_r_20_sp),
(svd_a_10_de, X_trans_a_10_de),
(svd_r_10_de, X_trans_r_10_de),
(svd_a_20_de, X_trans_a_20_de),
(svd_r_20_de, X_trans_r_20_de),
)
svds_10_v_20 = (
(svd_a_10_sp, svd_a_20_sp),
(svd_r_10_sp, svd_r_20_sp),
(svd_a_10_de, svd_a_20_de),
(svd_r_10_de, svd_r_20_de),
)
svds_sparse_v_dense = (
(svd_a_10_sp, svd_a_10_de),
(svd_a_20_sp, svd_a_20_de),
(svd_r_10_sp, svd_r_10_de),
(svd_r_20_sp, svd_r_20_de),
)
# Assert the 1st component is equal
for svd_10, svd_20 in svds_10_v_20:
assert_array_almost_equal(
svd_10.explained_variance_ratio_,
svd_20.explained_variance_ratio_[:10],
decimal=5,
)
# Assert that 20 components has higher explained variance than 10
for svd_10, svd_20 in svds_10_v_20:
assert_greater(
svd_20.explained_variance_ratio_.sum(),
svd_10.explained_variance_ratio_.sum(),
)
# Assert that all the values are greater than 0
for svd in svds:
assert_array_less(0.0, svd.explained_variance_ratio_)
# Assert that total explained variance is less than 1
for svd in svds:
assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)
# Compare sparse vs. dense
for svd_sparse, svd_dense in svds_sparse_v_dense:
assert_array_almost_equal(svd_sparse.explained_variance_ratio_,
svd_dense.explained_variance_ratio_)
# Test that explained_variance is correct
for svd, transformed in svds_trans:
total_variance = np.var(X.toarray(), axis=0).sum()
variances = np.var(transformed, axis=0)
true_explained_variance_ratio = variances / total_variance
assert_array_almost_equal(
svd.explained_variance_ratio_,
true_explained_variance_ratio,
)
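# --- Illustrative sketch (not part of the original test suite) -------------
# The checks above compare ``explained_variance_ratio_`` against variances
# computed directly from the transformed data.  The helper below restates
# that relationship for a single fitted model; it is a hedged example only,
# not an additional scikit-learn test.
def _demo_explained_variance_relation():
    tsvd = TruncatedSVD(n_components=5, random_state=0).fit(X)
    transformed = tsvd.transform(X)
    total_variance = np.var(X.toarray(), axis=0).sum()
    ratio = np.var(transformed, axis=0) / total_variance
    # Should agree with the fitted attribute to several decimal places.
    assert_array_almost_equal(ratio, tsvd.explained_variance_ratio_)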
| bsd-3-clause |
Ziqi-Li/bknqgis | Shapely/docs/sphinxext/inheritance_diagram.py | 98 | 13648 | """
Defines a docutils directive for inserting inheritance diagrams.
Provide the directive with one or more classes or modules (separated
by whitespace). For modules, all of the classes in that module will
be used.
Example::
Given the following classes:
class A: pass
class B(A): pass
class C(A): pass
class D(B, C): pass
class E(B): pass
    .. inheritance-diagram:: D E
Produces a graph like the following:
A
/ \
B C
/ \ /
E D
The graph is inserted as a PNG+image map into HTML and a PDF in
LaTeX.
"""
import inspect
import os
import re
import subprocess
try:
from hashlib import md5
except ImportError:
from md5 import md5
from docutils.nodes import Body, Element
from docutils.parsers.rst import directives
from sphinx.roles import xfileref_role
def my_import(name):
"""Module importer - taken from the python documentation.
This function allows importing names with dots in them."""
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
class DotException(Exception):
pass
class InheritanceGraph(object):
"""
Given a list of classes, determines the set of classes that
they inherit from all the way to the root "object", and then
is able to generate a graphviz dot graph from them.
"""
def __init__(self, class_names, show_builtins=False):
"""
*class_names* is a list of child classes to show bases from.
If *show_builtins* is True, then Python builtins will be shown
in the graph.
"""
self.class_names = class_names
self.classes = self._import_classes(class_names)
self.all_classes = self._all_classes(self.classes)
if len(self.all_classes) == 0:
raise ValueError("No classes found for inheritance diagram")
self.show_builtins = show_builtins
py_sig_re = re.compile(r'''^([\w.]*\.)? # class names
(\w+) \s* $ # optionally arguments
''', re.VERBOSE)
def _import_class_or_module(self, name):
"""
Import a class using its fully-qualified *name*.
"""
try:
path, base = self.py_sig_re.match(name).groups()
except:
raise ValueError(
"Invalid class or module '%s' specified for inheritance diagram" % name)
fullname = (path or '') + base
path = (path and path.rstrip('.'))
if not path:
path = base
try:
module = __import__(path, None, None, [])
# We must do an import of the fully qualified name. Otherwise if a
# subpackage 'a.b' is requested where 'import a' does NOT provide
# 'a.b' automatically, then 'a.b' will not be found below. This
# second call will force the equivalent of 'import a.b' to happen
# after the top-level import above.
my_import(fullname)
except ImportError:
raise ValueError(
"Could not import class or module '%s' specified for inheritance diagram" % name)
try:
todoc = module
for comp in fullname.split('.')[1:]:
todoc = getattr(todoc, comp)
except AttributeError:
raise ValueError(
"Could not find class or module '%s' specified for inheritance diagram" % name)
# If a class, just return it
if inspect.isclass(todoc):
return [todoc]
elif inspect.ismodule(todoc):
classes = []
for cls in todoc.__dict__.values():
if inspect.isclass(cls) and cls.__module__ == todoc.__name__:
classes.append(cls)
return classes
raise ValueError(
"'%s' does not resolve to a class or module" % name)
def _import_classes(self, class_names):
"""
Import a list of classes.
"""
classes = []
for name in class_names:
classes.extend(self._import_class_or_module(name))
return classes
def _all_classes(self, classes):
"""
Return a list of all classes that are ancestors of *classes*.
"""
all_classes = {}
def recurse(cls):
all_classes[cls] = None
for c in cls.__bases__:
if c not in all_classes:
recurse(c)
for cls in classes:
recurse(cls)
return all_classes.keys()
def class_name(self, cls, parts=0):
"""
Given a class object, return a fully-qualified name. This
works for things I've tested in matplotlib so far, but may not
be completely general.
"""
module = cls.__module__
if module == '__builtin__':
fullname = cls.__name__
else:
fullname = "%s.%s" % (module, cls.__name__)
if parts == 0:
return fullname
name_parts = fullname.split('.')
return '.'.join(name_parts[-parts:])
def get_all_class_names(self):
"""
Get all of the class names involved in the graph.
"""
return [self.class_name(x) for x in self.all_classes]
# These are the default options for graphviz
default_graph_options = {
"rankdir": "LR",
"size": '"8.0, 12.0"'
}
default_node_options = {
"shape": "box",
"fontsize": 10,
"height": 0.25,
"fontname": "Vera Sans, DejaVu Sans, Liberation Sans, Arial, Helvetica, sans",
"style": '"setlinewidth(0.5)"'
}
default_edge_options = {
"arrowsize": 0.5,
"style": '"setlinewidth(0.5)"'
}
def _format_node_options(self, options):
return ','.join(["%s=%s" % x for x in options.items()])
def _format_graph_options(self, options):
return ''.join(["%s=%s;\n" % x for x in options.items()])
def generate_dot(self, fd, name, parts=0, urls={},
graph_options={}, node_options={},
edge_options={}):
"""
Generate a graphviz dot graph from the classes that
were passed in to __init__.
*fd* is a Python file-like object to write to.
*name* is the name of the graph
*urls* is a dictionary mapping class names to http urls
*graph_options*, *node_options*, *edge_options* are
dictionaries containing key/value pairs to pass on as graphviz
properties.
"""
g_options = self.default_graph_options.copy()
g_options.update(graph_options)
n_options = self.default_node_options.copy()
n_options.update(node_options)
e_options = self.default_edge_options.copy()
e_options.update(edge_options)
fd.write('digraph %s {\n' % name)
fd.write(self._format_graph_options(g_options))
for cls in self.all_classes:
if not self.show_builtins and cls in __builtins__.values():
continue
name = self.class_name(cls, parts)
# Write the node
this_node_options = n_options.copy()
url = urls.get(self.class_name(cls))
if url is not None:
this_node_options['URL'] = '"%s"' % url
fd.write(' "%s" [%s];\n' %
(name, self._format_node_options(this_node_options)))
# Write the edges
for base in cls.__bases__:
if not self.show_builtins and base in __builtins__.values():
continue
base_name = self.class_name(base, parts)
fd.write(' "%s" -> "%s" [%s];\n' %
(base_name, name,
self._format_node_options(e_options)))
fd.write('}\n')
def run_dot(self, args, name, parts=0, urls={},
graph_options={}, node_options={}, edge_options={}):
"""
Run graphviz 'dot' over this graph, returning whatever 'dot'
writes to stdout.
*args* will be passed along as commandline arguments.
*name* is the name of the graph
*urls* is a dictionary mapping class names to http urls
Raises DotException for any of the many os and
installation-related errors that may occur.
"""
try:
dot = subprocess.Popen(['dot'] + list(args),
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
close_fds=True)
except OSError:
raise DotException("Could not execute 'dot'. Are you sure you have 'graphviz' installed?")
except ValueError:
raise DotException("'dot' called with invalid arguments")
except:
raise DotException("Unexpected error calling 'dot'")
self.generate_dot(dot.stdin, name, parts, urls, graph_options,
node_options, edge_options)
dot.stdin.close()
result = dot.stdout.read()
returncode = dot.wait()
if returncode != 0:
raise DotException("'dot' returned the errorcode %d" % returncode)
return result
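# --- Illustrative sketch (not part of the original extension) --------------
# A hedged example of driving ``InheritanceGraph`` outside the Sphinx
# directive to obtain raw graphviz source.  Using the ``docutils.nodes``
# module (already imported above) as the diagram target is an assumption
# made only for this demonstration.
def _demo_generate_dot_source():
    try:
        from cStringIO import StringIO    # Python 2
    except ImportError:
        from io import StringIO           # Python 3 fallback
    graph = InheritanceGraph(['docutils.nodes'])
    buf = StringIO()
    graph.generate_dot(buf, 'demo_graph', parts=1)
    return buf.getvalue()                 # graphviz 'dot' source text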
class inheritance_diagram(Body, Element):
"""
A docutils node to use as a placeholder for the inheritance
diagram.
"""
pass
def inheritance_diagram_directive(name, arguments, options, content, lineno,
content_offset, block_text, state,
state_machine):
"""
Run when the inheritance_diagram directive is first encountered.
"""
node = inheritance_diagram()
class_names = arguments
# Create a graph starting with the list of classes
graph = InheritanceGraph(class_names)
# Create xref nodes for each target of the graph's image map and
# add them to the doc tree so that Sphinx can resolve the
# references to real URLs later. These nodes will eventually be
# removed from the doctree after we're done with them.
for name in graph.get_all_class_names():
refnodes, x = xfileref_role(
'class', ':class:`%s`' % name, name, 0, state)
node.extend(refnodes)
# Store the graph object so we can use it to generate the
# dot file later
node['graph'] = graph
# Store the original content for use as a hash
node['parts'] = options.get('parts', 0)
node['content'] = " ".join(class_names)
return [node]
def get_graph_hash(node):
return md5(node['content'] + str(node['parts'])).hexdigest()[-10:]
def html_output_graph(self, node):
"""
Output the graph for HTML. This will insert a PNG with clickable
image map.
"""
graph = node['graph']
parts = node['parts']
graph_hash = get_graph_hash(node)
name = "inheritance%s" % graph_hash
path = '_images'
dest_path = os.path.join(setup.app.builder.outdir, path)
if not os.path.exists(dest_path):
os.makedirs(dest_path)
png_path = os.path.join(dest_path, name + ".png")
path = setup.app.builder.imgpath
# Create a mapping from fully-qualified class names to URLs.
urls = {}
for child in node:
if child.get('refuri') is not None:
urls[child['reftitle']] = child.get('refuri')
elif child.get('refid') is not None:
urls[child['reftitle']] = '#' + child.get('refid')
# These arguments to dot will save a PNG file to disk and write
# an HTML image map to stdout.
image_map = graph.run_dot(['-Tpng', '-o%s' % png_path, '-Tcmapx'],
name, parts, urls)
return ('<img src="%s/%s.png" usemap="#%s" class="inheritance"/>%s' %
(path, name, name, image_map))
def latex_output_graph(self, node):
"""
Output the graph for LaTeX. This will insert a PDF.
"""
graph = node['graph']
parts = node['parts']
graph_hash = get_graph_hash(node)
name = "inheritance%s" % graph_hash
dest_path = os.path.abspath(os.path.join(setup.app.builder.outdir, '_images'))
if not os.path.exists(dest_path):
os.makedirs(dest_path)
pdf_path = os.path.abspath(os.path.join(dest_path, name + ".pdf"))
graph.run_dot(['-Tpdf', '-o%s' % pdf_path],
name, parts, graph_options={'size': '"6.0,6.0"'})
return '\n\\includegraphics{%s}\n\n' % pdf_path
def visit_inheritance_diagram(inner_func):
"""
This is just a wrapper around html/latex_output_graph to make it
easier to handle errors and insert warnings.
"""
def visitor(self, node):
try:
content = inner_func(self, node)
except DotException, e:
# Insert the exception as a warning in the document
warning = self.document.reporter.warning(str(e), line=node.line)
warning.parent = node
node.children = [warning]
else:
source = self.document.attributes['source']
self.body.append(content)
node.children = []
return visitor
def do_nothing(self, node):
pass
def setup(app):
setup.app = app
setup.confdir = app.confdir
app.add_node(
inheritance_diagram,
latex=(visit_inheritance_diagram(latex_output_graph), do_nothing),
html=(visit_inheritance_diagram(html_output_graph), do_nothing))
app.add_directive(
'inheritance-diagram', inheritance_diagram_directive,
False, (1, 100, 0), parts = directives.nonnegative_int)
| gpl-2.0 |
nmartensen/pandas | pandas/tseries/frequencies.py | 1 | 29813 | from datetime import timedelta
from pandas.compat import long, zip
from pandas import compat
import re
import warnings
import numpy as np
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.dtypes.common import (
is_period_arraylike,
is_timedelta64_dtype,
is_datetime64_dtype)
import pandas.core.algorithms as algos
from pandas.core.algorithms import unique
from pandas.tseries.offsets import DateOffset
from pandas.util._decorators import cache_readonly, deprecate_kwarg
import pandas.tseries.offsets as offsets
from pandas._libs import lib, tslib
from pandas._libs.tslib import Timedelta
from pandas._libs.tslibs.frequencies import ( # noqa
get_freq_code, _base_and_stride, _period_str_to_code,
_INVALID_FREQ_ERROR, opattern, _lite_rule_alias, _dont_uppercase,
_period_code_map, _reverse_period_code_map)
from pytz import AmbiguousTimeError
class FreqGroup(object):
FR_ANN = 1000
FR_QTR = 2000
FR_MTH = 3000
FR_WK = 4000
FR_BUS = 5000
FR_DAY = 6000
FR_HR = 7000
FR_MIN = 8000
FR_SEC = 9000
FR_MS = 10000
FR_US = 11000
FR_NS = 12000
RESO_NS = 0
RESO_US = 1
RESO_MS = 2
RESO_SEC = 3
RESO_MIN = 4
RESO_HR = 5
RESO_DAY = 6
class Resolution(object):
RESO_US = RESO_US
RESO_MS = RESO_MS
RESO_SEC = RESO_SEC
RESO_MIN = RESO_MIN
RESO_HR = RESO_HR
RESO_DAY = RESO_DAY
_reso_str_map = {
RESO_NS: 'nanosecond',
RESO_US: 'microsecond',
RESO_MS: 'millisecond',
RESO_SEC: 'second',
RESO_MIN: 'minute',
RESO_HR: 'hour',
RESO_DAY: 'day'
}
# factor to multiply a value by to convert it to the next finer grained
# resolution
_reso_mult_map = {
RESO_NS: None,
RESO_US: 1000,
RESO_MS: 1000,
RESO_SEC: 1000,
RESO_MIN: 60,
RESO_HR: 60,
RESO_DAY: 24
}
_reso_str_bump_map = {
'D': 'H',
'H': 'T',
'T': 'S',
'S': 'L',
'L': 'U',
'U': 'N',
'N': None
}
_str_reso_map = dict([(v, k) for k, v in compat.iteritems(_reso_str_map)])
_reso_freq_map = {
'year': 'A',
'quarter': 'Q',
'month': 'M',
'day': 'D',
'hour': 'H',
'minute': 'T',
'second': 'S',
'millisecond': 'L',
'microsecond': 'U',
'nanosecond': 'N'}
_freq_reso_map = dict([(v, k)
for k, v in compat.iteritems(_reso_freq_map)])
@classmethod
def get_str(cls, reso):
"""
Return resolution str against resolution code.
Example
-------
>>> Resolution.get_str(Resolution.RESO_SEC)
'second'
"""
return cls._reso_str_map.get(reso, 'day')
@classmethod
def get_reso(cls, resostr):
"""
        Return resolution code against resolution str.
        Example
        -------
        >>> Resolution.get_reso('second')
        3
>>> Resolution.get_reso('second') == Resolution.RESO_SEC
True
"""
return cls._str_reso_map.get(resostr, cls.RESO_DAY)
@classmethod
def get_freq_group(cls, resostr):
"""
        Return frequency code group against resolution str.
        Example
        -------
        >>> Resolution.get_freq_group('day')
        6000
"""
return get_freq_group(cls.get_freq(resostr))
@classmethod
def get_freq(cls, resostr):
"""
Return frequency str against resolution str.
Example
-------
        >>> Resolution.get_freq('day')
'D'
"""
return cls._reso_freq_map[resostr]
@classmethod
def get_str_from_freq(cls, freq):
"""
Return resolution str against frequency str.
Example
-------
>>> Resolution.get_str_from_freq('H')
'hour'
"""
return cls._freq_reso_map.get(freq, 'day')
@classmethod
def get_reso_from_freq(cls, freq):
"""
Return resolution code against frequency str.
Example
-------
>>> Resolution.get_reso_from_freq('H')
        5
>>> Resolution.get_reso_from_freq('H') == Resolution.RESO_HR
True
"""
return cls.get_reso(cls.get_str_from_freq(freq))
@classmethod
def get_stride_from_decimal(cls, value, freq):
"""
Convert freq with decimal stride into a higher freq with integer stride
Parameters
----------
value : integer or float
freq : string
Frequency string
Raises
------
ValueError
If the float cannot be converted to an integer at any resolution.
Example
-------
>>> Resolution.get_stride_from_decimal(1.5, 'T')
(90, 'S')
>>> Resolution.get_stride_from_decimal(1.04, 'H')
(3744, 'S')
>>> Resolution.get_stride_from_decimal(1, 'D')
(1, 'D')
"""
if np.isclose(value % 1, 0):
return int(value), freq
else:
start_reso = cls.get_reso_from_freq(freq)
if start_reso == 0:
raise ValueError(
"Could not convert to integer offset at any resolution"
)
next_value = cls._reso_mult_map[start_reso] * value
next_name = cls._reso_str_bump_map[freq]
return cls.get_stride_from_decimal(next_value, next_name)
def get_to_timestamp_base(base):
"""
Return frequency code group used for base of to_timestamp against
frequency code.
Example
-------
# Return day freq code against longer freq than day
>>> get_to_timestamp_base(get_freq_code('D')[0])
6000
>>> get_to_timestamp_base(get_freq_code('W')[0])
6000
>>> get_to_timestamp_base(get_freq_code('M')[0])
6000
# Return second freq code against hour between second
>>> get_to_timestamp_base(get_freq_code('H')[0])
9000
>>> get_to_timestamp_base(get_freq_code('S')[0])
9000
"""
if base < FreqGroup.FR_BUS:
return FreqGroup.FR_DAY
if FreqGroup.FR_HR <= base <= FreqGroup.FR_SEC:
return FreqGroup.FR_SEC
return base
def get_freq_group(freq):
"""
Return frequency code group of given frequency str or offset.
Example
-------
>>> get_freq_group('W-MON')
4000
>>> get_freq_group('W-FRI')
4000
"""
if isinstance(freq, offsets.DateOffset):
freq = freq.rule_code
if isinstance(freq, compat.string_types):
base, mult = get_freq_code(freq)
freq = base
elif isinstance(freq, int):
pass
else:
raise ValueError('input must be str, offset or int')
return (freq // 1000) * 1000
def get_freq(freq):
"""
Return frequency code of given frequency str.
If input is not string, return input as it is.
Example
-------
>>> get_freq('A')
1000
>>> get_freq('3A')
1000
"""
if isinstance(freq, compat.string_types):
base, mult = get_freq_code(freq)
freq = base
return freq
def _get_freq_str(base, mult=1):
code = _reverse_period_code_map.get(base)
if mult == 1:
return code
return str(mult) + code
# ---------------------------------------------------------------------
# Offset names ("time rules") and related functions
from pandas.tseries.offsets import (Nano, Micro, Milli, Second, # noqa
Minute, Hour,
Day, BDay, CDay, Week, MonthBegin,
MonthEnd, BMonthBegin, BMonthEnd,
QuarterBegin, QuarterEnd, BQuarterBegin,
BQuarterEnd, YearBegin, YearEnd,
BYearBegin, BYearEnd, prefix_mapping)
try:
cday = CDay()
except NotImplementedError:
cday = None
#: cache of previously seen offsets
_offset_map = {}
_offset_to_period_map = {
'WEEKDAY': 'D',
'EOM': 'M',
'BM': 'M',
'BQS': 'Q',
'QS': 'Q',
'BQ': 'Q',
'BA': 'A',
'AS': 'A',
'BAS': 'A',
'MS': 'M',
'D': 'D',
'C': 'C',
'B': 'B',
'T': 'T',
'S': 'S',
'L': 'L',
'U': 'U',
'N': 'N',
'H': 'H',
'Q': 'Q',
'A': 'A',
'W': 'W',
'M': 'M',
'Y': 'A',
'BY': 'A',
'YS': 'A',
'BYS': 'A',
}
need_suffix = ['QS', 'BQ', 'BQS', 'YS', 'AS', 'BY', 'BA', 'BYS', 'BAS']
for __prefix in need_suffix:
for _m in tslib._MONTHS:
_alias = '{prefix}-{month}'.format(prefix=__prefix, month=_m)
_offset_to_period_map[_alias] = _offset_to_period_map[__prefix]
for __prefix in ['A', 'Q']:
for _m in tslib._MONTHS:
_alias = '{prefix}-{month}'.format(prefix=__prefix, month=_m)
_offset_to_period_map[_alias] = _alias
_days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
for _d in _days:
_alias = 'W-{day}'.format(day=_d)
_offset_to_period_map[_alias] = _alias
def get_period_alias(offset_str):
""" alias to closest period strings BQ->Q etc"""
return _offset_to_period_map.get(offset_str, None)
_name_to_offset_map = {'days': Day(1),
'hours': Hour(1),
'minutes': Minute(1),
'seconds': Second(1),
'milliseconds': Milli(1),
'microseconds': Micro(1),
'nanoseconds': Nano(1)}
@deprecate_kwarg(old_arg_name='freqstr', new_arg_name='freq')
def to_offset(freq):
"""
Return DateOffset object from string or tuple representation
or datetime.timedelta object
Parameters
----------
freq : str, tuple, datetime.timedelta, DateOffset or None
Returns
-------
delta : DateOffset
None if freq is None
Raises
------
ValueError
If freq is an invalid frequency
See Also
--------
pandas.DateOffset
Examples
--------
>>> to_offset('5min')
<5 * Minutes>
>>> to_offset('1D1H')
<25 * Hours>
>>> to_offset(('W', 2))
<2 * Weeks: weekday=6>
>>> to_offset((2, 'B'))
<2 * BusinessDays>
>>> to_offset(datetime.timedelta(days=1))
<Day>
>>> to_offset(Hour())
<Hour>
"""
if freq is None:
return None
if isinstance(freq, DateOffset):
return freq
if isinstance(freq, tuple):
name = freq[0]
stride = freq[1]
if isinstance(stride, compat.string_types):
name, stride = stride, name
name, _ = _base_and_stride(name)
delta = get_offset(name) * stride
elif isinstance(freq, timedelta):
delta = None
freq = Timedelta(freq)
try:
for name in freq.components._fields:
offset = _name_to_offset_map[name]
stride = getattr(freq.components, name)
if stride != 0:
offset = stride * offset
if delta is None:
delta = offset
else:
delta = delta + offset
except Exception:
raise ValueError(_INVALID_FREQ_ERROR.format(freq))
else:
delta = None
stride_sign = None
try:
splitted = re.split(opattern, freq)
if splitted[-1] != '' and not splitted[-1].isspace():
# the last element must be blank
raise ValueError('last element must be blank')
for sep, stride, name in zip(splitted[0::4], splitted[1::4],
splitted[2::4]):
if sep != '' and not sep.isspace():
raise ValueError('separator must be spaces')
prefix = _lite_rule_alias.get(name) or name
if stride_sign is None:
stride_sign = -1 if stride.startswith('-') else 1
if not stride:
stride = 1
if prefix in Resolution._reso_str_bump_map.keys():
stride, name = Resolution.get_stride_from_decimal(
float(stride), prefix
)
stride = int(stride)
offset = get_offset(name)
offset = offset * int(np.fabs(stride) * stride_sign)
if delta is None:
delta = offset
else:
delta = delta + offset
except Exception:
raise ValueError(_INVALID_FREQ_ERROR.format(freq))
if delta is None:
raise ValueError(_INVALID_FREQ_ERROR.format(freq))
return delta
def get_base_alias(freqstr):
"""
Returns the base frequency alias, e.g., '5D' -> 'D'
"""
return _base_and_stride(freqstr)[0]
def get_offset(name):
"""
Return DateOffset object associated with rule name
Examples
--------
get_offset('EOM') --> BMonthEnd(1)
"""
if name not in _dont_uppercase:
name = name.upper()
name = _lite_rule_alias.get(name, name)
name = _lite_rule_alias.get(name.lower(), name)
else:
name = _lite_rule_alias.get(name, name)
if name not in _offset_map:
try:
split = name.split('-')
klass = prefix_mapping[split[0]]
# handles case where there's no suffix (and will TypeError if too
# many '-')
offset = klass._from_name(*split[1:])
except (ValueError, TypeError, KeyError):
# bad prefix or suffix
raise ValueError(_INVALID_FREQ_ERROR.format(name))
# cache
_offset_map[name] = offset
# do not return cache because it's mutable
return _offset_map[name].copy()
getOffset = get_offset
def get_standard_freq(freq):
"""
Return the standardized frequency string
"""
msg = ("get_standard_freq is deprecated. Use to_offset(freq).rule_code "
"instead.")
warnings.warn(msg, FutureWarning, stacklevel=2)
return to_offset(freq).rule_code
# ---------------------------------------------------------------------
# Period codes
def infer_freq(index, warn=True):
"""
Infer the most likely frequency given the input index. If the frequency is
uncertain, a warning will be printed.
Parameters
----------
index : DatetimeIndex or TimedeltaIndex
if passed a Series will use the values of the series (NOT THE INDEX)
warn : boolean, default True
Returns
-------
freq : string or None
None if no discernible frequency
TypeError if the index is not datetime-like
ValueError if there are less than three values.
"""
import pandas as pd
if isinstance(index, ABCSeries):
values = index._values
if not (is_datetime64_dtype(values) or
is_timedelta64_dtype(values) or
values.dtype == object):
raise TypeError("cannot infer freq from a non-convertible dtype "
"on a Series of {dtype}".format(dtype=index.dtype))
index = values
if is_period_arraylike(index):
raise TypeError("PeriodIndex given. Check the `freq` attribute "
"instead of using infer_freq.")
elif isinstance(index, pd.TimedeltaIndex):
inferer = _TimedeltaFrequencyInferer(index, warn=warn)
return inferer.get_freq()
if isinstance(index, pd.Index) and not isinstance(index, pd.DatetimeIndex):
if isinstance(index, (pd.Int64Index, pd.Float64Index)):
raise TypeError("cannot infer freq from a non-convertible index "
"type {type}".format(type=type(index)))
index = index.values
if not isinstance(index, pd.DatetimeIndex):
try:
index = pd.DatetimeIndex(index)
except AmbiguousTimeError:
index = pd.DatetimeIndex(index.asi8)
inferer = _FrequencyInferer(index, warn=warn)
return inferer.get_freq()
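# --- Illustrative sketch (not part of the original module) -----------------
# A minimal, hedged example of what ``infer_freq`` returns for a regularly
# spaced DatetimeIndex; the particular start date and spacing below are
# assumptions chosen only for the demonstration.
def _demo_infer_freq():
    import pandas as pd
    idx = pd.date_range('2017-01-01', periods=6, freq='2H')
    # The inferer recovers the frequency string from the spacing alone.
    return infer_freq(idx)    # expected to be '2H'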
_ONE_MICRO = long(1000)
_ONE_MILLI = _ONE_MICRO * 1000
_ONE_SECOND = _ONE_MILLI * 1000
_ONE_MINUTE = 60 * _ONE_SECOND
_ONE_HOUR = 60 * _ONE_MINUTE
_ONE_DAY = 24 * _ONE_HOUR
class _FrequencyInferer(object):
"""
Not sure if I can avoid the state machine here
"""
def __init__(self, index, warn=True):
self.index = index
self.values = np.asarray(index).view('i8')
# This moves the values, which are implicitly in UTC, to the
# the timezone so they are in local time
if hasattr(index, 'tz'):
if index.tz is not None:
self.values = tslib.tz_convert(self.values, 'UTC', index.tz)
self.warn = warn
if len(index) < 3:
raise ValueError('Need at least 3 dates to infer frequency')
self.is_monotonic = (self.index.is_monotonic_increasing or
self.index.is_monotonic_decreasing)
@cache_readonly
def deltas(self):
return tslib.unique_deltas(self.values)
@cache_readonly
def deltas_asi8(self):
return tslib.unique_deltas(self.index.asi8)
@cache_readonly
def is_unique(self):
return len(self.deltas) == 1
@cache_readonly
def is_unique_asi8(self):
return len(self.deltas_asi8) == 1
def get_freq(self):
if not self.is_monotonic or not self.index.is_unique:
return None
delta = self.deltas[0]
if _is_multiple(delta, _ONE_DAY):
return self._infer_daily_rule()
else:
# Business hourly, maybe. 17: one day / 65: one weekend
if self.hour_deltas in ([1, 17], [1, 65], [1, 17, 65]):
return 'BH'
# Possibly intraday frequency. Here we use the
# original .asi8 values as the modified values
# will not work around DST transitions. See #8772
elif not self.is_unique_asi8:
return None
delta = self.deltas_asi8[0]
if _is_multiple(delta, _ONE_HOUR):
# Hours
return _maybe_add_count('H', delta / _ONE_HOUR)
elif _is_multiple(delta, _ONE_MINUTE):
# Minutes
return _maybe_add_count('T', delta / _ONE_MINUTE)
elif _is_multiple(delta, _ONE_SECOND):
# Seconds
return _maybe_add_count('S', delta / _ONE_SECOND)
elif _is_multiple(delta, _ONE_MILLI):
# Milliseconds
return _maybe_add_count('L', delta / _ONE_MILLI)
elif _is_multiple(delta, _ONE_MICRO):
# Microseconds
return _maybe_add_count('U', delta / _ONE_MICRO)
else:
# Nanoseconds
return _maybe_add_count('N', delta)
@cache_readonly
def day_deltas(self):
return [x / _ONE_DAY for x in self.deltas]
@cache_readonly
def hour_deltas(self):
return [x / _ONE_HOUR for x in self.deltas]
@cache_readonly
def fields(self):
return tslib.build_field_sarray(self.values)
@cache_readonly
def rep_stamp(self):
return lib.Timestamp(self.values[0])
def month_position_check(self):
# TODO: cythonize this, very slow
calendar_end = True
business_end = True
calendar_start = True
business_start = True
years = self.fields['Y']
months = self.fields['M']
days = self.fields['D']
weekdays = self.index.dayofweek
from calendar import monthrange
for y, m, d, wd in zip(years, months, days, weekdays):
if calendar_start:
calendar_start &= d == 1
if business_start:
business_start &= d == 1 or (d <= 3 and wd == 0)
if calendar_end or business_end:
_, daysinmonth = monthrange(y, m)
cal = d == daysinmonth
if calendar_end:
calendar_end &= cal
if business_end:
business_end &= cal or (daysinmonth - d < 3 and wd == 4)
elif not calendar_start and not business_start:
break
if calendar_end:
return 'ce'
elif business_end:
return 'be'
elif calendar_start:
return 'cs'
elif business_start:
return 'bs'
else:
return None
@cache_readonly
def mdiffs(self):
nmonths = self.fields['Y'] * 12 + self.fields['M']
return tslib.unique_deltas(nmonths.astype('i8'))
@cache_readonly
def ydiffs(self):
return tslib.unique_deltas(self.fields['Y'].astype('i8'))
def _infer_daily_rule(self):
annual_rule = self._get_annual_rule()
if annual_rule:
nyears = self.ydiffs[0]
month = _month_aliases[self.rep_stamp.month]
alias = '{prefix}-{month}'.format(prefix=annual_rule, month=month)
return _maybe_add_count(alias, nyears)
quarterly_rule = self._get_quarterly_rule()
if quarterly_rule:
nquarters = self.mdiffs[0] / 3
mod_dict = {0: 12, 2: 11, 1: 10}
month = _month_aliases[mod_dict[self.rep_stamp.month % 3]]
alias = '{prefix}-{month}'.format(prefix=quarterly_rule,
month=month)
return _maybe_add_count(alias, nquarters)
monthly_rule = self._get_monthly_rule()
if monthly_rule:
return _maybe_add_count(monthly_rule, self.mdiffs[0])
if self.is_unique:
days = self.deltas[0] / _ONE_DAY
if days % 7 == 0:
# Weekly
day = _weekday_rule_aliases[self.rep_stamp.weekday()]
return _maybe_add_count('W-{day}'.format(day=day), days / 7)
else:
return _maybe_add_count('D', days)
if self._is_business_daily():
return 'B'
wom_rule = self._get_wom_rule()
if wom_rule:
return wom_rule
def _get_annual_rule(self):
if len(self.ydiffs) > 1:
return None
if len(algos.unique(self.fields['M'])) > 1:
return None
pos_check = self.month_position_check()
return {'cs': 'AS', 'bs': 'BAS',
'ce': 'A', 'be': 'BA'}.get(pos_check)
def _get_quarterly_rule(self):
if len(self.mdiffs) > 1:
return None
if not self.mdiffs[0] % 3 == 0:
return None
pos_check = self.month_position_check()
return {'cs': 'QS', 'bs': 'BQS',
'ce': 'Q', 'be': 'BQ'}.get(pos_check)
def _get_monthly_rule(self):
if len(self.mdiffs) > 1:
return None
pos_check = self.month_position_check()
return {'cs': 'MS', 'bs': 'BMS',
'ce': 'M', 'be': 'BM'}.get(pos_check)
def _is_business_daily(self):
# quick check: cannot be business daily
if self.day_deltas != [1, 3]:
return False
# probably business daily, but need to confirm
first_weekday = self.index[0].weekday()
shifts = np.diff(self.index.asi8)
shifts = np.floor_divide(shifts, _ONE_DAY)
weekdays = np.mod(first_weekday + np.cumsum(shifts), 7)
return np.all(((weekdays == 0) & (shifts == 3)) |
((weekdays > 0) & (weekdays <= 4) & (shifts == 1)))
def _get_wom_rule(self):
# wdiffs = unique(np.diff(self.index.week))
# We also need -47, -49, -48 to catch index spanning year boundary
# if not lib.ismember(wdiffs, set([4, 5, -47, -49, -48])).all():
# return None
weekdays = unique(self.index.weekday)
if len(weekdays) > 1:
return None
week_of_months = unique((self.index.day - 1) // 7)
# Only attempt to infer up to WOM-4. See #9425
week_of_months = week_of_months[week_of_months < 4]
if len(week_of_months) == 0 or len(week_of_months) > 1:
return None
# get which week
week = week_of_months[0] + 1
wd = _weekday_rule_aliases[weekdays[0]]
return 'WOM-{week}{weekday}'.format(week=week, weekday=wd)
class _TimedeltaFrequencyInferer(_FrequencyInferer):
def _infer_daily_rule(self):
if self.is_unique:
days = self.deltas[0] / _ONE_DAY
if days % 7 == 0:
# Weekly
wd = _weekday_rule_aliases[self.rep_stamp.weekday()]
alias = 'W-{weekday}'.format(weekday=wd)
return _maybe_add_count(alias, days / 7)
else:
return _maybe_add_count('D', days)
def _maybe_add_count(base, count):
if count != 1:
return '{count}{base}'.format(count=int(count), base=base)
else:
return base
def _maybe_coerce_freq(code):
""" we might need to coerce a code to a rule_code
and uppercase it
Parameters
----------
    code : string or DateOffset
        Frequency code or offset to coerce
Returns
-------
string code
"""
assert code is not None
if isinstance(code, offsets.DateOffset):
code = code.rule_code
return code.upper()
def is_subperiod(source, target):
"""
Returns True if downsampling is possible between source and target
frequencies
Parameters
----------
source : string
Frequency converting from
target : string
Frequency converting to
Returns
-------
is_subperiod : boolean
"""
if target is None or source is None:
return False
source = _maybe_coerce_freq(source)
target = _maybe_coerce_freq(target)
if _is_annual(target):
if _is_quarterly(source):
return _quarter_months_conform(_get_rule_month(source),
_get_rule_month(target))
return source in ['D', 'C', 'B', 'M', 'H', 'T', 'S', 'L', 'U', 'N']
elif _is_quarterly(target):
return source in ['D', 'C', 'B', 'M', 'H', 'T', 'S', 'L', 'U', 'N']
elif _is_monthly(target):
return source in ['D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N']
elif _is_weekly(target):
return source in [target, 'D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N']
elif target == 'B':
return source in ['B', 'H', 'T', 'S', 'L', 'U', 'N']
elif target == 'C':
return source in ['C', 'H', 'T', 'S', 'L', 'U', 'N']
elif target == 'D':
return source in ['D', 'H', 'T', 'S', 'L', 'U', 'N']
elif target == 'H':
return source in ['H', 'T', 'S', 'L', 'U', 'N']
elif target == 'T':
return source in ['T', 'S', 'L', 'U', 'N']
elif target == 'S':
return source in ['S', 'L', 'U', 'N']
elif target == 'L':
return source in ['L', 'U', 'N']
elif target == 'U':
return source in ['U', 'N']
elif target == 'N':
return source in ['N']
def is_superperiod(source, target):
"""
Returns True if upsampling is possible between source and target
frequencies
Parameters
----------
source : string
Frequency converting from
target : string
Frequency converting to
Returns
-------
is_superperiod : boolean
"""
if target is None or source is None:
return False
source = _maybe_coerce_freq(source)
target = _maybe_coerce_freq(target)
if _is_annual(source):
if _is_annual(target):
return _get_rule_month(source) == _get_rule_month(target)
if _is_quarterly(target):
smonth = _get_rule_month(source)
tmonth = _get_rule_month(target)
return _quarter_months_conform(smonth, tmonth)
return target in ['D', 'C', 'B', 'M', 'H', 'T', 'S', 'L', 'U', 'N']
elif _is_quarterly(source):
return target in ['D', 'C', 'B', 'M', 'H', 'T', 'S', 'L', 'U', 'N']
elif _is_monthly(source):
return target in ['D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N']
elif _is_weekly(source):
return target in [source, 'D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N']
elif source == 'B':
return target in ['D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N']
elif source == 'C':
return target in ['D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N']
elif source == 'D':
return target in ['D', 'C', 'B', 'H', 'T', 'S', 'L', 'U', 'N']
elif source == 'H':
return target in ['H', 'T', 'S', 'L', 'U', 'N']
elif source == 'T':
return target in ['T', 'S', 'L', 'U', 'N']
elif source == 'S':
return target in ['S', 'L', 'U', 'N']
elif source == 'L':
return target in ['L', 'U', 'N']
elif source == 'U':
return target in ['U', 'N']
elif source == 'N':
return target in ['N']
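# --- Illustrative sketch (not part of the original module) -----------------
# A few hedged examples of the resampling relationships encoded above:
# daily data can be aggregated to monthly ('D' is a subperiod of 'M'),
# monthly data can be upsampled to daily ('M' is a superperiod of 'D'),
# while months do not nest inside weeks.
def _demo_period_relationships():
    return [
        ('D', 'M', is_subperiod('D', 'M')),    # True: downsample D -> M
        ('M', 'D', is_superperiod('M', 'D')),  # True: upsample M -> D
        ('M', 'W', is_subperiod('M', 'W')),    # False: M does not nest in W
    ]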
_get_rule_month = tslib._get_rule_month
def _is_annual(rule):
rule = rule.upper()
return rule == 'A' or rule.startswith('A-')
def _quarter_months_conform(source, target):
snum = _month_numbers[source]
tnum = _month_numbers[target]
return snum % 3 == tnum % 3
def _is_quarterly(rule):
rule = rule.upper()
return rule == 'Q' or rule.startswith('Q-') or rule.startswith('BQ')
def _is_monthly(rule):
rule = rule.upper()
return rule == 'M' or rule == 'BM'
def _is_weekly(rule):
rule = rule.upper()
return rule == 'W' or rule.startswith('W-')
DAYS = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
MONTHS = tslib._MONTHS
_month_numbers = tslib._MONTH_NUMBERS
_month_aliases = tslib._MONTH_ALIASES
_weekday_rule_aliases = dict((k, v) for k, v in enumerate(DAYS))
def _is_multiple(us, mult):
return us % mult == 0
| bsd-3-clause |
ilo10/scikit-learn | sklearn/manifold/isomap.py | 229 | 7169 | """Isomap for manifold learning"""
# Author: Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) 2011
import numpy as np
from ..base import BaseEstimator, TransformerMixin
from ..neighbors import NearestNeighbors, kneighbors_graph
from ..utils import check_array
from ..utils.graph import graph_shortest_path
from ..decomposition import KernelPCA
from ..preprocessing import KernelCenterer
class Isomap(BaseEstimator, TransformerMixin):
"""Isomap Embedding
Non-linear dimensionality reduction through Isometric Mapping
Read more in the :ref:`User Guide <isomap>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
eigen_solver : ['auto'|'arpack'|'dense']
'auto' : Attempt to choose the most efficient solver
for the given problem.
'arpack' : Use Arnoldi decomposition to find the eigenvalues
and eigenvectors.
'dense' : Use a direct solver (i.e. LAPACK)
for the eigenvalue decomposition.
tol : float
Convergence tolerance passed to arpack or lobpcg.
not used if eigen_solver == 'dense'.
max_iter : integer
Maximum number of iterations for the arpack solver.
not used if eigen_solver == 'dense'.
path_method : string ['auto'|'FW'|'D']
Method to use in finding shortest path.
'auto' : attempt to choose the best algorithm automatically.
'FW' : Floyd-Warshall algorithm.
'D' : Dijkstra's algorithm.
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
Algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kernel_pca_ : object
`KernelPCA` object used to implement the embedding.
training_data_ : array-like, shape (n_samples, n_features)
Stores the training data.
nbrs_ : sklearn.neighbors.NearestNeighbors instance
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
dist_matrix_ : array-like, shape (n_samples, n_samples)
Stores the geodesic distance matrix of training data.
References
----------
.. [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric
framework for nonlinear dimensionality reduction. Science 290 (5500)
"""
def __init__(self, n_neighbors=5, n_components=2, eigen_solver='auto',
tol=0, max_iter=None, path_method='auto',
neighbors_algorithm='auto'):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.path_method = path_method
self.neighbors_algorithm = neighbors_algorithm
self.nbrs_ = NearestNeighbors(n_neighbors=n_neighbors,
algorithm=neighbors_algorithm)
def _fit_transform(self, X):
X = check_array(X)
self.nbrs_.fit(X)
self.training_data_ = self.nbrs_._fit_X
self.kernel_pca_ = KernelPCA(n_components=self.n_components,
kernel="precomputed",
eigen_solver=self.eigen_solver,
tol=self.tol, max_iter=self.max_iter)
kng = kneighbors_graph(self.nbrs_, self.n_neighbors,
mode='distance')
self.dist_matrix_ = graph_shortest_path(kng,
method=self.path_method,
directed=False)
G = self.dist_matrix_ ** 2
G *= -0.5
self.embedding_ = self.kernel_pca_.fit_transform(G)
def reconstruction_error(self):
"""Compute the reconstruction error for the embedding.
Returns
-------
reconstruction_error : float
Notes
-------
The cost function of an isomap embedding is
``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``
Where D is the matrix of distances for the input data X,
D_fit is the matrix of distances for the output embedding X_fit,
and K is the isomap kernel:
``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``
"""
G = -0.5 * self.dist_matrix_ ** 2
G_center = KernelCenterer().fit_transform(G)
evals = self.kernel_pca_.lambdas_
return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0]
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, precomputed tree, or NearestNeighbors
object.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
X: {array-like, sparse matrix, BallTree, KDTree}
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""Transform X.
This is implemented by linking the points X into the graph of geodesic
distances of the training data. First the `n_neighbors` nearest
neighbors of X are found in the training data, and from these the
shortest geodesic distances from each point in X to each point in
the training data are computed in order to construct the kernel.
The embedding of X is the projection of this kernel onto the
embedding vectors of the training set.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
X = check_array(X)
distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
        # Create the graph of shortest distances from X to self.training_data_
        # via the nearest neighbors of X.
        # This can be done as a single array operation, but it potentially
        # takes a lot of memory. To avoid that, use a loop:
G_X = np.zeros((X.shape[0], self.training_data_.shape[0]))
for i in range(X.shape[0]):
G_X[i] = np.min((self.dist_matrix_[indices[i]]
+ distances[i][:, None]), 0)
G_X **= 2
G_X *= -0.5
return self.kernel_pca_.transform(G_X)
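# A minimal usage sketch (illustrative only). The class above is the estimator
# exposed publicly as ``sklearn.manifold.Isomap``; the dataset and parameter
# values below are assumptions chosen for demonstration, not part of this
# module.
if __name__ == '__main__':
    from sklearn.datasets import load_digits
    from sklearn.manifold import Isomap
    digits = load_digits()
    X = digits.data[:200]
    # Embed the 64-dimensional digit images into 2 components.
    embedding = Isomap(n_neighbors=5, n_components=2).fit_transform(X)
    print(embedding.shape)  # expected: (200, 2)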
| bsd-3-clause |
mdeemer/XlsxWriter | examples/pandas_chart_line.py | 9 | 1739 | ##############################################################################
#
# An example of converting a Pandas dataframe to an xlsx file with a line
# chart using Pandas and XlsxWriter.
#
# Copyright 2013-2015, John McNamara, [email protected]
#
import pandas as pd
import random
# Create some sample data to plot.
max_row = 21
categories = ['Node 1', 'Node 2', 'Node 3', 'Node 4']
index_1 = range(0, max_row, 1)
multi_iter1 = {'index': index_1}
for category in categories:
multi_iter1[category] = [random.randint(10, 100) for x in index_1]
# Create a Pandas dataframe from the data.
index_2 = multi_iter1.pop('index')
df = pd.DataFrame(multi_iter1, index=index_2)
df = df.reindex(columns=sorted(df.columns))
# Create a Pandas Excel writer using XlsxWriter as the engine.
sheet_name = 'Sheet1'
writer = pd.ExcelWriter('pandas_chart_line.xlsx', engine='xlsxwriter')
df.to_excel(writer, sheet_name=sheet_name)
# Access the XlsxWriter workbook and worksheet objects from the dataframe.
workbook = writer.book
worksheet = writer.sheets[sheet_name]
# Create a chart object.
chart = workbook.add_chart({'type': 'line'})
# Configure the series of the chart from the dataframe data.
for i in range(len(categories)):
col = i + 1
chart.add_series({
'name': ['Sheet1', 0, col],
'categories': ['Sheet1', 1, 0, max_row, 0],
'values': ['Sheet1', 1, col, max_row, col],
})
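# Note: each series could equivalently be configured with A1-style range
# strings instead of the [sheetname, first_row, first_col, last_row, last_col]
# list syntax used above. For example (illustrative, for the first data
# column):
#
#     chart.add_series({
#         'name': '=Sheet1!$B$1',
#         'categories': '=Sheet1!$A$2:$A$22',
#         'values': '=Sheet1!$B$2:$B$22',
#     })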
# Configure the chart axes.
chart.set_x_axis({'name': 'Index'})
chart.set_y_axis({'name': 'Value', 'major_gridlines': {'visible': False}})
# Insert the chart into the worksheet.
worksheet.insert_chart('G2', chart)
# Close the Pandas Excel writer and output the Excel file.
writer.save()
| bsd-2-clause |
yyjiang/scikit-learn | sklearn/cluster/tests/test_bicluster.py | 226 | 9457 | """Testing for Spectral Biclustering methods"""
import numpy as np
from scipy.sparse import csr_matrix, issparse
from sklearn.grid_search import ParameterGrid
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn.base import BaseEstimator, BiclusterMixin
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.cluster.bicluster import _scale_normalize
from sklearn.cluster.bicluster import _bistochastic_normalize
from sklearn.cluster.bicluster import _log_normalize
from sklearn.metrics import consensus_score
from sklearn.datasets import make_biclusters, make_checkerboard
class MockBiclustering(BaseEstimator, BiclusterMixin):
# Mock object for testing get_submatrix.
def __init__(self):
pass
def get_indices(self, i):
# Overridden to reproduce old get_submatrix test.
return (np.where([True, True, False, False, True])[0],
np.where([False, False, True, True])[0])
def test_get_submatrix():
data = np.arange(20).reshape(5, 4)
model = MockBiclustering()
for X in (data, csr_matrix(data), data.tolist()):
submatrix = model.get_submatrix(0, X)
if issparse(submatrix):
submatrix = submatrix.toarray()
assert_array_equal(submatrix, [[2, 3],
[6, 7],
[18, 19]])
submatrix[:] = -1
if issparse(X):
X = X.toarray()
assert_true(np.all(X != -1))
def _test_shape_indices(model):
# Test get_shape and get_indices on fitted model.
for i in range(model.n_clusters):
m, n = model.get_shape(i)
i_ind, j_ind = model.get_indices(i)
assert_equal(len(i_ind), m)
assert_equal(len(j_ind), n)
def test_spectral_coclustering():
# Test Dhillon's Spectral CoClustering on a simple problem.
param_grid = {'svd_method': ['randomized', 'arpack'],
'n_svd_vecs': [None, 20],
'mini_batch': [False, True],
'init': ['k-means++'],
'n_init': [10],
'n_jobs': [1]}
random_state = 0
S, rows, cols = make_biclusters((30, 30), 3, noise=0.5,
random_state=random_state)
S -= S.min() # needs to be nonnegative before making it sparse
S = np.where(S < 1, 0, S) # threshold some values
for mat in (S, csr_matrix(S)):
for kwargs in ParameterGrid(param_grid):
model = SpectralCoclustering(n_clusters=3,
random_state=random_state,
**kwargs)
model.fit(mat)
assert_equal(model.rows_.shape, (3, 30))
assert_array_equal(model.rows_.sum(axis=0), np.ones(30))
assert_array_equal(model.columns_.sum(axis=0), np.ones(30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
def test_spectral_biclustering():
# Test Kluger methods on a checkerboard dataset.
S, rows, cols = make_checkerboard((30, 30), 3, noise=0.5,
random_state=0)
non_default_params = {'method': ['scale', 'log'],
'svd_method': ['arpack'],
'n_svd_vecs': [20],
'mini_batch': [True]}
for mat in (S, csr_matrix(S)):
for param_name, param_values in non_default_params.items():
for param_value in param_values:
model = SpectralBiclustering(
n_clusters=3,
n_init=3,
init='k-means++',
random_state=0,
)
model.set_params(**dict([(param_name, param_value)]))
if issparse(mat) and model.get_params().get('method') == 'log':
# cannot take log of sparse matrix
assert_raises(ValueError, model.fit, mat)
continue
else:
model.fit(mat)
assert_equal(model.rows_.shape, (9, 30))
assert_equal(model.columns_.shape, (9, 30))
assert_array_equal(model.rows_.sum(axis=0),
np.repeat(3, 30))
assert_array_equal(model.columns_.sum(axis=0),
np.repeat(3, 30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
def _do_scale_test(scaled):
"""Check that rows sum to one constant, and columns to another."""
row_sum = scaled.sum(axis=1)
col_sum = scaled.sum(axis=0)
if issparse(scaled):
row_sum = np.asarray(row_sum).squeeze()
col_sum = np.asarray(col_sum).squeeze()
assert_array_almost_equal(row_sum, np.tile(row_sum.mean(), 100),
decimal=1)
assert_array_almost_equal(col_sum, np.tile(col_sum.mean(), 100),
decimal=1)
def _do_bistochastic_test(scaled):
"""Check that rows and columns sum to the same constant."""
_do_scale_test(scaled)
assert_almost_equal(scaled.sum(axis=0).mean(),
scaled.sum(axis=1).mean(),
decimal=1)
def test_scale_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled, _, _ = _scale_normalize(mat)
_do_scale_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_bistochastic_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled = _bistochastic_normalize(mat)
_do_bistochastic_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_log_normalize():
# adding any constant to a log-scaled matrix should make it
# bistochastic
generator = np.random.RandomState(0)
mat = generator.rand(100, 100)
scaled = _log_normalize(mat) + 1
_do_bistochastic_test(scaled)
def test_fit_best_piecewise():
model = SpectralBiclustering(random_state=0)
vectors = np.array([[0, 0, 0, 1, 1, 1],
[2, 2, 2, 3, 3, 3],
[0, 1, 2, 3, 4, 5]])
best = model._fit_best_piecewise(vectors, n_best=2, n_clusters=2)
assert_array_equal(best, vectors[:2])
def test_project_and_cluster():
model = SpectralBiclustering(random_state=0)
data = np.array([[1, 1, 1],
[1, 1, 1],
[3, 6, 3],
[3, 6, 3]])
vectors = np.array([[1, 0],
[0, 1],
[0, 0]])
for mat in (data, csr_matrix(data)):
labels = model._project_and_cluster(data, vectors,
n_clusters=2)
assert_array_equal(labels, [0, 0, 1, 1])
def test_perfect_checkerboard():
raise SkipTest("This test is failing on the buildbot, but cannot"
" reproduce. Temporarily disabling it until it can be"
" reproduced and fixed.")
model = SpectralBiclustering(3, svd_method="arpack", random_state=0)
S, rows, cols = make_checkerboard((30, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((40, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((30, 40), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
def test_errors():
data = np.arange(25).reshape((5, 5))
model = SpectralBiclustering(n_clusters=(3, 3, 3))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters='abc')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters=(3, 'abc'))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(svd_method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_best=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=3, n_best=4)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering()
data = np.arange(27).reshape((3, 3, 3))
assert_raises(ValueError, model.fit, data)
| bsd-3-clause |
nmartensen/pandas | asv_bench/benchmarks/series_methods.py | 6 | 3587 | from .pandas_vb_common import *
class series_constructor_no_data_datetime_index(object):
goal_time = 0.2
def setup(self):
self.dr = pd.date_range(
start=datetime(2015,10,26),
end=datetime(2016,1,1),
freq='50s'
) # ~100k long
def time_series_constructor_no_data_datetime_index(self):
Series(data=None, index=self.dr)
class series_constructor_dict_data_datetime_index(object):
goal_time = 0.2
def setup(self):
self.dr = pd.date_range(
start=datetime(2015, 10, 26),
end=datetime(2016, 1, 1),
freq='50s'
) # ~100k long
self.data = {d: v for d, v in zip(self.dr, range(len(self.dr)))}
    def time_series_constructor_dict_data_datetime_index(self):
Series(data=self.data, index=self.dr)
class series_isin_int64(object):
goal_time = 0.2
def setup(self):
self.s3 = Series(np.random.randint(1, 10, 100000)).astype('int64')
self.s4 = Series(np.random.randint(1, 100, 10000000)).astype('int64')
self.values = [1, 2]
def time_series_isin_int64(self):
self.s3.isin(self.values)
def time_series_isin_int64_large(self):
self.s4.isin(self.values)
class series_isin_object(object):
goal_time = 0.2
def setup(self):
self.s3 = Series(np.random.randint(1, 10, 100000)).astype('int64')
self.values = [1, 2]
self.s4 = self.s3.astype('object')
def time_series_isin_object(self):
self.s4.isin(self.values)
class series_nlargest1(object):
goal_time = 0.2
def setup(self):
self.s1 = Series(np.random.randn(10000))
self.s2 = Series(np.random.randint(1, 10, 10000))
self.s3 = Series(np.random.randint(1, 10, 100000)).astype('int64')
self.values = [1, 2]
self.s4 = self.s3.astype('object')
def time_series_nlargest1(self):
self.s1.nlargest(3, keep='last')
self.s1.nlargest(3, keep='first')
class series_nlargest2(object):
goal_time = 0.2
def setup(self):
self.s1 = Series(np.random.randn(10000))
self.s2 = Series(np.random.randint(1, 10, 10000))
self.s3 = Series(np.random.randint(1, 10, 100000)).astype('int64')
self.values = [1, 2]
self.s4 = self.s3.astype('object')
def time_series_nlargest2(self):
self.s2.nlargest(3, keep='last')
self.s2.nlargest(3, keep='first')
class series_nsmallest2(object):
goal_time = 0.2
def setup(self):
self.s1 = Series(np.random.randn(10000))
self.s2 = Series(np.random.randint(1, 10, 10000))
self.s3 = Series(np.random.randint(1, 10, 100000)).astype('int64')
self.values = [1, 2]
self.s4 = self.s3.astype('object')
def time_series_nsmallest2(self):
self.s2.nsmallest(3, keep='last')
self.s2.nsmallest(3, keep='first')
class series_dropna_int64(object):
goal_time = 0.2
def setup(self):
self.s = Series(np.random.randint(1, 10, 1000000))
def time_series_dropna_int64(self):
self.s.dropna()
class series_dropna_datetime(object):
goal_time = 0.2
def setup(self):
self.s = Series(pd.date_range('2000-01-01', freq='S', periods=1000000))
self.s[np.random.randint(1, 1000000, 100)] = pd.NaT
def time_series_dropna_datetime(self):
self.s.dropna()
class series_clip(object):
goal_time = 0.2
def setup(self):
self.s = pd.Series(np.random.randn(50))
    def time_series_clip(self):
self.s.clip(0, 1)
| bsd-3-clause |
befelix/GPy | GPy/plotting/matplot_dep/img_plots.py | 15 | 2159 | # Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
"""
The module contains the tools for plotting 2D image visualizations
"""
import numpy as np
from matplotlib.cm import jet
width_max = 15
height_max = 12
def _calculateFigureSize(x_size, y_size, fig_ncols, fig_nrows, pad):
width = (x_size*fig_ncols+pad*(fig_ncols-1))
height = (y_size*fig_nrows+pad*(fig_nrows-1))
if width > float(height)/height_max*width_max:
return (width_max, float(width_max)/width*height)
else:
return (float(height_max)/height*width, height_max)
def plot_2D_images(figure, arr, symmetric=False, pad=None, zoom=None, mode=None, interpolation='nearest'):
ax = figure.add_subplot(111)
if len(arr.shape)==2:
arr = arr.reshape(*((1,)+arr.shape))
fig_num = arr.shape[0]
y_size = arr.shape[1]
x_size = arr.shape[2]
fig_ncols = int(np.ceil(np.sqrt(fig_num)))
fig_nrows = int(np.ceil((float)(fig_num)/fig_ncols))
    if pad is None:
pad = max(int(min(y_size,x_size)/10),1)
figsize = _calculateFigureSize(x_size, y_size, fig_ncols, fig_nrows, pad)
#figure.set_size_inches(figsize,forward=True)
#figure.subplots_adjust(left=0.05, bottom=0.05, right=0.95, top=0.95)
if symmetric:
# symmetric around zero: fix zero as the middle color
mval = max(abs(arr.max()),abs(arr.min()))
arr = arr/(2.*mval)+0.5
else:
minval,maxval = arr.min(),arr.max()
arr = (arr-minval)/(maxval-minval)
if mode=='L':
arr_color = np.empty(arr.shape+(3,))
arr_color[:] = arr.reshape(*(arr.shape+(1,)))
    elif mode is None or mode=='jet':
arr_color = jet(arr)
buf = np.ones((y_size*fig_nrows+pad*(fig_nrows-1), x_size*fig_ncols+pad*(fig_ncols-1), 3),dtype=arr.dtype)
for y in range(fig_nrows):
for x in range(fig_ncols):
if y*fig_ncols+x<fig_num:
buf[y*y_size+y*pad:(y+1)*y_size+y*pad, x*x_size+x*pad:(x+1)*x_size+x*pad] = arr_color[y*fig_ncols+x,:,:,:3]
img_plot = ax.imshow(buf, interpolation=interpolation)
ax.axis('off')
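# Minimal usage sketch (assumes this module is run as a script with matplotlib
# available; the random 8x8 "images" below are purely illustrative):
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    demo_fig = plt.figure()
    demo_imgs = np.random.randn(6, 8, 8)  # six 8x8 grayscale patches
    plot_2D_images(demo_fig, demo_imgs, symmetric=True, mode='jet')
    plt.show()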
| bsd-3-clause |
css-lucas/GAT | gat/core/nlp/radar.py | 2 | 2433 | # modified from: https://gist.github.com/sergiobuj/6721187
############################## IMPORTS ##############################
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.path import Path
from matplotlib.spines import Spine
from matplotlib.projections.polar import PolarAxes
from matplotlib.projections import register_projection
############################## RADAR FACTORY ##############################
def _radar_factory(num_vars):
theta = 2*np.pi*np.linspace(0, 1-1./num_vars, num_vars)
theta += np.pi/2
def unit_poly_verts(theta):
x0, y0, r = [0.5]*3
verts = [(r*np.cos(t)+x0, r*np.sin(t)+y0) for t in theta]
return verts
class RadarAxes(PolarAxes):
name = 'radar'
RESOLUTION = 1
def fill(self, *args, **kwargs):
closed = kwargs.pop('closed', True)
return super(RadarAxes, self).fill(closed=closed, *args, **kwargs)
def plot(self, *args, **kwargs):
lines = super(RadarAxes, self).plot(*args, **kwargs)
for line in lines:
self._close_line(line)
def _close_line(self, line):
x, y = line.get_data()
if x[0] != x[-1]:
x = np.concatenate((x, [x[0]]))
y = np.concatenate((y, [y[0]]))
line.set_data(x, y)
def set_varlabels(self, labels):
self.set_thetagrids(theta*180/np.pi, labels)
def _gen_axes_patch(self):
verts = unit_poly_verts(theta)
return plt.Polygon(verts, closed=True, edgecolor='k')
def _gen_axes_spines(self):
spine_type = 'circle'
verts = unit_poly_verts(theta)
verts.append(verts[0])
path = Path(verts)
spine = Spine(self, spine_type, path)
spine.set_transform(self.transAxes)
return {'polar': spine}
register_projection(RadarAxes)
return theta
def graph(trope, labels=[], values=[], optimum=[], file_name='radar.png'):
N = len(labels)
theta = _radar_factory(N)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection='radar')
ax.plot(theta, values, color='blue')
ax.plot(theta, optimum, color='r')
ax.set_varlabels(labels)
fig.text(0.5, 0.965, trope, horizontalalignment='center', color='black', weight='bold', size='large')
plt.savefig(file_name, dpi=100)
plt.clf()
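# Minimal usage sketch (hypothetical labels/values; writes 'radar_demo.png'
# into the working directory):
if __name__ == '__main__':
    demo_labels = ['anger', 'joy', 'fear', 'trust', 'sadness']
    demo_values = [0.2, 0.8, 0.1, 0.6, 0.3]
    demo_optimum = [0.5] * len(demo_labels)
    graph('Example trope', labels=demo_labels, values=demo_values,
          optimum=demo_optimum, file_name='radar_demo.png')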
| mit |
destrys/euler | python/config/jupyter_notebook_config.py | 1 | 24275 | # Configuration file for jupyter-notebook.
#------------------------------------------------------------------------------
# Application(SingletonConfigurable) configuration
#------------------------------------------------------------------------------
## This is an application.
## The date format used by logging formatters for %(asctime)s
#c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
## The Logging format template
#c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
## Set the log level by value or name.
#c.Application.log_level = 30
#------------------------------------------------------------------------------
# JupyterApp(Application) configuration
#------------------------------------------------------------------------------
## Base class for Jupyter applications
## Answer yes to any prompts.
#c.JupyterApp.answer_yes = False
## Full path of a config file.
#c.JupyterApp.config_file = ''
## Specify a config file to load.
#c.JupyterApp.config_file_name = ''
## Generate default config file.
#c.JupyterApp.generate_config = False
#------------------------------------------------------------------------------
# NotebookApp(JupyterApp) configuration
#------------------------------------------------------------------------------
## Set the Access-Control-Allow-Credentials: true header
#c.NotebookApp.allow_credentials = False
## Set the Access-Control-Allow-Origin header
#
# Use '*' to allow any origin to access your server.
#
# Takes precedence over allow_origin_pat.
#c.NotebookApp.allow_origin = ''
## Use a regular expression for the Access-Control-Allow-Origin header
#
# Requests from an origin matching the expression will get replies with:
#
# Access-Control-Allow-Origin: origin
#
# where `origin` is the origin of the request.
#
# Ignored if allow_origin is set.
#c.NotebookApp.allow_origin_pat = ''
## Whether to allow the user to run the notebook as root.
#c.NotebookApp.allow_root = False
## DEPRECATED use base_url
#c.NotebookApp.base_project_url = '/'
## The base URL for the notebook server.
#
# Leading and trailing slashes can be omitted, and will automatically be added.
#c.NotebookApp.base_url = '/'
## Specify what command to use to invoke a web browser when opening the notebook.
# If not specified, the default browser will be determined by the `webbrowser`
# standard library module, which allows setting of the BROWSER environment
# variable to override it.
#c.NotebookApp.browser = ''
## The full path to an SSL/TLS certificate file.
#c.NotebookApp.certfile = ''
## The full path to a certificate authority certificate for SSL/TLS client
# authentication.
#c.NotebookApp.client_ca = ''
## The config manager class to use
#c.NotebookApp.config_manager_class = 'notebook.services.config.manager.ConfigManager'
## The notebook manager class to use.
#c.NotebookApp.contents_manager_class = 'notebook.services.contents.largefilemanager.LargeFileManager'
## Extra keyword arguments to pass to `set_secure_cookie`. See tornado's
# set_secure_cookie docs for details.
#c.NotebookApp.cookie_options = {}
## The random bytes used to secure cookies. By default this is a new random
# number every time you start the Notebook. Set it to a value in a config file
# to enable logins to persist across server sessions.
#
# Note: Cookie secrets should be kept private, do not share config files with
# cookie_secret stored in plaintext (you can read the value from a file).
#c.NotebookApp.cookie_secret = b''
## The file where the cookie secret is stored.
#c.NotebookApp.cookie_secret_file = ''
## The default URL to redirect to from `/`
#c.NotebookApp.default_url = '/tree'
## Disable cross-site-request-forgery protection
#
# Jupyter notebook 4.3.1 introduces protection from cross-site request
# forgeries, requiring API requests to either:
#
# - originate from pages served by this server (validated with XSRF cookie and
# token), or - authenticate with a token
#
# Some anonymous compute resources still desire the ability to run code,
# completely without authentication. These services can disable all
# authentication and security checks, with the full knowledge of what that
# implies.
#c.NotebookApp.disable_check_xsrf = False
## Whether to enable MathJax for typesetting math/TeX
#
# MathJax is the javascript library Jupyter uses to render math/LaTeX. It is
# very large, so you may want to disable it if you have a slow internet
# connection, or for offline use of the notebook.
#
# When disabled, equations etc. will appear as their untransformed TeX source.
#c.NotebookApp.enable_mathjax = True
## extra paths to look for Javascript notebook extensions
#c.NotebookApp.extra_nbextensions_path = []
## Extra paths to search for serving static files.
#
# This allows adding javascript/css to be available from the notebook server
# machine, or overriding individual files in the IPython
#c.NotebookApp.extra_static_paths = []
## Extra paths to search for serving jinja templates.
#
# Can be used to override templates from notebook.templates.
#c.NotebookApp.extra_template_paths = []
##
#c.NotebookApp.file_to_run = ''
## Deprecated: Use minified JS file or not, mainly use during dev to avoid JS
# recompilation
#c.NotebookApp.ignore_minified_js = False
## (bytes/sec) Maximum rate at which stream output can be sent on iopub before
# they are limited.
#c.NotebookApp.iopub_data_rate_limit = 1000000
## (msgs/sec) Maximum rate at which messages can be sent on iopub before they are
# limited.
#c.NotebookApp.iopub_msg_rate_limit = 1000
## The IP address the notebook server will listen on.
#c.NotebookApp.ip = 'localhost'
## Supply extra arguments that will be passed to Jinja environment.
#c.NotebookApp.jinja_environment_options = {}
## Extra variables to supply to jinja templates when rendering.
#c.NotebookApp.jinja_template_vars = {}
## The kernel manager class to use.
#c.NotebookApp.kernel_manager_class = 'notebook.services.kernels.kernelmanager.MappingKernelManager'
## The kernel spec manager class to use. Should be a subclass of
# `jupyter_client.kernelspec.KernelSpecManager`.
#
# The Api of KernelSpecManager is provisional and might change without warning
# between this version of Jupyter and the next stable one.
#c.NotebookApp.kernel_spec_manager_class = 'jupyter_client.kernelspec.KernelSpecManager'
## The full path to a private key file for usage with SSL/TLS.
#c.NotebookApp.keyfile = ''
## The login handler class to use.
#c.NotebookApp.login_handler_class = 'notebook.auth.login.LoginHandler'
## The logout handler class to use.
#c.NotebookApp.logout_handler_class = 'notebook.auth.logout.LogoutHandler'
## The MathJax.js configuration file that is to be used.
#c.NotebookApp.mathjax_config = 'TeX-AMS-MML_HTMLorMML-full,Safe'
## A custom url for MathJax.js. Should be in the form of a case-sensitive url to
# MathJax, for example: /static/components/MathJax/MathJax.js
#c.NotebookApp.mathjax_url = ''
## Dict of Python modules to load as notebook server extensions.Entry values can
# be used to enable and disable the loading ofthe extensions. The extensions
# will be loaded in alphabetical order.
#c.NotebookApp.nbserver_extensions = {}
## The directory to use for notebooks and kernels.
#c.NotebookApp.notebook_dir = ''
## Whether to open in a browser after starting. The specific browser used is
# platform dependent and determined by the python standard library `webbrowser`
# module, unless it is overridden using the --browser (NotebookApp.browser)
# configuration option.
#c.NotebookApp.open_browser = True
## Hashed password to use for web authentication.
#
# To generate, type in a python/IPython shell:
#
# from notebook.auth import passwd; passwd()
#
# The string should be of the form type:salt:hashed-password.
#c.NotebookApp.password = ''
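## For example, the resulting setting would look like the line below (the value
# is a placeholder, not a real hash -- generate your own with the snippet
# above):
#
# c.NotebookApp.password = 'sha1:<salt>:<hashed-password>'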
## Forces users to use a password for the Notebook server. This is useful in a
# multi user environment, for instance when everybody in the LAN can access each
# other's machine through ssh.
#
# In such a case, serving the notebook server on localhost is not secure since
# any user can connect to the notebook server via ssh.
#c.NotebookApp.password_required = False
## The port the notebook server will listen on.
#c.NotebookApp.port = 8888
## The number of additional ports to try if the specified port is not available.
#c.NotebookApp.port_retries = 50
## DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib.
#c.NotebookApp.pylab = 'disabled'
## (sec) Time window used to check the message and data rate limits.
#c.NotebookApp.rate_limit_window = 3
## Reraise exceptions encountered loading server extensions?
#c.NotebookApp.reraise_server_extension_failures = False
## DEPRECATED use the nbserver_extensions dict instead
#c.NotebookApp.server_extensions = []
## The session manager class to use.
#c.NotebookApp.session_manager_class = 'notebook.services.sessions.sessionmanager.SessionManager'
## Supply SSL options for the tornado HTTPServer. See the tornado docs for
# details.
#c.NotebookApp.ssl_options = {}
## Supply overrides for terminado. Currently only supports "shell_command".
#c.NotebookApp.terminado_settings = {}
## Token used for authenticating first-time connections to the server.
#
# When no password is enabled, the default is to generate a new, random token.
#
# Setting to an empty string disables authentication altogether, which is NOT
# RECOMMENDED.
#c.NotebookApp.token = '<generated>'
## Supply overrides for the tornado.web.Application that the Jupyter notebook
# uses.
#c.NotebookApp.tornado_settings = {}
## Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headerssent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
#c.NotebookApp.trust_xheaders = False
## DEPRECATED, use tornado_settings
#c.NotebookApp.webapp_settings = {}
## Specify where to open the notebook on startup. This is the
# `new` argument passed to the standard library method `webbrowser.open`.
# The behaviour is not guaranteed, but depends on browser support. Valid
# values are:
# 2 opens a new tab,
# 1 opens a new window,
# 0 opens in an existing window.
# See the `webbrowser.open` documentation for details.
#c.NotebookApp.webbrowser_open_new = 2
## Set the tornado compression options for websocket connections.
#
# This value will be returned from
# :meth:`WebSocketHandler.get_compression_options`. None (default) will disable
# compression. A dict (even an empty one) will enable compression.
#
# See the tornado docs for WebSocketHandler.get_compression_options for details.
#c.NotebookApp.websocket_compression_options = None
## The base URL for websockets, if it differs from the HTTP server (hint: it
# almost certainly doesn't).
#
# Should be in the form of an HTTP origin: ws[s]://hostname[:port]
#c.NotebookApp.websocket_url = ''
#------------------------------------------------------------------------------
# ConnectionFileMixin(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## Mixin for configurable classes that work with connection files
## JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
#c.ConnectionFileMixin.connection_file = ''
## set the control (ROUTER) port [default: random]
#c.ConnectionFileMixin.control_port = 0
## set the heartbeat port [default: random]
#c.ConnectionFileMixin.hb_port = 0
## set the iopub (PUB) port [default: random]
#c.ConnectionFileMixin.iopub_port = 0
## Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
#c.ConnectionFileMixin.ip = ''
## set the shell (ROUTER) port [default: random]
#c.ConnectionFileMixin.shell_port = 0
## set the stdin (ROUTER) port [default: random]
#c.ConnectionFileMixin.stdin_port = 0
##
#c.ConnectionFileMixin.transport = 'tcp'
#------------------------------------------------------------------------------
# KernelManager(ConnectionFileMixin) configuration
#------------------------------------------------------------------------------
## Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
## Should we autorestart the kernel if it dies.
#c.KernelManager.autorestart = True
## DEPRECATED: Use kernel_name instead.
#
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a configuration file, Jupyter does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it given on the Jupyter command
# line.
#c.KernelManager.kernel_cmd = []
## Time to wait for a kernel to terminate before killing it, in seconds.
#c.KernelManager.shutdown_wait_time = 5.0
#------------------------------------------------------------------------------
# Session(Configurable) configuration
#------------------------------------------------------------------------------
## Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
## Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
#c.Session.buffer_threshold = 1024
## Whether to check PID to protect against calls after fork.
#
# This check can be disabled if fork-safety is handled elsewhere.
#c.Session.check_pid = True
## Threshold (in bytes) beyond which a buffer should be sent without copying.
#c.Session.copy_threshold = 65536
## Debug output in the Session
#c.Session.debug = False
## The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
#c.Session.digest_history_size = 65536
## The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
#c.Session.item_threshold = 64
## execution key, for signing messages.
#c.Session.key = b''
## path to file containing execution key.
#c.Session.keyfile = ''
## Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
#c.Session.metadata = {}
## The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
#c.Session.packer = 'json'
## The UUID identifying this session.
#c.Session.session = ''
## The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
#c.Session.signature_scheme = 'hmac-sha256'
## The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
#c.Session.unpacker = 'json'
## Username for the Session. Default is your system username.
#c.Session.username = 'destry'
#------------------------------------------------------------------------------
# MultiKernelManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## A class for managing multiple kernels.
## The name of the default kernel to start
#c.MultiKernelManager.default_kernel_name = 'python3'
## The kernel manager class. This is configurable to allow subclassing of the
# KernelManager for customized behavior.
#c.MultiKernelManager.kernel_manager_class = 'jupyter_client.ioloop.IOLoopKernelManager'
#------------------------------------------------------------------------------
# MappingKernelManager(MultiKernelManager) configuration
#------------------------------------------------------------------------------
## A KernelManager that handles notebook mapping and HTTP error handling
## Whether to consider culling kernels which are busy. Only effective if
# cull_idle_timeout is not 0.
#c.MappingKernelManager.cull_busy = False
## Whether to consider culling kernels which have one or more connections. Only
# effective if cull_idle_timeout is not 0.
#c.MappingKernelManager.cull_connected = False
## Timeout (in seconds) after which a kernel is considered idle and ready to be
# culled. Values of 0 or lower disable culling. The minimum timeout is 300
# seconds (5 minutes). Positive values less than the minimum value will be set
# to the minimum.
#c.MappingKernelManager.cull_idle_timeout = 0
## The interval (in seconds) on which to check for idle kernels exceeding the
# cull timeout value.
#c.MappingKernelManager.cull_interval = 300
##
#c.MappingKernelManager.root_dir = ''
#------------------------------------------------------------------------------
# ContentsManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## Base class for serving files and directories.
#
# This serves any text or binary file, as well as directories, with special
# handling for JSON notebook documents.
#
# Most APIs take a path argument, which is always an API-style unicode path, and
# always refers to a directory.
#
# - unicode, not url-escaped
# - '/'-separated
# - leading and trailing '/' will be stripped
# - if unspecified, path defaults to '',
# indicating the root path.
##
#c.ContentsManager.checkpoints = None
##
#c.ContentsManager.checkpoints_class = 'notebook.services.contents.checkpoints.Checkpoints'
##
#c.ContentsManager.checkpoints_kwargs = {}
##
#c.ContentsManager.files_handler_class = 'notebook.base.handlers.IPythonHandler'
## Glob patterns to hide in file and directory listings.
#c.ContentsManager.hide_globs = ['__pycache__', '*.pyc', '*.pyo', '.DS_Store', '*.so', '*.dylib', '*~']
## Python callable or importstring thereof
#
# To be called on a contents model prior to save.
#
# This can be used to process the structure, such as removing notebook outputs
# or other side effects that should not be saved.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(path=path, model=model, contents_manager=self)
#
# - model: the model to be saved. Includes file contents.
# Modifying this dict will affect the file that is stored.
# - path: the API path of the save destination
# - contents_manager: this ContentsManager instance
#c.ContentsManager.pre_save_hook = None
##
#c.ContentsManager.root_dir = '/'
## The base name used when creating untitled directories.
#c.ContentsManager.untitled_directory = 'Untitled Folder'
## The base name used when creating untitled files.
#c.ContentsManager.untitled_file = 'untitled'
## The base name used when creating untitled notebooks.
#c.ContentsManager.untitled_notebook = 'Untitled'
#------------------------------------------------------------------------------
# FileManagerMixin(Configurable) configuration
#------------------------------------------------------------------------------
## Mixin for ContentsAPI classes that interact with the filesystem.
#
# Provides facilities for reading, writing, and copying both notebooks and
# generic files.
#
# Shared by FileContentsManager and FileCheckpoints.
#
# Note ---- Classes using this mixin must provide the following attributes:
#
# root_dir : unicode
# A directory against against which API-style paths are to be resolved.
#
# log : logging.Logger
## By default notebooks are saved on disk to a temporary file and then, if
# successfully written, it replaces the old one. This procedure, namely
# 'atomic_writing', causes some bugs on file systems without operation order
# enforcement (like some networked filesystems). If set to False, the new
# notebook is written directly over the old one, which could fail (e.g. full
# filesystem or quota).
#c.FileManagerMixin.use_atomic_writing = True
#------------------------------------------------------------------------------
# FileContentsManager(FileManagerMixin,ContentsManager) configuration
#------------------------------------------------------------------------------
## Python callable or importstring thereof
#
# to be called on the path of a file just saved.
#
# This can be used to process the file on disk, such as converting the notebook
# to a script or HTML via nbconvert.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(os_path=os_path, model=model, contents_manager=instance)
#
# - path: the filesystem path to the file just written - model: the model
# representing the file - contents_manager: this ContentsManager instance
#c.FileContentsManager.post_save_hook = None
##
#c.FileContentsManager.root_dir = ''
## DEPRECATED, use post_save_hook. Will be removed in Notebook 5.0
#c.FileContentsManager.save_script = False
#------------------------------------------------------------------------------
# NotebookNotary(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## A class for computing and verifying notebook signatures.
## The hashing algorithm used to sign notebooks.
#c.NotebookNotary.algorithm = 'sha256'
## The sqlite file in which to store notebook signatures. By default, this will
# be in your Jupyter data directory. You can set it to ':memory:' to disable
# sqlite writing to the filesystem.
#c.NotebookNotary.db_file = ''
## The secret key with which notebooks are signed.
#c.NotebookNotary.secret = b''
## The file where the secret key is stored.
#c.NotebookNotary.secret_file = ''
## A callable returning the storage backend for notebook signatures. The default
# uses an SQLite database.
#c.NotebookNotary.store_factory = traitlets.Undefined
#------------------------------------------------------------------------------
# KernelSpecManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## If there is no Python kernelspec registered and the IPython kernel is
# available, ensure it is added to the spec list.
#c.KernelSpecManager.ensure_native_kernel = True
## The kernel spec class. This is configurable to allow subclassing of the
# KernelSpecManager for customized behavior.
#c.KernelSpecManager.kernel_spec_class = 'jupyter_client.kernelspec.KernelSpec'
## Whitelist of allowed kernel names.
#
# By default, all installed kernels are allowed.
#c.KernelSpecManager.whitelist = set()
| mit |
wheeler-microfluidics/microdrop | microdrop/core_plugins/command_plugin/plugin.py | 1 | 3735 | from multiprocessing import Process
import logging
import sys
from zmq_plugin.plugin import Plugin as ZmqPlugin
from zmq_plugin.schema import decode_content_data
import pandas as pd
from logging_helpers import _L #: .. versionadded:: 2.20
logger = logging.getLogger(__name__)
class CommandZmqPlugin(ZmqPlugin):
'''
API for registering commands.
'''
def __init__(self, parent, *args, **kwargs):
self.parent = parent
self.control_board = None
self._commands = pd.DataFrame(None, columns=['namespace',
'plugin_name',
'command_name', 'title'])
super(CommandZmqPlugin, self).__init__(*args, **kwargs)
def on_execute__unregister_command(self, request):
data = decode_content_data(request)
commands = self._commands
ix = commands.loc[(commands.namespace == data['namespace']) &
(commands.plugin_name == data['plugin_name']) &
(commands.command_name == data['command_name']) &
(commands.title == data['title'])].index
self._commands.drop(ix, inplace=True)
self._commands.reset_index(drop=True, inplace=True)
return self.commands
def on_execute__register_command(self, request):
data = decode_content_data(request)
plugin_name = data.get('plugin_name', request['header']['source'])
return self.register_command(plugin_name, data['command_name'],
namespace=data.get('namespace', ''),
title=data.get('title'))
def on_execute__get_commands(self, request):
return self.commands
def register_command(self, plugin_name, command_name, namespace='',
title=None):
'''
Register command.
Each command is unique by:
(namespace, plugin_name, command_name)
'''
if title is None:
title = (command_name[:1].upper() +
command_name[1:]).replace('_', ' ')
row_i = dict(zip(self._commands, [namespace, plugin_name, command_name,
title]))
self._commands = self._commands.append(row_i, ignore_index=True)
return self.commands
@property
def commands(self):
'''
Returns
-------
pd.Series
Series of command groups, where each group name maps to a series of
commands.
'''
return self._commands.copy()
def parse_args(args=None):
"""Parses arguments, returns (options, args)."""
from argparse import ArgumentParser
if args is None:
args = sys.argv
parser = ArgumentParser(description='ZeroMQ Plugin process.')
log_levels = ('critical', 'error', 'warning', 'info', 'debug', 'notset')
parser.add_argument('-l', '--log-level', type=str, choices=log_levels,
default='info')
parser.add_argument('hub_uri')
parser.add_argument('name', type=str)
args = parser.parse_args()
args.log_level = getattr(logging, args.log_level.upper())
return args
if __name__ == '__main__':
from zmq_plugin.bin.plugin import run_plugin
def run_plugin_process(uri, name, subscribe_options, log_level):
plugin_process = Process(target=run_plugin,
args=())
plugin_process.daemon = False
plugin_process.start()
args = parse_args()
logging.basicConfig(level=args.log_level)
task = CommandZmqPlugin(None, args.name, args.hub_uri, {})
run_plugin(task, args.log_level)
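# Usage sketch (illustrative; registers a command directly on a plugin instance
# rather than over the ZeroMQ hub, and the hub URI and plugin/command names are
# assumptions):
#
#     plugin = CommandZmqPlugin(None, 'commands', 'tcp://localhost:31000', {})
#     plugin.register_command('my_plugin', 'clear_electrodes',
#                             title='Clear electrodes')
#     print(plugin.commands)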
| bsd-3-clause |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/pandas/tests/groupby/test_timegrouper.py | 6 | 25569 | """ test with the TimeGrouper / grouping with datetimes """
import pytest
from datetime import datetime
import numpy as np
from numpy import nan
import pandas as pd
from pandas import (DataFrame, date_range, Index,
Series, MultiIndex, Timestamp, DatetimeIndex)
from pandas.compat import StringIO
from pandas.util import testing as tm
from pandas.util.testing import assert_frame_equal, assert_series_equal
class TestGroupBy(object):
def test_groupby_with_timegrouper(self):
# GH 4161
# TimeGrouper requires a sorted index
# also verifies that the resultant index has the correct name
df_original = DataFrame({
'Buyer': 'Carl Carl Carl Carl Joe Carl'.split(),
'Quantity': [18, 3, 5, 1, 9, 3],
'Date': [
datetime(2013, 9, 1, 13, 0),
datetime(2013, 9, 1, 13, 5),
datetime(2013, 10, 1, 20, 0),
datetime(2013, 10, 3, 10, 0),
datetime(2013, 12, 2, 12, 0),
datetime(2013, 9, 2, 14, 0),
]
})
# GH 6908 change target column's order
df_reordered = df_original.sort_values(by='Quantity')
for df in [df_original, df_reordered]:
df = df.set_index(['Date'])
expected = DataFrame(
{'Quantity': np.nan},
index=date_range('20130901 13:00:00',
'20131205 13:00:00', freq='5D',
name='Date', closed='left'))
expected.iloc[[0, 6, 18], 0] = np.array(
[24., 6., 9.], dtype='float64')
            result1 = df.resample('5D').sum()
assert_frame_equal(result1, expected)
df_sorted = df.sort_index()
result2 = df_sorted.groupby(pd.TimeGrouper(freq='5D')).sum()
assert_frame_equal(result2, expected)
result3 = df.groupby(pd.TimeGrouper(freq='5D')).sum()
assert_frame_equal(result3, expected)
def test_groupby_with_timegrouper_methods(self):
# GH 3881
# make sure API of timegrouper conforms
df_original = pd.DataFrame({
'Branch': 'A A A A A B'.split(),
'Buyer': 'Carl Mark Carl Joe Joe Carl'.split(),
'Quantity': [1, 3, 5, 8, 9, 3],
'Date': [
datetime(2013, 1, 1, 13, 0),
datetime(2013, 1, 1, 13, 5),
datetime(2013, 10, 1, 20, 0),
datetime(2013, 10, 2, 10, 0),
datetime(2013, 12, 2, 12, 0),
datetime(2013, 12, 2, 14, 0),
]
})
df_sorted = df_original.sort_values(by='Quantity', ascending=False)
for df in [df_original, df_sorted]:
df = df.set_index('Date', drop=False)
g = df.groupby(pd.TimeGrouper('6M'))
assert g.group_keys
assert isinstance(g.grouper, pd.core.groupby.BinGrouper)
groups = g.groups
assert isinstance(groups, dict)
assert len(groups) == 3
def test_timegrouper_with_reg_groups(self):
# GH 3794
        # allow combination of timegrouper/reg groups
df_original = DataFrame({
'Branch': 'A A A A A A A B'.split(),
'Buyer': 'Carl Mark Carl Carl Joe Joe Joe Carl'.split(),
'Quantity': [1, 3, 5, 1, 8, 1, 9, 3],
'Date': [
datetime(2013, 1, 1, 13, 0),
datetime(2013, 1, 1, 13, 5),
datetime(2013, 10, 1, 20, 0),
datetime(2013, 10, 2, 10, 0),
datetime(2013, 10, 1, 20, 0),
datetime(2013, 10, 2, 10, 0),
datetime(2013, 12, 2, 12, 0),
datetime(2013, 12, 2, 14, 0),
]
}).set_index('Date')
df_sorted = df_original.sort_values(by='Quantity', ascending=False)
for df in [df_original, df_sorted]:
expected = DataFrame({
'Buyer': 'Carl Joe Mark'.split(),
'Quantity': [10, 18, 3],
'Date': [
datetime(2013, 12, 31, 0, 0),
datetime(2013, 12, 31, 0, 0),
datetime(2013, 12, 31, 0, 0),
]
}).set_index(['Date', 'Buyer'])
result = df.groupby([pd.Grouper(freq='A'), 'Buyer']).sum()
assert_frame_equal(result, expected)
expected = DataFrame({
'Buyer': 'Carl Mark Carl Joe'.split(),
'Quantity': [1, 3, 9, 18],
'Date': [
datetime(2013, 1, 1, 0, 0),
datetime(2013, 1, 1, 0, 0),
datetime(2013, 7, 1, 0, 0),
datetime(2013, 7, 1, 0, 0),
]
}).set_index(['Date', 'Buyer'])
result = df.groupby([pd.Grouper(freq='6MS'), 'Buyer']).sum()
assert_frame_equal(result, expected)
df_original = DataFrame({
'Branch': 'A A A A A A A B'.split(),
'Buyer': 'Carl Mark Carl Carl Joe Joe Joe Carl'.split(),
'Quantity': [1, 3, 5, 1, 8, 1, 9, 3],
'Date': [
datetime(2013, 10, 1, 13, 0),
datetime(2013, 10, 1, 13, 5),
datetime(2013, 10, 1, 20, 0),
datetime(2013, 10, 2, 10, 0),
datetime(2013, 10, 1, 20, 0),
datetime(2013, 10, 2, 10, 0),
datetime(2013, 10, 2, 12, 0),
datetime(2013, 10, 2, 14, 0),
]
}).set_index('Date')
df_sorted = df_original.sort_values(by='Quantity', ascending=False)
for df in [df_original, df_sorted]:
expected = DataFrame({
'Buyer': 'Carl Joe Mark Carl Joe'.split(),
'Quantity': [6, 8, 3, 4, 10],
'Date': [
datetime(2013, 10, 1, 0, 0),
datetime(2013, 10, 1, 0, 0),
datetime(2013, 10, 1, 0, 0),
datetime(2013, 10, 2, 0, 0),
datetime(2013, 10, 2, 0, 0),
]
}).set_index(['Date', 'Buyer'])
result = df.groupby([pd.Grouper(freq='1D'), 'Buyer']).sum()
assert_frame_equal(result, expected)
result = df.groupby([pd.Grouper(freq='1M'), 'Buyer']).sum()
expected = DataFrame({
'Buyer': 'Carl Joe Mark'.split(),
'Quantity': [10, 18, 3],
'Date': [
datetime(2013, 10, 31, 0, 0),
datetime(2013, 10, 31, 0, 0),
datetime(2013, 10, 31, 0, 0),
]
}).set_index(['Date', 'Buyer'])
assert_frame_equal(result, expected)
# passing the name
df = df.reset_index()
result = df.groupby([pd.Grouper(freq='1M', key='Date'), 'Buyer'
]).sum()
assert_frame_equal(result, expected)
with pytest.raises(KeyError):
df.groupby([pd.Grouper(freq='1M', key='foo'), 'Buyer']).sum()
# passing the level
df = df.set_index('Date')
result = df.groupby([pd.Grouper(freq='1M', level='Date'), 'Buyer'
]).sum()
assert_frame_equal(result, expected)
result = df.groupby([pd.Grouper(freq='1M', level=0), 'Buyer']).sum(
)
assert_frame_equal(result, expected)
with pytest.raises(ValueError):
df.groupby([pd.Grouper(freq='1M', level='foo'),
'Buyer']).sum()
# multi names
df = df.copy()
df['Date'] = df.index + pd.offsets.MonthEnd(2)
result = df.groupby([pd.Grouper(freq='1M', key='Date'), 'Buyer'
]).sum()
expected = DataFrame({
'Buyer': 'Carl Joe Mark'.split(),
'Quantity': [10, 18, 3],
'Date': [
datetime(2013, 11, 30, 0, 0),
datetime(2013, 11, 30, 0, 0),
datetime(2013, 11, 30, 0, 0),
]
}).set_index(['Date', 'Buyer'])
assert_frame_equal(result, expected)
# error as we have both a level and a name!
with pytest.raises(ValueError):
df.groupby([pd.Grouper(freq='1M', key='Date',
level='Date'), 'Buyer']).sum()
# single groupers
expected = DataFrame({'Quantity': [31],
'Date': [datetime(2013, 10, 31, 0, 0)
]}).set_index('Date')
result = df.groupby(pd.Grouper(freq='1M')).sum()
assert_frame_equal(result, expected)
result = df.groupby([pd.Grouper(freq='1M')]).sum()
assert_frame_equal(result, expected)
expected = DataFrame({'Quantity': [31],
'Date': [datetime(2013, 11, 30, 0, 0)
]}).set_index('Date')
result = df.groupby(pd.Grouper(freq='1M', key='Date')).sum()
assert_frame_equal(result, expected)
result = df.groupby([pd.Grouper(freq='1M', key='Date')]).sum()
assert_frame_equal(result, expected)
# GH 6764 multiple grouping with/without sort
df = DataFrame({
'date': pd.to_datetime([
'20121002', '20121007', '20130130', '20130202', '20130305',
'20121002', '20121207', '20130130', '20130202', '20130305',
'20130202', '20130305'
]),
'user_id': [1, 1, 1, 1, 1, 3, 3, 3, 5, 5, 5, 5],
'whole_cost': [1790, 364, 280, 259, 201, 623, 90, 312, 359, 301,
359, 801],
'cost1': [12, 15, 10, 24, 39, 1, 0, 90, 45, 34, 1, 12]
}).set_index('date')
for freq in ['D', 'M', 'A', 'Q-APR']:
expected = df.groupby('user_id')[
'whole_cost'].resample(
freq).sum().dropna().reorder_levels(
['date', 'user_id']).sort_index().astype('int64')
expected.name = 'whole_cost'
result1 = df.sort_index().groupby([pd.TimeGrouper(freq=freq),
'user_id'])['whole_cost'].sum()
assert_series_equal(result1, expected)
result2 = df.groupby([pd.TimeGrouper(freq=freq), 'user_id'])[
'whole_cost'].sum()
assert_series_equal(result2, expected)
def test_timegrouper_get_group(self):
# GH 6914
df_original = DataFrame({
'Buyer': 'Carl Joe Joe Carl Joe Carl'.split(),
'Quantity': [18, 3, 5, 1, 9, 3],
'Date': [datetime(2013, 9, 1, 13, 0),
datetime(2013, 9, 1, 13, 5),
datetime(2013, 10, 1, 20, 0),
datetime(2013, 10, 3, 10, 0),
datetime(2013, 12, 2, 12, 0),
datetime(2013, 9, 2, 14, 0), ]
})
df_reordered = df_original.sort_values(by='Quantity')
# single grouping
expected_list = [df_original.iloc[[0, 1, 5]], df_original.iloc[[2, 3]],
df_original.iloc[[4]]]
dt_list = ['2013-09-30', '2013-10-31', '2013-12-31']
for df in [df_original, df_reordered]:
grouped = df.groupby(pd.Grouper(freq='M', key='Date'))
for t, expected in zip(dt_list, expected_list):
dt = pd.Timestamp(t)
result = grouped.get_group(dt)
assert_frame_equal(result, expected)
# multiple grouping
expected_list = [df_original.iloc[[1]], df_original.iloc[[3]],
df_original.iloc[[4]]]
g_list = [('Joe', '2013-09-30'), ('Carl', '2013-10-31'),
('Joe', '2013-12-31')]
for df in [df_original, df_reordered]:
grouped = df.groupby(['Buyer', pd.Grouper(freq='M', key='Date')])
for (b, t), expected in zip(g_list, expected_list):
dt = pd.Timestamp(t)
result = grouped.get_group((b, dt))
assert_frame_equal(result, expected)
# with index
df_original = df_original.set_index('Date')
df_reordered = df_original.sort_values(by='Quantity')
expected_list = [df_original.iloc[[0, 1, 5]], df_original.iloc[[2, 3]],
df_original.iloc[[4]]]
for df in [df_original, df_reordered]:
grouped = df.groupby(pd.Grouper(freq='M'))
for t, expected in zip(dt_list, expected_list):
dt = pd.Timestamp(t)
result = grouped.get_group(dt)
assert_frame_equal(result, expected)
def test_timegrouper_apply_return_type_series(self):
# Using `apply` with the `TimeGrouper` should give the
# same return type as an `apply` with a `Grouper`.
# Issue #11742
df = pd.DataFrame({'date': ['10/10/2000', '11/10/2000'],
'value': [10, 13]})
df_dt = df.copy()
df_dt['date'] = pd.to_datetime(df_dt['date'])
def sumfunc_series(x):
return pd.Series([x['value'].sum()], ('sum',))
expected = df.groupby(pd.Grouper(key='date')).apply(sumfunc_series)
result = (df_dt.groupby(pd.TimeGrouper(freq='M', key='date'))
.apply(sumfunc_series))
assert_frame_equal(result.reset_index(drop=True),
expected.reset_index(drop=True))
def test_timegrouper_apply_return_type_value(self):
# Using `apply` with the `TimeGrouper` should give the
# same return type as an `apply` with a `Grouper`.
# Issue #11742
df = pd.DataFrame({'date': ['10/10/2000', '11/10/2000'],
'value': [10, 13]})
df_dt = df.copy()
df_dt['date'] = pd.to_datetime(df_dt['date'])
def sumfunc_value(x):
return x.value.sum()
expected = df.groupby(pd.Grouper(key='date')).apply(sumfunc_value)
result = (df_dt.groupby(pd.TimeGrouper(freq='M', key='date'))
.apply(sumfunc_value))
assert_series_equal(result.reset_index(drop=True),
expected.reset_index(drop=True))
def test_groupby_groups_datetimeindex(self):
# #1430
periods = 1000
ind = DatetimeIndex(start='2012/1/1', freq='5min', periods=periods)
df = DataFrame({'high': np.arange(periods),
'low': np.arange(periods)}, index=ind)
grouped = df.groupby(lambda x: datetime(x.year, x.month, x.day))
# it works!
groups = grouped.groups
assert isinstance(list(groups.keys())[0], datetime)
# GH 11442
index = pd.date_range('2015/01/01', periods=5, name='date')
df = pd.DataFrame({'A': [5, 6, 7, 8, 9],
'B': [1, 2, 3, 4, 5]}, index=index)
result = df.groupby(level='date').groups
dates = ['2015-01-05', '2015-01-04', '2015-01-03',
'2015-01-02', '2015-01-01']
expected = {pd.Timestamp(date): pd.DatetimeIndex([date], name='date')
for date in dates}
tm.assert_dict_equal(result, expected)
grouped = df.groupby(level='date')
for date in dates:
result = grouped.get_group(date)
data = [[df.loc[date, 'A'], df.loc[date, 'B']]]
expected_index = pd.DatetimeIndex([date], name='date')
expected = pd.DataFrame(data,
columns=list('AB'),
index=expected_index)
tm.assert_frame_equal(result, expected)
def test_groupby_groups_datetimeindex_tz(self):
# GH 3950
dates = ['2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00', '2011-07-19 07:00:00',
'2011-07-19 08:00:00', '2011-07-19 09:00:00']
df = DataFrame({'label': ['a', 'a', 'a', 'b', 'b', 'b'],
'datetime': dates,
'value1': np.arange(6, dtype='int64'),
'value2': [1, 2] * 3})
df['datetime'] = df['datetime'].apply(
lambda d: Timestamp(d, tz='US/Pacific'))
exp_idx1 = pd.DatetimeIndex(['2011-07-19 07:00:00',
'2011-07-19 07:00:00',
'2011-07-19 08:00:00',
'2011-07-19 08:00:00',
'2011-07-19 09:00:00',
'2011-07-19 09:00:00'],
tz='US/Pacific', name='datetime')
exp_idx2 = Index(['a', 'b'] * 3, name='label')
exp_idx = MultiIndex.from_arrays([exp_idx1, exp_idx2])
expected = DataFrame({'value1': [0, 3, 1, 4, 2, 5],
'value2': [1, 2, 2, 1, 1, 2]},
index=exp_idx, columns=['value1', 'value2'])
result = df.groupby(['datetime', 'label']).sum()
assert_frame_equal(result, expected)
# by level
didx = pd.DatetimeIndex(dates, tz='Asia/Tokyo')
df = DataFrame({'value1': np.arange(6, dtype='int64'),
'value2': [1, 2, 3, 1, 2, 3]},
index=didx)
exp_idx = pd.DatetimeIndex(['2011-07-19 07:00:00',
'2011-07-19 08:00:00',
'2011-07-19 09:00:00'], tz='Asia/Tokyo')
expected = DataFrame({'value1': [3, 5, 7], 'value2': [2, 4, 6]},
index=exp_idx, columns=['value1', 'value2'])
result = df.groupby(level=0).sum()
assert_frame_equal(result, expected)
def test_frame_datetime64_handling_groupby(self):
# it works!
df = DataFrame([(3, np.datetime64('2012-07-03')),
(3, np.datetime64('2012-07-04'))],
columns=['a', 'date'])
result = df.groupby('a').first()
assert result['date'][3] == Timestamp('2012-07-03')
def test_groupby_multi_timezone(self):
# combining multiple / different timezones yields UTC
data = """0,2000-01-28 16:47:00,America/Chicago
1,2000-01-29 16:48:00,America/Chicago
2,2000-01-30 16:49:00,America/Los_Angeles
3,2000-01-31 16:50:00,America/Chicago
4,2000-01-01 16:50:00,America/New_York"""
df = pd.read_csv(StringIO(data), header=None,
names=['value', 'date', 'tz'])
result = df.groupby('tz').date.apply(
lambda x: pd.to_datetime(x).dt.tz_localize(x.name))
expected = Series([Timestamp('2000-01-28 16:47:00-0600',
tz='America/Chicago'),
Timestamp('2000-01-29 16:48:00-0600',
tz='America/Chicago'),
Timestamp('2000-01-30 16:49:00-0800',
tz='America/Los_Angeles'),
Timestamp('2000-01-31 16:50:00-0600',
tz='America/Chicago'),
Timestamp('2000-01-01 16:50:00-0500',
tz='America/New_York')],
name='date',
dtype=object)
assert_series_equal(result, expected)
tz = 'America/Chicago'
res_values = df.groupby('tz').date.get_group(tz)
result = pd.to_datetime(res_values).dt.tz_localize(tz)
exp_values = Series(['2000-01-28 16:47:00', '2000-01-29 16:48:00',
'2000-01-31 16:50:00'],
index=[0, 1, 3], name='date')
expected = pd.to_datetime(exp_values).dt.tz_localize(tz)
assert_series_equal(result, expected)
def test_groupby_groups_periods(self):
dates = ['2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00', '2011-07-19 07:00:00',
'2011-07-19 08:00:00', '2011-07-19 09:00:00']
df = DataFrame({'label': ['a', 'a', 'a', 'b', 'b', 'b'],
'period': [pd.Period(d, freq='H') for d in dates],
'value1': np.arange(6, dtype='int64'),
'value2': [1, 2] * 3})
exp_idx1 = pd.PeriodIndex(['2011-07-19 07:00:00',
'2011-07-19 07:00:00',
'2011-07-19 08:00:00',
'2011-07-19 08:00:00',
'2011-07-19 09:00:00',
'2011-07-19 09:00:00'],
freq='H', name='period')
exp_idx2 = Index(['a', 'b'] * 3, name='label')
exp_idx = MultiIndex.from_arrays([exp_idx1, exp_idx2])
expected = DataFrame({'value1': [0, 3, 1, 4, 2, 5],
'value2': [1, 2, 2, 1, 1, 2]},
index=exp_idx, columns=['value1', 'value2'])
result = df.groupby(['period', 'label']).sum()
assert_frame_equal(result, expected)
# by level
didx = pd.PeriodIndex(dates, freq='H')
df = DataFrame({'value1': np.arange(6, dtype='int64'),
'value2': [1, 2, 3, 1, 2, 3]},
index=didx)
exp_idx = pd.PeriodIndex(['2011-07-19 07:00:00',
'2011-07-19 08:00:00',
'2011-07-19 09:00:00'], freq='H')
expected = DataFrame({'value1': [3, 5, 7], 'value2': [2, 4, 6]},
index=exp_idx, columns=['value1', 'value2'])
result = df.groupby(level=0).sum()
assert_frame_equal(result, expected)
def test_groupby_first_datetime64(self):
df = DataFrame([(1, 1351036800000000000), (2, 1351036800000000000)])
df[1] = df[1].view('M8[ns]')
assert issubclass(df[1].dtype.type, np.datetime64)
result = df.groupby(level=0).first()
got_dt = result[1].dtype
assert issubclass(got_dt.type, np.datetime64)
result = df[1].groupby(level=0).first()
got_dt = result.dtype
assert issubclass(got_dt.type, np.datetime64)
def test_groupby_max_datetime64(self):
# GH 5869
# datetimelike dtype conversion from int
df = DataFrame(dict(A=Timestamp('20130101'), B=np.arange(5)))
expected = df.groupby('A')['A'].apply(lambda x: x.max())
result = df.groupby('A')['A'].max()
assert_series_equal(result, expected)
def test_groupby_datetime64_32_bit(self):
# GH 6410 / numpy 4328
# 32-bit under 1.9-dev indexing issue
df = DataFrame({"A": range(2), "B": [pd.Timestamp('2000-01-1')] * 2})
result = df.groupby("A")["B"].transform(min)
expected = Series([pd.Timestamp('2000-01-1')] * 2, name='B')
assert_series_equal(result, expected)
def test_groupby_with_timezone_selection(self):
# GH 11616
# Test that column selection returns output in correct timezone.
np.random.seed(42)
df = pd.DataFrame({
'factor': np.random.randint(0, 3, size=60),
'time': pd.date_range('01/01/2000 00:00', periods=60,
freq='s', tz='UTC')
})
df1 = df.groupby('factor').max()['time']
df2 = df.groupby('factor')['time'].max()
tm.assert_series_equal(df1, df2)
def test_timezone_info(self):
# GH 11682
# Timezone info lost when broadcasting scalar datetime to DataFrame
tm._skip_if_no_pytz()
import pytz
df = pd.DataFrame({'a': [1], 'b': [datetime.now(pytz.utc)]})
assert df['b'][0].tzinfo == pytz.utc
df = pd.DataFrame({'a': [1, 2, 3]})
df['b'] = datetime.now(pytz.utc)
assert df['b'][0].tzinfo == pytz.utc
def test_datetime_count(self):
df = DataFrame({'a': [1, 2, 3] * 2,
'dates': pd.date_range('now', periods=6, freq='T')})
result = df.groupby('a').dates.count()
expected = Series([
2, 2, 2
], index=Index([1, 2, 3], name='a'), name='dates')
tm.assert_series_equal(result, expected)
def test_first_last_max_min_on_time_data(self):
# GH 10295
# Verify that NaT is not in the result of max, min, first and last on
# Dataframe with datetime or timedelta values.
from datetime import timedelta as td
df_test = DataFrame(
{'dt': [nan, '2015-07-24 10:10', '2015-07-25 11:11',
'2015-07-23 12:12', nan],
'td': [nan, td(days=1), td(days=2), td(days=3), nan]})
df_test.dt = pd.to_datetime(df_test.dt)
df_test['group'] = 'A'
df_ref = df_test[df_test.dt.notnull()]
grouped_test = df_test.groupby('group')
grouped_ref = df_ref.groupby('group')
assert_frame_equal(grouped_ref.max(), grouped_test.max())
assert_frame_equal(grouped_ref.min(), grouped_test.min())
assert_frame_equal(grouped_ref.first(), grouped_test.first())
assert_frame_equal(grouped_ref.last(), grouped_test.last())
| mit |
EFord36/normalise | evaluation/Eval_ALPHA.py | 1 | 2444 | # -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import (accuracy_score, confusion_matrix, precision_score,
recall_score)
from normalise.class_ALPHA import run_clfALPHA, gen_frame
from normalise.tagger import tagify
from gs_ALPHA_dict import gs_ALPHA_dict, gs_ALPHA_tagged
from gold_standard_full import gold_standard
text = gold_standard
def create_ALPHA_ex():
ALPHA_ex = []
for ind, (word, tag) in gs_ALPHA_dict.items():
ALPHA_ex.append(gen_frame((ind, (word, tag)), text))
with open('gs_alphas', mode='w', encoding='utf-8') as file:
file.write(str(ALPHA_ex))
gold_standard_predicted = run_clfALPHA(gs_ALPHA_dict, text, verbose=False)
def gold_vs_pred_tuple():
""" Return list of predicted tags and list of gold standard tags"""
predicted = []
gold = []
for ind, (value1, value2, value3) in gold_standard_predicted.items():
predicted.append(value3)
gold.append(gs_ALPHA_tagged[ind][2])
return predicted, gold
accuracy = accuracy_score(gold_vs_pred_tuple()[0], gold_vs_pred_tuple()[1])
labels = ['LSEQ', 'EXPN', 'WDLK']
# Return a confusion matrix.
confusion = confusion_matrix(gold_vs_pred_tuple()[0], gold_vs_pred_tuple()[1],
labels)
# Return a normalised confusion matrix.
confusion_normalised = (confusion.astype('float') / confusion.sum(axis=1)
[:, np.newaxis])
def plot_confusion_matrix(r):
""" Plot a graphical confusion matrix with predicted tags on the x axis
and correct tags on the y axis. Allows us to see which pairs of tags are
confused most frequently.
"""
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(r)
plt.title('Confusion Matrix')
fig.colorbar(cax)
ax.set_xticklabels([''] + labels)
ax.set_yticklabels([''] + labels)
plt.xlabel('Predicted')
plt.ylabel('True')
plt.show()
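# Editorial usage sketch (not in the original script): both names below are
# defined earlier in this module; uncomment the call to render the normalised
# confusion matrix computed above.
# plot_confusion_matrix(confusion_normalised)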
def list_errors():
for ind, (txt, tag, ntag) in gold_standard_predicted.items():
if ntag != gs_ALPHA_tagged[ind][2]:
print("Ind: {0}, Item: {1}, ".format(ind, txt)
+ "Predicted Tag: {}, ".format(ntag)
+ "True Tag: {}, ".format(gs_ALPHA_tagged[ind][2])
+ "/n, {}".format(gen_frame((ind, (txt, tag)), text))
)
| gpl-3.0 |
Typere/gr-specest | python/fam_matplotlib.py | 2 | 3842 | #!/usr/bin/env python
# vim: set fileencoding=utf-8 :
#
# Copyright 2011,2013 Communications Engineering Lab, KIT
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
Provides functionality to run the FAM on-line with Matplotlib output.
"""
import numpy
import time
import gobject
import matplotlib
matplotlib.use('GTKAgg')
import matplotlib.pylab as plt
from gnuradio import gr
from gnuradio import analog
from gnuradio import blocks
import specest
class FAMProcessor(gr.top_block):
""" Simple flow graph: run file through FAM.
Plotting is done in animate()! """
def __init__(self, Np=32, P=128, L=2,
filename=None, sample_type='complex', verbose=True):
gr.top_block.__init__(self)
if filename is None:
src = analog.noise_source_c(analog.GR_GAUSSIAN, 1)
if verbose:
print "Using Gaussian noise source."
else:
if sample_type == 'complex':
src = blocks.file_source(gr.sizeof_gr_complex, filename, True)
else:
fsrc = blocks.file_source(gr.sizeof_float, filename, True)
src = blocks.float_to_complex()
self.connect(fsrc, src)
if verbose:
print "Reading data from %s" % filename
if verbose:
print "FAM configuration:"
print "N' = %d" % Np
print "P = %d" % P
print "L = %d" % L
#print "Δf = %f" % asfd
sink = blocks.null_sink(gr.sizeof_float * 2 * Np)
self.cyclo_fam = specest.cyclo_fam(Np, P, L)
self.connect(src, self.cyclo_fam, sink)
def animate(fam_block, image, cbar):
""" Read the data from the running block and shove it onto
the Matplotlib widget.
"""
while(True):
raw = fam_block.get_estimate()
data = numpy.array(raw)
image.set_data(data)
image.changed()
cbar.set_clim(vmax=data.max())
cbar.draw_all()
plt.draw()
yield True
def setup_fam_matplotlib(Np, P, L, filename, sample_type, verbose,
animate_func=animate):
"""Setup the FAM flow graph and Matplotlib and start it. """
mytb = FAMProcessor(filename=filename,
Np=Np, P=P, L=L,
sample_type=sample_type,
verbose=verbose)
# Start Flowgraph in background, then give it some time to fire up
mytb.start()
time.sleep(3)
# Calc First Image to Show, setup axis
raw = mytb.cyclo_fam.get_estimate()
data = numpy.array(raw)
image = plt.imshow(data,
interpolation='nearest',
animated=True,
extent=(-0.5, 0.5-1.0/Np, -1.0, 1.0-1.0/(P*L)))
cbar = plt.colorbar(image)
plt.xlabel('frequency / fs')
plt.ylabel('cycle frequency / fs')
plt.axis('normal')
plt.title('Magnitude of estimated cyclic spectrum with FAM')
# optional:
# pylab.axhline(linewidth=1, color='w')
# pylab.axvline(linewidth=1, color='w')
gobject.idle_add(lambda iter=animate_func(mytb.cyclo_fam, image, cbar): iter.next())
plt.show()
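# Editorial usage sketch (not part of the original module). The keyword values
# below mirror FAMProcessor's defaults; filename=None falls back to the
# Gaussian noise source, so no input file is assumed.
#     setup_fam_matplotlib(Np=32, P=128, L=2, filename=None,
#                          sample_type='complex', verbose=True)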
| gpl-3.0 |
Lightmatter/django-inlineformfield | .tox/py27/lib/python2.7/site-packages/IPython/extensions/rmagic.py | 5 | 22605 | # -*- coding: utf-8 -*-
"""
======
Rmagic
======
Magic command interface for interactive work with R via rpy2
.. note::
The ``rpy2`` package needs to be installed separately. It
can be obtained using ``easy_install`` or ``pip``.
You will also need a working copy of R.
Usage
=====
To enable the magics below, execute ``%load_ext rmagic``.
``%R``
{R_DOC}
``%Rpush``
{RPUSH_DOC}
``%Rpull``
{RPULL_DOC}
``%Rget``
{RGET_DOC}
"""
from __future__ import print_function
#-----------------------------------------------------------------------------
# Copyright (C) 2012 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
import sys
import tempfile
from glob import glob
from shutil import rmtree
# numpy and rpy2 imports
import numpy as np
import rpy2.rinterface as ri
import rpy2.robjects as ro
try:
from rpy2.robjects import pandas2ri
pandas2ri.activate()
except ImportError:
pandas2ri = None
from rpy2.robjects import numpy2ri
numpy2ri.activate()
# IPython imports
from IPython.core.displaypub import publish_display_data
from IPython.core.magic import (Magics, magics_class, line_magic,
line_cell_magic, needs_local_scope)
from IPython.testing.skipdoctest import skip_doctest
from IPython.core.magic_arguments import (
argument, magic_arguments, parse_argstring
)
from IPython.external.simplegeneric import generic
from IPython.utils.py3compat import (str_to_unicode, unicode_to_str, PY3,
unicode_type)
from IPython.utils.text import dedent
class RInterpreterError(ri.RRuntimeError):
"""An error when running R code in a %%R magic cell."""
def __init__(self, line, err, stdout):
self.line = line
self.err = err.rstrip()
self.stdout = stdout.rstrip()
def __unicode__(self):
s = 'Failed to parse and evaluate line %r.\nR error message: %r' % \
(self.line, self.err)
if self.stdout and (self.stdout != self.err):
s += '\nR stdout:\n' + self.stdout
return s
if PY3:
__str__ = __unicode__
else:
def __str__(self):
return unicode_to_str(unicode(self), 'utf-8')
def Rconverter(Robj, dataframe=False):
"""
Convert an object in R's namespace to one suitable
for ipython's namespace.
For a data.frame, it tries to return a structured array.
It first checks for colnames, then names.
If all are NULL, it returns np.asarray(Robj), else
it tries to construct a recarray
Parameters
----------
Robj: an R object returned from rpy2
"""
is_data_frame = ro.r('is.data.frame')
colnames = ro.r('colnames')
rownames = ro.r('rownames') # with pandas, these could be used for the index
names = ro.r('names')
if dataframe:
as_data_frame = ro.r('as.data.frame')
cols = colnames(Robj)
_names = names(Robj)
if cols != ri.NULL:
Robj = as_data_frame(Robj)
names = tuple(np.array(cols))
elif _names != ri.NULL:
names = tuple(np.array(_names))
else: # failed to find names
return np.asarray(Robj)
Robj = np.rec.fromarrays(Robj, names = names)
return np.asarray(Robj)
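# Editorial sketch (not in the original source): typical use of Rconverter on an
# rpy2 object built with the `ro` handle imported above; the data.frame is
# purely illustrative.
#     df = ro.r('data.frame(x=1:3, y=c(2.5, 3.5, 4.5))')
#     rec = Rconverter(df, dataframe=True)   # structured array with fields x, y
#     raw = Rconverter(df, dataframe=False)  # plain (transposed) ndarray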
@generic
def pyconverter(pyobj):
"""Convert Python objects to R objects. Add types using the decorator:
@pyconverter.when_type
"""
return pyobj
# The default conversion for lists seems to make them a nested list. That has
# some advantages, but is rarely convenient, so for interactive use, we convert
# lists to a numpy array, which becomes an R vector.
@pyconverter.when_type(list)
def pyconverter_list(pyobj):
return np.asarray(pyobj)
if pandas2ri is None:
# pandas2ri was new in rpy2 2.3.3, so for now we'll fallback to pandas'
# conversion function.
try:
from pandas import DataFrame
from pandas.rpy.common import convert_to_r_dataframe
@pyconverter.when_type(DataFrame)
def pyconverter_dataframe(pyobj):
return convert_to_r_dataframe(pyobj, strings_as_factors=True)
except ImportError:
pass
@magics_class
class RMagics(Magics):
"""A set of magics useful for interactive work with R via rpy2.
"""
def __init__(self, shell, Rconverter=Rconverter,
pyconverter=pyconverter,
cache_display_data=False):
"""
Parameters
----------
shell : IPython shell
Rconverter : callable
To be called on values taken from R before putting them in the
IPython namespace.
pyconverter : callable
To be called on values in ipython namespace before
assigning to variables in rpy2.
cache_display_data : bool
If True, the published results of the final call to R are
cached in the variable 'display_cache'.
"""
super(RMagics, self).__init__(shell)
self.cache_display_data = cache_display_data
self.r = ro.R()
self.Rstdout_cache = []
self.pyconverter = pyconverter
self.Rconverter = Rconverter
def eval(self, line):
'''
Parse and evaluate a line of R code with rpy2.
Returns the output to R's stdout() connection,
the value generated by evaluating the code, and a
boolean indicating whether the return value would be
visible if the line of code were evaluated in an R REPL.
R Code evaluation and visibility determination are
done via an R call of the form withVisible({<code>})
'''
old_writeconsole = ri.get_writeconsole()
ri.set_writeconsole(self.write_console)
try:
res = ro.r("withVisible({%s\n})" % line)
value = res[0] #value (R object)
visible = ro.conversion.ri2py(res[1])[0] #visible (boolean)
except (ri.RRuntimeError, ValueError) as exception:
warning_or_other_msg = self.flush() # otherwise next return seems to have copy of error
raise RInterpreterError(line, str_to_unicode(str(exception)), warning_or_other_msg)
text_output = self.flush()
ri.set_writeconsole(old_writeconsole)
return text_output, value, visible
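# Editorial sketch (not in the original source): eval() returns a
# (text_output, value, visible) triple, roughly like
#     text, value, visible = self.eval('x <- 1:3; mean(x)')
#     # text is empty, value wraps the R result 2, visible is True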
def write_console(self, output):
'''
A hook to capture R's stdout in a cache.
'''
self.Rstdout_cache.append(output)
def flush(self):
'''
Flush R's stdout cache to a string, returning the string.
'''
value = ''.join([str_to_unicode(s, 'utf-8') for s in self.Rstdout_cache])
self.Rstdout_cache = []
return value
@skip_doctest
@needs_local_scope
@line_magic
def Rpush(self, line, local_ns=None):
'''
A line-level magic for R that pushes
variables from python to rpy2. The line should be made up
of whitespace separated variable names in the IPython
namespace::
In [7]: import numpy as np
In [8]: X = np.array([4.5,6.3,7.9])
In [9]: X.mean()
Out[9]: 6.2333333333333343
In [10]: %Rpush X
In [11]: %R mean(X)
Out[11]: array([ 6.23333333])
'''
if local_ns is None:
local_ns = {}
inputs = line.split(' ')
for input in inputs:
try:
val = local_ns[input]
except KeyError:
try:
val = self.shell.user_ns[input]
except KeyError:
# reraise the KeyError as a NameError so that it looks like
# the standard python behavior when you use an unnamed
# variable
raise NameError("name '%s' is not defined" % input)
self.r.assign(input, self.pyconverter(val))
@skip_doctest
@magic_arguments()
@argument(
'-d', '--as_dataframe', action='store_true',
default=False,
help='Convert objects to data.frames before returning to ipython.'
)
@argument(
'outputs',
nargs='*',
)
@line_magic
def Rpull(self, line):
'''
A line-level magic for R that pulls
variables from python to rpy2::
In [18]: _ = %R x = c(3,4,6.7); y = c(4,6,7); z = c('a',3,4)
In [19]: %Rpull x y z
In [20]: x
Out[20]: array([ 3. , 4. , 6.7])
In [21]: y
Out[21]: array([ 4., 6., 7.])
In [22]: z
Out[22]:
array(['a', '3', '4'],
dtype='|S1')
If --as_dataframe, then each object is returned as a structured array
after first passed through "as.data.frame" in R before
being calling self.Rconverter.
This is useful when a structured array is desired as output, or
when the object in R has mixed data types.
See the %%R docstring for more examples.
Notes
-----
Beware that R names can have '.' so this is not fool proof.
To avoid this, don't name your R objects with '.'s...
'''
args = parse_argstring(self.Rpull, line)
outputs = args.outputs
for output in outputs:
self.shell.push({output:self.Rconverter(self.r(output),dataframe=args.as_dataframe)})
@skip_doctest
@magic_arguments()
@argument(
'-d', '--as_dataframe', action='store_true',
default=False,
help='Convert objects to data.frames before returning to ipython.'
)
@argument(
'output',
nargs=1,
type=str,
)
@line_magic
def Rget(self, line):
'''
Return an object from rpy2, possibly as a structured array (if possible).
Similar to Rpull except only one argument is accepted and the value is
returned rather than pushed to self.shell.user_ns::
In [3]: dtype=[('x', '<i4'), ('y', '<f8'), ('z', '|S1')]
In [4]: datapy = np.array([(1, 2.9, 'a'), (2, 3.5, 'b'), (3, 2.1, 'c'), (4, 5, 'e')], dtype=dtype)
In [5]: %R -i datapy
In [6]: %Rget datapy
Out[6]:
array([['1', '2', '3', '4'],
['2', '3', '2', '5'],
['a', 'b', 'c', 'e']],
dtype='|S1')
In [7]: %Rget -d datapy
Out[7]:
array([(1, 2.9, 'a'), (2, 3.5, 'b'), (3, 2.1, 'c'), (4, 5.0, 'e')],
dtype=[('x', '<i4'), ('y', '<f8'), ('z', '|S1')])
'''
args = parse_argstring(self.Rget, line)
output = args.output
return self.Rconverter(self.r(output[0]),dataframe=args.as_dataframe)
@skip_doctest
@magic_arguments()
@argument(
'-i', '--input', action='append',
help='Names of input variable from shell.user_ns to be assigned to R variables of the same names after calling self.pyconverter. Multiple names can be passed separated only by commas with no whitespace.'
)
@argument(
'-o', '--output', action='append',
help='Names of variables to be pushed from rpy2 to shell.user_ns after executing cell body and applying self.Rconverter. Multiple names can be passed separated only by commas with no whitespace.'
)
@argument(
'-w', '--width', type=int,
help='Width of png plotting device sent as an argument to *png* in R.'
)
@argument(
'-h', '--height', type=int,
help='Height of png plotting device sent as an argument to *png* in R.'
)
@argument(
'-d', '--dataframe', action='append',
help='Convert these objects to data.frames and return as structured arrays.'
)
@argument(
'-u', '--units', type=unicode_type, choices=["px", "in", "cm", "mm"],
help='Units of png plotting device sent as an argument to *png* in R. One of ["px", "in", "cm", "mm"].'
)
@argument(
'-r', '--res', type=int,
help='Resolution of png plotting device sent as an argument to *png* in R. Defaults to 72 if *units* is one of ["in", "cm", "mm"].'
)
@argument(
'-p', '--pointsize', type=int,
help='Pointsize of png plotting device sent as an argument to *png* in R.'
)
@argument(
'-b', '--bg',
help='Background of png plotting device sent as an argument to *png* in R.'
)
@argument(
'-n', '--noreturn',
help='Force the magic to not return anything.',
action='store_true',
default=False
)
@argument(
'code',
nargs='*',
)
@needs_local_scope
@line_cell_magic
def R(self, line, cell=None, local_ns=None):
'''
Execute code in R, and pull some of the results back into the Python namespace.
In line mode, this will evaluate an expression and convert the returned value to a Python object.
The return value is determined by rpy2's behaviour of returning the result of evaluating the
final line.
Multiple R lines can be executed by joining them with semicolons::
In [9]: %R X=c(1,4,5,7); sd(X); mean(X)
Out[9]: array([ 4.25])
In cell mode, this will run a block of R code. The resulting value
is printed if it would be printed when evaluating the same code
within a standard R REPL.
Nothing is returned to python by default in cell mode::
In [10]: %%R
....: Y = c(2,4,3,9)
....: summary(lm(Y~X))
Call:
lm(formula = Y ~ X)
Residuals:
1 2 3 4
0.88 -0.24 -2.28 1.64
Coefficients:
Estimate Std. Error t value Pr(>|t|)
(Intercept) 0.0800 2.3000 0.035 0.975
X 1.0400 0.4822 2.157 0.164
Residual standard error: 2.088 on 2 degrees of freedom
Multiple R-squared: 0.6993,Adjusted R-squared: 0.549
F-statistic: 4.651 on 1 and 2 DF, p-value: 0.1638
In the notebook, plots are published as the output of the cell::
%R plot(X, Y)
will create a scatter plot of X vs Y.
If cell is not None and line has some R code, it is prepended to
the R code in cell.
Objects can be passed back and forth between rpy2 and python via the -i -o flags in line::
In [14]: Z = np.array([1,4,5,10])
In [15]: %R -i Z mean(Z)
Out[15]: array([ 5.])
In [16]: %R -o W W=Z*mean(Z)
Out[16]: array([ 5., 20., 25., 50.])
In [17]: W
Out[17]: array([ 5., 20., 25., 50.])
The return value is determined by these rules:
* If the cell is not None, the magic returns None.
* If the cell evaluates as False, the resulting value is returned
unless the final line prints something to the console, in
which case None is returned.
* If the final line results in a NULL value when evaluated
by rpy2, then None is returned.
* No attempt is made to convert the final value to a structured array.
Use the --dataframe flag or %Rget to push / return a structured array.
* If the -n flag is present, there is no return value.
* A trailing ';' will also result in no return value as the last
value in the line is an empty string.
The --dataframe argument will attempt to return structured arrays.
This is useful for dataframes with
mixed data types. Note also that for a data.frame,
if it is returned as an ndarray, it is transposed::
In [18]: dtype=[('x', '<i4'), ('y', '<f8'), ('z', '|S1')]
In [19]: datapy = np.array([(1, 2.9, 'a'), (2, 3.5, 'b'), (3, 2.1, 'c'), (4, 5, 'e')], dtype=dtype)
In [20]: %%R -o datar
datar = datapy
....:
In [21]: datar
Out[21]:
array([['1', '2', '3', '4'],
['2', '3', '2', '5'],
['a', 'b', 'c', 'e']],
dtype='|S1')
In [22]: %%R -d datar
datar = datapy
....:
In [23]: datar
Out[23]:
array([(1, 2.9, 'a'), (2, 3.5, 'b'), (3, 2.1, 'c'), (4, 5.0, 'e')],
dtype=[('x', '<i4'), ('y', '<f8'), ('z', '|S1')])
The --dataframe argument first tries colnames, then names.
If both are NULL, it returns an ndarray (i.e. unstructured)::
In [1]: %R mydata=c(4,6,8.3); NULL
In [2]: %R -d mydata
In [3]: mydata
Out[3]: array([ 4. , 6. , 8.3])
In [4]: %R names(mydata) = c('a','b','c'); NULL
In [5]: %R -d mydata
In [6]: mydata
Out[6]:
array((4.0, 6.0, 8.3),
dtype=[('a', '<f8'), ('b', '<f8'), ('c', '<f8')])
In [7]: %R -o mydata
In [8]: mydata
Out[8]: array([ 4. , 6. , 8.3])
'''
args = parse_argstring(self.R, line)
# arguments 'code' in line are prepended to
# the cell lines
if cell is None:
code = ''
return_output = True
line_mode = True
else:
code = cell
return_output = False
line_mode = False
code = ' '.join(args.code) + code
# if there is no local namespace then default to an empty dict
if local_ns is None:
local_ns = {}
if args.input:
for input in ','.join(args.input).split(','):
try:
val = local_ns[input]
except KeyError:
try:
val = self.shell.user_ns[input]
except KeyError:
raise NameError("name '%s' is not defined" % input)
self.r.assign(input, self.pyconverter(val))
if getattr(args, 'units') is not None:
if args.units != "px" and getattr(args, 'res') is None:
args.res = 72
args.units = '"%s"' % args.units
png_argdict = dict([(n, getattr(args, n)) for n in ['units', 'res', 'height', 'width', 'bg', 'pointsize']])
png_args = ','.join(['%s=%s' % (o,v) for o, v in png_argdict.items() if v is not None])
# execute the R code in a temporary directory
tmpd = tempfile.mkdtemp()
self.r('png("%s/Rplots%%03d.png",%s)' % (tmpd.replace('\\', '/'), png_args))
text_output = ''
try:
if line_mode:
for line in code.split(';'):
text_result, result, visible = self.eval(line)
text_output += text_result
if text_result:
# the last line printed something to the console so we won't return it
return_output = False
else:
text_result, result, visible = self.eval(code)
text_output += text_result
if visible:
old_writeconsole = ri.get_writeconsole()
ri.set_writeconsole(self.write_console)
ro.r.show(result)
text_output += self.flush()
ri.set_writeconsole(old_writeconsole)
except RInterpreterError as e:
print(e.stdout)
if not e.stdout.endswith(e.err):
print(e.err)
rmtree(tmpd)
return
finally:
self.r('dev.off()')
# read out all the saved .png files
images = [open(imgfile, 'rb').read() for imgfile in glob("%s/Rplots*png" % tmpd)]
# now publish the images
# mimicking IPython/zmq/pylab/backend_inline.py
fmt = 'png'
mimetypes = { 'png' : 'image/png', 'svg' : 'image/svg+xml' }
mime = mimetypes[fmt]
# publish the printed R objects, if any
display_data = []
if text_output:
display_data.append(('RMagic.R', {'text/plain':text_output}))
# flush text streams before sending figures, helps a little with output
for image in images:
# synchronization in the console (though it's a bandaid, not a real sln)
sys.stdout.flush(); sys.stderr.flush()
display_data.append(('RMagic.R', {mime: image}))
# kill the temporary directory
rmtree(tmpd)
# try to turn every output into a numpy array
# this means that output are assumed to be castable
# as numpy arrays
if args.output:
for output in ','.join(args.output).split(','):
self.shell.push({output:self.Rconverter(self.r(output), dataframe=False)})
if args.dataframe:
for output in ','.join(args.dataframe).split(','):
self.shell.push({output:self.Rconverter(self.r(output), dataframe=True)})
for tag, disp_d in display_data:
publish_display_data(tag, disp_d)
# this will keep a reference to the display_data
# which might be useful to other objects who happen to use
# this method
if self.cache_display_data:
self.display_cache = display_data
# if in line mode and return_output, return the result as an ndarray
if return_output and not args.noreturn:
if result != ri.NULL:
return self.Rconverter(result, dataframe=False)
__doc__ = __doc__.format(
R_DOC = dedent(RMagics.R.__doc__),
RPUSH_DOC = dedent(RMagics.Rpush.__doc__),
RPULL_DOC = dedent(RMagics.Rpull.__doc__),
RGET_DOC = dedent(RMagics.Rget.__doc__)
)
def load_ipython_extension(ip):
"""Load the extension in IPython."""
ip.register_magics(RMagics)
# Initialising rpy2 interferes with readline. Since, at this point, we've
# probably just loaded rpy2, we reset the delimiters. See issue gh-2759.
if ip.has_readline:
ip.readline.set_completer_delims(ip.readline_delims)
| mit |
gromacs/copernicus | cpc/lib/msm/msmproject.py | 1 | 21848 | # This file is part of Copernicus
# http://www.copernicus-computing.org/
#
# Copyright (C) 2011, Sander Pronk, Iman Pouya, Erik Lindahl, and others.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as published
# by the Free Software Foundation
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os
import sys
import subprocess
import re
import logging
import traceback
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from scipy.stats import halfnorm
import scipy
import scipy.sparse
import random
from numpy import where
from numpy import array,argmax
import numpy
#import random
import matplotlib
#Use a non GUI backend for matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# Make sure msmbuilder is in the PYTHONPATH
from msmbuilder.CopernicusProject import *
import msmbuilder.MSMLib
import msmbuilder.Serializer
import msmbuilder.Trajectory
#import DataFile
# cpc stuff
import cpc.dataflow
from cpc.dataflow import FileValue
from cpc.lib.gromacs import cmds
log=logging.getLogger(__name__)
class TrajData(object):
"""Information about a trajectory"""
def __init__(self, lh5, xtc, xtc_nopbc, tpr, dt, frames):
self.lh5=lh5
self.xtc=xtc
self.xtc_nopbc=xtc_nopbc
self.tpr=tpr
self.dt=dt
self.frames=frames
class MSMProject(object):
''' MSM specific project data '''
# the name of the top-level element for this project data type
elementName = ""
def __init__(self, inp, out):
''' Initialize MSMProject '''
self.inp=inp
self.out=out # the output of this call of the MSM function
self.num_micro = inp.getInput('num_microstates')
if self.num_micro <= 1:
sys.stderr("Error: num_micro=%d: invalid number\n"%self.num_micro)
self.num_macro = int(inp.getInput('num_macrostates'))
if self.num_macro <= 1:
sys.stderr("Error: num_macro=%d: invalid number\n"%self.num_macro)
self.ref_conf = inp.getInput('reference')
self.grpname = inp.getInput('grpname')
#self.num_sim = inp.getInput('num_to_start')
self.lag_time = inp.getInput('lag_time')
self.cmdnames = cmds.GromacsCommands()
#TODO IMAN get input weights
if self.lag_time is not None and self.lag_time <= 0:
sys.stderr("Error: lag_time=%g: invalid number\n"%self.lag_time)
if inp.getInput('ndx') is not None:
self.ndx = inp.getInput('ndx')
else:
self.ndx = None
# The msm-builder project
self.Proj = None
# The assignments from msm-builder
self.assignments = None
# The transition count matrix from msm-builder
self.T = None
# The desired lag time
self.max_time = None
# Sims to start per round
self.num_to_start = int(inp.getInput('start_per_state'))
# handle trajectories
self.avgtime=0.
self.filelist=[]
self.trajData=dict()
ta=self.inp.getInput('trajectories')
i=0
for traj in ta:
lh5=self.inp.getInput('trajectories[%d].lh5'%i)
xtc=self.inp.getInput('trajectories[%d].xtc'%i)
xtc_nopbc=self.inp.getInput('trajectories[%d].xtc_nopbc'%i)
tpr=self.inp.getInput('trajectories[%d].tpr'%i)
dt=self.inp.getInput('trajectories[%d].dt'%i)
frames=self.inp.getInput('trajectories[%d].frames'%i)
self.filelist.append([lh5])
self.trajData[lh5]=TrajData(lh5, xtc, xtc_nopbc, tpr, dt, frames)
self.avgtime += dt * (frames-1)/1000.
i+=1
self.avgtime /= i
sys.stderr.write("Average trajectory time: %g ns\n"%(self.avgtime))
sys.stderr.write("filelist size=%d.\n"%(len(self.filelist)))
random.seed()
#def updateBoxVectors(self):
# ''' Fixes new box-vectors on all gro-files in the RandomConfs-dir '''
#
# grolist = []
#
# files = os.listdir('RandomConfs')
# for f in files:
# if f.endswith('.gro'):
# confFile = os.path.join('RandomConfs',f)
#
# cmd = 'sed \'$d\' < %s > foo; mv foo %s'%(confFile,confFile)
# retcode = subprocess.call(cmd,shell=True)
# cmd = 'tail -n 1 %s >> %s'%(self.grofile[0],confFile)
# retcode = subprocess.call(cmd,shell=True)
#
# grolist.append(f)
#
# return grolist
#def listRandomConfs(self):
# ''' Makes a list of all gro-files in the RandomConfs dir '''
# grolist = []
# files = os.listdir(self.inp.getOutputDir())
# for f in files:
# if f.endswith('.gro'):
# grolist.append(f)
# return grolist
#
def getNewSimTime(self):
''' Compute a new simulation time from a half-normal distribution '''
# Extend to 400 ns (hardcoded for villin)
new_length = 400000
r = random.random()
if(r>0.9):
nst = int(new_length/self.dt)
else:
nst = 25000000
return nst
def createMicroStates(self):
''' Build a micro-state MSM '''
sys.stderr.write("Creating msm project, ref_conf=%s.\n"%
str(self.ref_conf))
# Create the msm project from the reference conformation
#TODO IMAN provide weighting here
Proj = CreateCopernicusProject(self.ref_conf, self.filelist)
self.Proj = Proj
C1 = Conformation.Conformation.LoadFromPDB(self.ref_conf)
# Automate the clustering to only CA or backbone atoms
# TODO: fix this
a = C1["AtomNames"]
AtomIndices=where((a=="N") | (a=="C") | (a=="CA") | (a=="O"))[0]
sys.stderr.write("Cluster project.\n")
# Do msm-stuff
GenF = os.path.join('Data','Gens.nopbc.h5')
AssF = os.path.join('Data','Ass.nopbc.h5')
AssFTrimmed = os.path.join('Data','Assignment-trimmed.nopbc.h5')
RmsF = os.path.join('Data','RMSD.nopbc.h5')
Generators = Proj.ClusterProject(AtomIndices=AtomIndices,
NumGen=self.num_micro,Stride=30)
sys.stderr.write("Assign project.\n")
Assignments,RMSD,WhichTrajs = Proj.AssignProject(Generators,
AtomIndices=AtomIndices)
if os.path.exists(GenF):
os.remove(GenF)
Generators.SaveToHDF(GenF)
if os.path.exists(AssF):
os.remove(AssF)
msmbuilder.Serializer.SaveData(AssF,Assignments)
if os.path.exists(RmsF):
os.remove(RmsF)
msmbuilder.Serializer.SaveData(RmsF,RMSD)
sys.stderr.write("Trim data.\n")
# Trim data
Counts = msmbuilder.MSMLib.GetCountMatrixFromAssignments(Assignments,
self.num_micro,
LagTime=1,
Slide=True)
# Get the most populated state
sys.stderr.write("Get the most populated state.\n")
X0 = array((Counts+Counts.transpose()).sum(0)).flatten()
X0 = X0/sum(X0)
MaxState = argmax(X0)
## Calculate only times up to at maximum half the
## length of an individual trajectory
max_time = self.avgtime/2.
#max_time = ((self.dt * self.nstep / 1000)*0.5)
## SP this is almost certainly wrong:
#if max_time > 1:
# max_time=int(max_time)
#else:
# max_time=2
###max_time = 300 # hard-coded for villin
self.max_time = max_time
# More trimming
# PK want ErgodicTrim instead of EnforceMetastability
# This is from BuildMSM script
sys.stderr.write("More trimming...\n")
CountsAfterTrimming,Mapping=msmbuilder.MSMLib.ErgodicTrim(Counts)
msmbuilder.MSMLib.ApplyMappingToAssignments(Assignments,Mapping)
ReversibleCounts = msmbuilder.MSMLib.IterativeDetailedBalance(
CountsAfterTrimming,
Prior=0)
TC = msmbuilder.MSMLib.EstimateTransitionMatrix(ReversibleCounts)
Populations=numpy.array(ReversibleCounts.sum(0)).flatten()
Populations/=Populations.sum()
self.assignments=Assignments
self.T=TC
NumStates=max(Assignments.flatten())+1
sys.stderr.write("New number of states=%d\n"%NumStates)
if os.path.exists(AssFTrimmed):
os.remove(AssFTrimmed)
msmbuilder.Serializer.SaveData(AssFTrimmed,Assignments)
sys.stderr.write("Calculating implied time scales..\n")
# Calculate the implied time-scales
time = numpy.arange(1,max_time+1,1)
TS = msmbuilder.MSMLib.GetImpliedTimescales(AssFTrimmed,NumStates,time,
NumImpliedTimes=len(time)+1)
sys.stderr.write("TS=%s, time=%s\n"%(str(TS), time))
try:
plt.scatter(TS[:,0],TS[:,1])
plt.title('Lag times versus implied time scale')
plt.xlabel('Lag Time (assignment-steps)')
plt.ylabel('Implied Timescale (ps)')
plt.yscale('log')
timescalefn=os.path.join(self.inp.getOutputDir(), 'msm_timescales.png')
sys.stderr.write('Writing timescale plot to %s'%timescalefn)
try:
plt.savefig(timescalefn)
except:
fo=StringIO()
traceback.print_exception(sys.exc_info()[0],
sys.exc_info()[1],
sys.exc_info()[2], file=fo)
errmsg="Run error generating timescale plot: %s\n"%(fo.
getvalue())
sys.stderr.write(errmsg)
plt.close()
self.out.setOut('timescales', FileValue(timescalefn))
except ValueError as e:
fo=StringIO()
traceback.print_exception(sys.exc_info()[0], sys.exc_info()[1],
sys.exc_info()[2], file=fo)
errmsg="Run error generating timescale plot: %s\n"%(fo.getvalue())
sys.stderr.write(errmsg)
# Get random confs from each state
sys.stderr.write("Getting random configuration from each state..\n")
RandomConfs = Proj.GetRandomConfsFromEachState(Assignments,NumStates,1,
JustGetIndices=True)
# Compute the MaxState with the new assignments (ie. after trimming)
sys.stderr.write("Computing MaxState.\n")
Counts=msmbuilder.MSMLib.GetCountMatrixFromAssignments(Assignments,
NumStates,
LagTime=1,
Slide=True)
X0=array((Counts+Counts.transpose()).sum(0)).flatten()
X0=X0/sum(X0)
MaxState=argmax(X0)
# Create a tpr-file for trjconv with -pbc mol
#sys.stderr.write("making randomconfs.\n")
#try:
# os.mkdir('RandomConfs')
#except:
# pass
# we need a tpr file to be able to trjconv random confs later
#proc = subprocess.Popen(["grompp","-f","%s"%self.mdpfile,
# "-c","%s"%self.grofile[0],
# "-p", "%s"%self.topfile,"-o",
# "%s"%os.path.join(self.inp.getOutputDir(),
# 'topol.tpr')],
# stdin=None,stdout=sys.stdout, stderr=sys.stdout)
#proc.communicate(None)
# we pick one of the tpr files.
self.tprfile=self.inp.getInput('trajectories[0].tpr')
# Set a flag to indicate if we have written the maxstate.pdb-file
have_maxstate=0
for i in xrange(NumStates):
traj_num = RandomConfs[i][0][0]
frame_nr = RandomConfs[i][0][1]
lh5name = Proj.GetTrajFilename(traj_num)
#sys.stderr.write("trajectory name=%s\n"%lh5name)
trajdata = self.trajData[lh5name]
trajname = trajdata.xtc
#trajname = trajname.replace('.nopbc.lh5','.xtc')
time = frame_nr * trajdata.dt #* self.nstxtcout
#if(i<10*self.num_to_start):
#proc = subprocess.Popen(["trjconv","-f","%s"%trajname,"-s","%s"%os.path.join(self.inp.getOutputDir(),'topol.tpr'),"-o",os.path.join(self.inp.getOutputDir(),'micro%d.gro'%i),"-pbc","mol","-dump","%d"%time], stdin=subprocess.PIPE, stdout=sys.stdout, stderr=sys.stderr)
#proc.communicate("0")
# Write out a pdb of the most populated state
if(i==MaxState and have_maxstate==0):
maxstatefn=os.path.join(self.inp.getOutputDir(), 'maxstate.pdb')
sys.stderr.write("writing out pdb of most populated state.\n")
args = self.cmdnames.trjconv.split()
args += ["-f", trajname, "-s", self.tprfile,
"-o", maxstatefn, "-pbc", "mol", "-dump", "%d" % time]
if self.ndx is not None:
args.extend( [ "-n", self.ndx ] )
proc = subprocess.Popen(args, stdin=subprocess.PIPE,
stdout=sys.stdout, stderr=sys.stderr)
proc.communicate(self.grpname)
self.out.setOut('maxstate', FileValue(maxstatefn))
have_maxstate=1
# now evenly sample configurations and put them in the array
# newRuns. If we're later assigning macrosates, we'll overwrite them
# with adaptive sampling configurations
self.newRuns=[]
for j in xrange(self.num_to_start*self.num_macro):
# pick a cluster at random:
i=int(random.random()*NumStates)
traj_num = RandomConfs[i][0][0]
frame_nr = RandomConfs[i][0][1]
lh5name = Proj.GetTrajFilename(traj_num)
trajdata = self.trajData[lh5name]
trajname = trajdata.xtc
time = frame_nr * trajdata.dt
#maxstatefn=os.path.join(self.inp.getOutputDir(), '.conf')
outfn=os.path.join(self.inp.getOutputDir(), 'new_run_%d.gro'%(j))
args = self.cmdnames.trjconv.split()
args += ["-f", "%s"%trajname, "-s", self.tprfile,
"-o", outfn, "-pbc", "mol", "-dump", "%d" % time]
sys.stderr.write("writing out new run %s .\n"%outfn)
proc = subprocess.Popen(args, stdin=subprocess.PIPE,
stdout=sys.stdout,
stderr=sys.stderr)
proc.communicate('0')
self.newRuns.append(outfn)
#os.remove('mdout.mdp')
# Make a plot of the rmsd vs rel. population (rmsd)
# NumConfsPerState=1
# RandomConfs = Proj.GetRandomConfsFromEachState(Assignments,NumStates,NumConfsPerState,JustGetIndices=False)
# Allatoms=RandomConfs["Atoms"]
# CA=intersect1d(AtomRange,where(Allatoms=="CA")[0])
# rmsd=RandomConfs.CalcRMSD(C1,CA,CA).reshape((NumStates,NumConfsPerState)).mean(1)
# NumEigen=NumStates/100
# EigAns=msmbuilder.MSMLib.GetEigenvectors(T,NumEigen);
# Populations=EigAns[1][:,0]
# plt.plot(rmsd,-log(Populations),'o')
# plt.title("Free Energy Versus RMSD [nm]")
# plt.ylabel("Free Energy")
# plt.xlabel("RMSD [nm]")
# plt.savefig(os.path.join('cpc-data','msm_fe.png'))
# plt.close()
def createMacroStates(self):
''' Build a macro-state MSM '''
# Again we redirect output
#stdoutfn=os.path.join(self.inp.getOutputDir(), 'msm_stdout_macro.txt')
#stderrfn=os.path.join(self.inp.getOutputDir(), 'msm_stderr_macro.txt')
#old_stdout = sys.stdout
#sys.stdout=open(stdoutfn,'w')
#old_stderr = sys.stderr
#sys.stderr=open(stderrfn,'w')
Map = msmbuilder.MSMLib.PCCA(self.T,self.num_macro)
Assignments = self.assignments
Assignments = Map[Assignments]
NumStates = max(Assignments.flatten())+1
sys.stderr.write("Calculating macrostates with lag time %g.\n"%
self.lag_time)
# Now repeat any calculations with the new assignments
Counts = msmbuilder.MSMLib.GetCountMatrixFromAssignments(Assignments,
self.num_macro,
LagTime=self.lag_time,
Slide=True)
#PK want reversible MLE estimator again here
sys.stderr.write("Recalculating assignments & trimming again.\n")
CountsAfterTrimming,Mapping=msmbuilder.MSMLib.ErgodicTrim(Counts)
msmbuilder.MSMLib.ApplyMappingToAssignments(Assignments,Mapping)
ReversibleCounts = msmbuilder.MSMLib.IterativeDetailedBalance(
CountsAfterTrimming,
Prior=0)
TC = msmbuilder.MSMLib.EstimateTransitionMatrix(ReversibleCounts)
Populations=numpy.array(ReversibleCounts.sum(0)).flatten()
Populations/=Populations.sum()
# Again, get the most populated state
X0 = array((Counts+Counts.transpose()).sum(0)).flatten()
X0 = X0/sum(X0)
MaxState = argmax(X0)
tcoutf=os.path.join(self.inp.getOutputDir(), "tc.dat")
if scipy.sparse.issparse(TC):
scipy.savetxt(tcoutf, TC.todense())
else:
numpy.savetxt(tcoutf, TC, fmt="%12.6g" )
self.out.setOut('macro_transition_counts', FileValue(tcoutf))
woutf=os.path.join(self.inp.getOutputDir(), "weights.dat")
numpy.savetxt(woutf, X0, fmt="%12.6g" )
self.out.setOut('macro_weights', FileValue(woutf))
# Do adaptive sampling on the macrostates
nstates=int(self.num_macro*self.num_to_start)
sys.stderr.write("Adaptive sampling to %d=%d*%d states.\n"%
(nstates, self.num_macro, self.num_to_start))
Proj = self.Proj
StartStates = Proj.AdaptiveSampling(Counts.toarray(),nstates)
#print StartStates
#PK note JustGetIndices gives indices into original conformations
RandomConfs = Proj.GetRandomConfsFromEachState(Assignments,NumStates,1,
JustGetIndices=True)
self.newRuns=[]
self.macroConfs=[]
for k,v in StartStates.items():
num_started = 0
for i in xrange(NumStates):
if i==k:
trajnum = RandomConfs[i][0][0]
frame_nr = RandomConfs[i][0][1]
lh5name = Proj.GetTrajFilename(trajnum)
trajdata = self.trajData[lh5name]
trajname = trajdata.xtc
time = frame_nr * trajdata.dt #* self.nstxtcout
#time = frame_nr * self.dt *self.nstxtcout
#trajname = Proj.GetTrajFilename(trajnum)
#trajname = trajname.replace('.nopbc.lh5','.xtc')
first=True
# Use trjconv to write new starting confs
while(num_started < self.num_to_start):
sys.stderr.write("Writing new start confs.\n")
outfn=os.path.join(self.inp.getOutputDir(),
'macro%d-%d.gro'%(i,num_started))
args = self.cmdnames.trjconv.split()
args += ["-f", "%s" % trajname, "-s", self.tprfile,
"-o", outfn,
"-pbc", "mol", "-dump", "%d" % time]
proc = subprocess.Popen(args, stdin=subprocess.PIPE,
stdout=sys.stdout,
stderr=sys.stderr)
proc.communicate('0')
num_started = num_started + 1
self.newRuns.append(outfn)
if first:
self.macroConfs.append(outfn)
first=False
# now set the macro state outputs:
i=0
for fname in self.macroConfs:
self.out.setOut('macro_conf[%d]'%i, cpc.dataflow.FileValue(fname))
i+=1
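# Editorial driver sketch (an assumption, not part of the original file): the
# surrounding Copernicus controller is expected to build the project from its
# dataflow inputs/outputs and run the two stages in order, e.g.
#     proj = MSMProject(inp, out)   # inp/out: cpc.dataflow run input/output
#     proj.createMicroStates()
#     proj.createMacroStates()      # uses lag_time, num_macrostates, etc.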
| gpl-2.0 |
marcindulak/accts | accts/hpcsite.py | 1 | 2271 | import os
import subprocess
from agts import Cluster
def wrap_pylab(names=[]):
"""Use Agg backend and prevent windows from popping up."""
import matplotlib
matplotlib.use('Agg')
import pylab
def show(names=names):
if names:
name = names.pop(0)
else:
name = 'fig.png'
pylab.savefig(name)
pylab.show = show
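# Editorial example (not in the original file): once wrap_pylab() has run,
# pylab.show() saves figures instead of opening windows, consuming the given
# names first and then falling back to 'fig.png'.
#     wrap_pylab(['energy.png', 'forces.png'])
#     # 1st pylab.show() -> energy.png, 2nd -> forces.png, later calls -> fig.png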
class HPCCluster(Cluster):
def __init__(self):
pass
def write_pylab_wrapper(self, job):
"""Use Agg backend and prevent windows from popping up."""
fd = open(job.script + '.py', 'w')
fd.write('from hpcsite import wrap_pylab\n')
fd.write('wrap_pylab(%s)\n' % job.show)
fd.write('execfile(%r)\n' % job.script)
fd.close()
def submit(self, job):
dir = os.getcwd()
os.chdir(job.dir)
self.write_pylab_wrapper(job)
if job.queueopts is None:
if job.ncpus < 4:
ppn = '%d:opteron4' % job.ncpus
nodes = 1
elif job.ncpus % 16 == 0:
ppn = '16:xeon16'
nodes = job.ncpus // 16
elif job.ncpus % 8 == 0:
ppn = '8:xeon8'
nodes = job.ncpus // 8
else:
assert job.ncpus % 4 == 0
ppn = '4:opteron4'
nodes = job.ncpus // 4
queueopts = '-l nodes=%d:ppn=%s' % (nodes, ppn)
else:
queueopts = job.queueopts
qsub = (['/apps/dcc/bin/qsub', '-V'] + queueopts
+ ['-l',
'walltime=%02d:%02d:00' %
(job.walltime // 3600, job.walltime % 3600 // 60),
'-N',
job.name])
p = subprocess.Popen(qsub,
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
out, err = p.communicate(
'cd ${PBS_O_WORKDIR}\n' +
'touch %s.start\n' % job.name +
'export PYTHONPATH=' + dir +'/accts:${PYTHONPATH}&& ' +
' %s %s.py %s > %s.output\n' %
('python', job.script, job.args, job.name) +
'echo $? > %s.done\n' % job.name)
assert p.returncode == 0
id = out.split('.')[0]
job.pbsid = id
os.chdir(dir)
| gpl-3.0 |
stormsson/procedural_city_generation_wrapper | vendor/stormsson/pcg_wrapper/config_functions/input_image_setup.py | 1 | 1391 | import os
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import procedural_city_generation
class ImageSetup():
def __init__(self, temp_dir_path):
self.temp_dir_path = temp_dir_path
def getImages(self,rule_image_path, density_image_path):
'''
Loads the rule-image and population-density-image from the filesystem.
Saves a copy of the density image in the temp folder so that later steps can reuse it.
Parameters
----------
rule_image_path: String
Path to the rule-image on the filesystem
density_image_path: String
Path to the population-density image on the filesystem
Returns
--------
rule_img: np.ndarray
Rule-image as numpy array
density_img: np.ndarray
Density-image as numpy array
'''
#TODO: Document
rule_img = mpimg.imread(rule_image_path)
density_img = mpimg.imread(density_image_path)
density_image_name = os.path.basename(density_image_path)
plt.imsave(self.temp_dir_path+"/"+density_image_name.split(".")[0]+"diffused.png", density_img, cmap='gray')
with open(self.temp_dir_path+"/"+density_image_name.split(".")[0]+"isdiffused.txt", 'w') as f:
f.write("False")
rule_img*=255
density_img*=255
return rule_img, density_img
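# Editorial usage sketch (paths and temp dir are placeholders, not part of the
# original module):
#     setup = ImageSetup('/tmp/pcg_temp')
#     rule_img, density_img = setup.getImages('rule.png', 'density.png')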
| mpl-2.0 |
secondfoundation/Second-Foundation-Src | src/fed/reservant.py | 1 | 2375 | #!/bin/py
#
# Reservant is Latin, for Reserve.
#
# http://stackoverflow.com/questions/8139822/how-to-load-training-data-in-pybrain
#
#
# In this case the neural network has 3 inputs and 1 output.
# The csv file has 4 values on each line separated by a comma.
# The first 3 values are input values and the last one is the output.
#
# To calculate the number of hidden nodes we use a
# general rule of: (Number of inputs + outputs) * (2/3)
#
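# Editorial note: for this network that rule gives (3 inputs + 1 output) * (2/3)
# ~= 3 hidden nodes; the script below nevertheless uses 60 units per hidden layer.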
from pybrain.datasets import SupervisedDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised import BackpropTrainer
import pylab as pl
#
# will always have only a single output: the federal funds rate
# number of features can vary
#
# you will never require more than twice the number
# of hidden units as you have inputs
#
# ftp://ftp.sas.com/pub/neural/FAQ3.html#A_hu
features=int(3)
hidden =int(60)
steps =int(10000)
ds = SupervisedDataSet(features,1)
#tf = open('raw_dat/matrix.dat','r')
tf = open('raw_dat/diff.dat','r')
gdp=[]
cpi=[]
une=[]
i=0
time=[]
fund=[]
for line in tf.readlines():
data = [float(x) for x in line.strip().split('\t') if x != '']
#print data
time.append(i)
i=i+1
# first feature is GDP
gdp.append(data[0])
# second feature is cpi
cpi.append(data[1])
# third feature is unemployment
une.append(data[2])
fund.append(data[3])
indata = tuple(data[:features])
outdata = tuple(data[features:])
ds.addSample(indata,outdata)
# this builds a network that has the number of features as input,
# a *SINGLE* defined hidden layer and a single output neuron.
n = buildNetwork(ds.indim,hidden,hidden,ds.outdim)
t = BackpropTrainer(n,learningrate=0.01,momentum=0.8,verbose=True)
t.trainOnDataset(ds,steps)
t.testOnData(verbose=True)
# let's plot what we have
import matplotlib.pyplot as plt
# lets ask for a prediction: GDP,CPI, Unemployment
#print n.activate([.02,.02,-.002])
x = []
y = []
#print range(len(time))
for i in range(len(time)):
#print n.activate([gdp(i),cpi(i),une(i)])
x.append(.25*time[i]+1954.5)
y.append(n.activate([gdp[i],cpi[i],une[i]]))
pl.plot(x,fund)
pl.plot(x,y)
plt.title('Neural Network Predictor')
plt.legend(['Federal Funds Rate % Change','Predicted Rate Change'])
pl.show()
#plt.plot(time,gdp,'x',time,cpi,'o',time,une,'.')
#plt.show()
#
# nick and jay
#
# 8/28/12
#
| lgpl-2.1 |
jayflo/scikit-learn | examples/applications/plot_outlier_detection_housing.py | 243 | 5577 | """
====================================
Outlier detection on a real data set
====================================
This example illustrates the need for robust covariance estimation
on a real data set. It is useful both for outlier detection and for
a better understanding of the data structure.
We selected two sets of two variables from the Boston housing data set
as an illustration of what kind of analysis can be done with several
outlier detection tools. For the purpose of visualization, we are working
with two-dimensional examples, but one should be aware that things are
not so trivial in high-dimension, as it will be pointed out.
In both examples below, the main result is that the empirical covariance
estimate, as a non-robust one, is highly influenced by the heterogeneous
structure of the observations. Although the robust covariance estimate is
able to focus on the main mode of the data distribution, it sticks to the
assumption that the data should be Gaussian distributed, yielding some biased
estimation of the data structure, though one that is still accurate to some extent.
The One-Class SVM algorithm, by contrast, does not assume any parametric form of
the data distribution and can therefore model complex, non-Gaussian shapes better.
First example
-------------
The first example illustrates how robust covariance estimation can help
concentrating on a relevant cluster when another one exists. Here, many
observations are confounded into one and break down the empirical covariance
estimation.
Of course, some screening tools would have pointed out the presence of two
clusters (Support Vector Machines, Gaussian Mixture Models, univariate
outlier detection, ...). But had it been a high-dimensional example, none
of these could be applied that easily.
Second example
--------------
The second example shows the ability of the Minimum Covariance Determinant
robust estimator of covariance to concentrate on the main mode of the data
distribution: the location seems to be well estimated, although the covariance
is hard to estimate due to the banana-shaped distribution. Anyway, we can
get rid of some outlying observations.
The One-Class SVM is able to capture the real data structure, but the
difficulty is to adjust its kernel bandwidth parameter so as to obtain
a good compromise between the shape of the data scatter matrix and the
risk of over-fitting the data.
"""
print(__doc__)
# Author: Virgile Fritsch <[email protected]>
# License: BSD 3 clause
import numpy as np
from sklearn.covariance import EllipticEnvelope
from sklearn.svm import OneClassSVM
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.datasets import load_boston
# Get data
X1 = load_boston()['data'][:, [8, 10]] # two clusters
X2 = load_boston()['data'][:, [5, 12]] # "banana"-shaped
# Define "classifiers" to be used
classifiers = {
"Empirical Covariance": EllipticEnvelope(support_fraction=1.,
contamination=0.261),
"Robust Covariance (Minimum Covariance Determinant)":
EllipticEnvelope(contamination=0.261),
"OCSVM": OneClassSVM(nu=0.261, gamma=0.05)}
colors = ['m', 'g', 'b']
legend1 = {}
legend2 = {}
# Learn a frontier for outlier detection with several classifiers
xx1, yy1 = np.meshgrid(np.linspace(-8, 28, 500), np.linspace(3, 40, 500))
xx2, yy2 = np.meshgrid(np.linspace(3, 10, 500), np.linspace(-5, 45, 500))
for i, (clf_name, clf) in enumerate(classifiers.items()):
plt.figure(1)
clf.fit(X1)
Z1 = clf.decision_function(np.c_[xx1.ravel(), yy1.ravel()])
Z1 = Z1.reshape(xx1.shape)
legend1[clf_name] = plt.contour(
xx1, yy1, Z1, levels=[0], linewidths=2, colors=colors[i])
plt.figure(2)
clf.fit(X2)
Z2 = clf.decision_function(np.c_[xx2.ravel(), yy2.ravel()])
Z2 = Z2.reshape(xx2.shape)
legend2[clf_name] = plt.contour(
xx2, yy2, Z2, levels=[0], linewidths=2, colors=colors[i])
legend1_values_list = list( legend1.values() )
legend1_keys_list = list( legend1.keys() )
# Plot the results (= shape of the data points cloud)
plt.figure(1) # two clusters
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X1[:, 0], X1[:, 1], color='black')
bbox_args = dict(boxstyle="round", fc="0.8")
arrow_args = dict(arrowstyle="->")
plt.annotate("several confounded points", xy=(24, 19),
xycoords="data", textcoords="data",
xytext=(13, 10), bbox=bbox_args, arrowprops=arrow_args)
plt.xlim((xx1.min(), xx1.max()))
plt.ylim((yy1.min(), yy1.max()))
plt.legend((legend1_values_list[0].collections[0],
legend1_values_list[1].collections[0],
legend1_values_list[2].collections[0]),
(legend1_keys_list[0], legend1_keys_list[1], legend1_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("accessibility to radial highways")
plt.xlabel("pupil-teacher ratio by town")
legend2_values_list = list( legend2.values() )
legend2_keys_list = list( legend2.keys() )
plt.figure(2) # "banana" shape
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X2[:, 0], X2[:, 1], color='black')
plt.xlim((xx2.min(), xx2.max()))
plt.ylim((yy2.min(), yy2.max()))
plt.legend((legend2_values_list[0].collections[0],
legend2_values_list[1].collections[0],
legend2_values_list[2].collections[0]),
           (legend2_keys_list[0], legend2_keys_list[1], legend2_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("% lower status of the population")
plt.xlabel("average number of rooms per dwelling")
plt.show()
| bsd-3-clause |
liesbethvanherpe/NeuroM | neurom/view/tests/test_common.py | 5 | 7452 | # Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .utils import get_fig_2d, get_fig_3d # needs to be at top to trigger matplotlib Agg backend
import os
from nose import tools as nt
import shutil
import tempfile
import numpy as np
from neurom.view.common import (plt, figure_naming, get_figure, save_plot, plot_style,
plot_title, plot_labels, plot_legend, update_plot_limits, plot_ticks,
plot_sphere, plot_cylinder)
def test_figure_naming():
pretitle, posttitle, prefile, postfile = figure_naming(pretitle='Test', prefile="", postfile=3)
nt.eq_(pretitle, 'Test -- ')
nt.eq_(posttitle, "")
nt.eq_(prefile, "")
nt.eq_(postfile, "_3")
pretitle, posttitle, prefile, postfile = figure_naming(pretitle='', posttitle="Test", prefile="test", postfile="")
nt.eq_(pretitle, "")
nt.eq_(posttitle, " -- Test")
nt.eq_(prefile, "test_")
nt.eq_(postfile, "")
def test_get_figure():
fig_old = plt.figure()
fig, ax = get_figure(new_fig=False)
nt.eq_(fig, fig_old)
nt.eq_(ax.colNum, 0)
nt.eq_(ax.rowNum, 0)
fig1, ax1 = get_figure(new_fig=True, subplot=224)
nt.ok_(fig1 != fig_old)
nt.eq_(ax1.colNum, 1)
nt.eq_(ax1.rowNum, 1)
fig2, ax2 = get_figure(new_fig=True, subplot=[1, 1, 1])
nt.eq_(ax2.colNum, 0)
nt.eq_(ax2.rowNum, 0)
plt.close('all')
fig = plt.figure()
ax = fig.add_subplot(111)
fig2, ax2 = get_figure(new_fig=False)
nt.eq_(fig2, plt.gcf())
nt.eq_(ax2, plt.gca())
plt.close('all')
def test_save_plot():
fig_name = 'Figure.png'
tempdir = tempfile.mkdtemp('test_common')
try:
old_dir = os.getcwd()
os.chdir(tempdir)
fig_old = plt.figure()
fig = save_plot(fig_old)
nt.ok_(os.path.isfile(fig_name))
os.remove(fig_name)
fig = save_plot(fig_old, output_path='subdir')
nt.ok_(os.path.isfile(os.path.join(tempdir, 'subdir', fig_name)))
finally:
os.chdir(old_dir)
shutil.rmtree(tempdir)
plt.close('all')
def test_plot_title():
with get_fig_2d() as (fig, ax):
plot_title(ax)
nt.eq_(ax.get_title(), 'Figure')
with get_fig_2d() as (fig, ax):
plot_title(ax, title='Test')
nt.eq_(ax.get_title(), 'Test')
def test_plot_labels():
with get_fig_2d() as (fig, ax):
plot_labels(ax)
nt.eq_(ax.get_xlabel(), 'X')
nt.eq_(ax.get_ylabel(), 'Y')
with get_fig_2d() as (fig, ax):
plot_labels(ax, xlabel='T', ylabel='R')
nt.eq_(ax.get_xlabel(), 'T')
nt.eq_(ax.get_ylabel(), 'R')
with get_fig_3d() as (fig0, ax0):
plot_labels(ax0)
nt.eq_(ax0.get_zlabel(), 'Z')
with get_fig_3d() as (fig0, ax0):
plot_labels(ax0, zlabel='T')
nt.eq_(ax0.get_zlabel(), 'T')
def test_plot_legend():
with get_fig_2d() as (fig, ax):
plot_legend(ax)
legend = ax.get_legend()
nt.ok_(legend is None)
with get_fig_2d() as (fig, ax):
ax.plot([1, 2, 3], [1, 2, 3], label='line 1')
plot_legend(ax, no_legend=False)
legend = ax.get_legend()
nt.eq_(legend.get_texts()[0].get_text(), 'line 1')
def test_plot_limits():
with get_fig_2d() as (fig, ax):
nt.assert_raises(AssertionError, update_plot_limits, ax, white_space=0)
with get_fig_2d() as (fig, ax):
ax.dataLim.update_from_data_xy(((0, -100), (100, 0)))
update_plot_limits(ax, white_space=0)
nt.eq_(ax.get_xlim(), (0, 100))
nt.eq_(ax.get_ylim(), (-100, 0))
with get_fig_3d() as (fig0, ax0):
update_plot_limits(ax0, white_space=0)
zlim0 = ax0.get_zlim()
nt.ok_(np.allclose(ax0.get_zlim(), zlim0))
def test_plot_ticks():
with get_fig_2d() as (fig, ax):
plot_ticks(ax)
nt.ok_(len(ax.get_xticks()))
nt.ok_(len(ax.get_yticks()))
with get_fig_2d() as (fig, ax):
plot_ticks(ax, xticks=[], yticks=[])
nt.eq_(len(ax.get_xticks()), 0)
nt.eq_(len(ax.get_yticks()), 0)
with get_fig_2d() as (fig, ax):
plot_ticks(ax, xticks=np.arange(3), yticks=np.arange(4))
nt.eq_(len(ax.get_xticks()), 3)
nt.eq_(len(ax.get_yticks()), 4)
with get_fig_3d() as (fig0, ax0):
plot_ticks(ax0)
nt.ok_(len(ax0.get_zticks()))
with get_fig_3d() as (fig0, ax0):
plot_ticks(ax0, zticks=[])
nt.eq_(len(ax0.get_zticks()), 0)
with get_fig_3d() as (fig0, ax0):
plot_ticks(ax0, zticks=np.arange(3))
nt.eq_(len(ax0.get_zticks()), 3)
def test_plot_style():
with get_fig_2d() as (fig, ax):
ax.dataLim.update_from_data_xy(((0, -100), (100, 0)))
plot_style(fig, ax)
nt.eq_(ax.get_title(), 'Figure')
nt.eq_(ax.get_xlabel(), 'X')
nt.eq_(ax.get_ylabel(), 'Y')
with get_fig_2d() as (fig, ax):
ax.dataLim.update_from_data_xy(((0, -100), (100, 0)))
plot_style(fig, ax, no_axes=True)
nt.ok_(not ax.get_frame_on())
nt.ok_(not ax.xaxis.get_visible())
nt.ok_(not ax.yaxis.get_visible())
with get_fig_2d() as (fig, ax):
ax.dataLim.update_from_data_xy(((0, -100), (100, 0)))
plot_style(fig, ax, tight=True)
nt.ok_(fig.get_tight_layout())
def test_plot_cylinder():
fig0, ax0 = get_figure(params={'projection': '3d'})
start, end = np.array([0, 0, 0]), np.array([1, 0, 0])
plot_cylinder(ax0, start=start, end=end,
start_radius=0, end_radius=10.,
color='black', alpha=1.)
nt.ok_(ax0.has_data())
def test_plot_sphere():
fig0, ax0 = get_figure(params={'projection': '3d'})
plot_sphere(ax0, [0, 0, 0], 10., color='black', alpha=1.)
nt.ok_(ax0.has_data())
| bsd-3-clause |
kastnerkyle/crikey | conditional_audio/fruit/fruitspeecher.py | 1 | 27533 | import numpy as np
import theano
from theano import tensor
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from scipy.io import wavfile
import os
import sys
from kdllib import load_checkpoint, dense_to_one_hot, plot_lines_iamondb_example
from kdllib import fetch_fruitspeech, list_iterator, np_zeros, GRU, GRUFork
from kdllib import make_weights, as_shared, adam, gradient_clipping
from kdllib import get_values_from_function, set_shared_variables_in_function
from kdllib import save_checkpoint, save_weights, sample_diagonal_gmm
from kdllib import diagonal_gmm, diagonal_phase_gmm, soundsc
if __name__ == "__main__":
import argparse
speech = fetch_fruitspeech()
X = speech["data"]
y = speech["target"]
vocabulary = speech["vocabulary"]
vocabulary_size = speech["vocabulary_size"]
reconstruct = speech["reconstruct"]
fs = speech["sample_rate"]
X = np.array([x.astype(theano.config.floatX) for x in X])
y = np.array([yy.astype(theano.config.floatX) for yy in y])
minibatch_size = 20
n_epochs = 20000 # Used way at the bottom in the training loop!
checkpoint_every_n = 500
# Was 300
cut_len = 41 # Used way at the bottom in the training loop!
random_state = np.random.RandomState(1999)
train_itr = list_iterator([X, y], minibatch_size, axis=1, stop_index=80,
randomize=True, make_mask=True)
valid_itr = list_iterator([X, y], minibatch_size, axis=1, start_index=80,
make_mask=True)
X_mb, X_mb_mask, c_mb, c_mb_mask = next(train_itr)
train_itr.reset()
input_dim = X_mb.shape[-1]
n_hid = 400
n_v_hid = 100
att_size = 10
n_components = 20
n_out = X_mb.shape[-1]
n_chars = vocabulary_size
# mag and phase each take n_out // 2 dimensions
# the 2 covers mu and sigma per dimension per component, plus n_components for the mixture coeffs
n_density = 2 * n_out // 2 * n_components + n_components
desc = "Speech generation"
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('-s', '--sample',
help='Sample from a checkpoint file',
default=None,
required=False)
parser.add_argument('-p', '--plot',
help='Plot training curves from a checkpoint file',
default=None,
required=False)
parser.add_argument('-w', '--write',
help='The string to use',
default=None,
required=False)
# http://stackoverflow.com/questions/12116685/how-can-i-require-my-python-scripts-argument-to-be-a-float-between-0-0-1-0-usin
def restricted_float(x):
x = float(x)
if x < 0.0:
raise argparse.ArgumentTypeError("%r not range [0.0, inf]" % (x,))
return x
parser.add_argument('-b', '--bias',
help='Bias parameter as a float',
type=restricted_float,
default=.1,
required=False)
def restricted_int(x):
if x is None:
# None makes it "auto" sample
return x
x = int(x)
if x < 1:
raise argparse.ArgumentTypeError("%r not range [1, inf]" % (x,))
return x
parser.add_argument('-sl', '--sample_length',
help='Number of steps to sample, default is automatic',
type=restricted_int,
default=None,
required=False)
parser.add_argument('-c', '--continue', dest="cont",
help='Continue training from another saved model',
default=None,
required=False)
args = parser.parse_args()
if args.plot is not None or args.sample is not None:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
if args.sample is not None:
checkpoint_file = args.sample
else:
checkpoint_file = args.plot
if not os.path.exists(checkpoint_file):
raise ValueError("Checkpoint file path %s" % checkpoint_file,
" does not exist!")
print(checkpoint_file)
checkpoint_dict = load_checkpoint(checkpoint_file)
train_costs = checkpoint_dict["overall_train_costs"]
valid_costs = checkpoint_dict["overall_valid_costs"]
plt.plot(train_costs)
plt.plot(valid_costs)
plt.savefig("costs.png")
X_mb, X_mb_mask, c_mb, c_mb_mask = next(valid_itr)
valid_itr.reset()
prev_h1, prev_h2, prev_h3 = [np_zeros((minibatch_size, n_hid))
for i in range(3)]
prev_kappa = np_zeros((minibatch_size, att_size))
prev_w = np_zeros((minibatch_size, n_chars))
bias = args.bias
if args.sample is not None:
predict_function = checkpoint_dict["predict_function"]
attention_function = checkpoint_dict["attention_function"]
sample_function = checkpoint_dict["sample_function"]
if args.write is not None:
sample_string = args.write
print("Sampling using sample string %s" % sample_string)
oh = dense_to_one_hot(
np.array([vocabulary[c] for c in sample_string]),
vocabulary_size)
c_mb = np.zeros(
(len(oh), minibatch_size, oh.shape[-1])).astype(c_mb.dtype)
c_mb[:len(oh), :, :] = oh[:, None, :]
c_mb = c_mb[:len(oh)]
c_mb_mask = np.ones_like(c_mb[:, :, 0])
if args.sample_length is None:
# Automatic sampling stop as described in Graves' paper
# Assume an average of 30 timesteps per char
n_steps = 30 * c_mb.shape[0]
step_inc = n_steps
max_steps = 25000
max_steps_buf = max_steps + n_steps
completed = [np.zeros((max_steps_buf, X_mb.shape[-1]))
for i in range(c_mb.shape[1])]
max_indices = [None] * c_mb.shape[1]
completed_indices = set()
# hardcoded upper limit
while n_steps < max_steps:
rvals = sample_function(c_mb, c_mb_mask, prev_h1, prev_h2,
prev_h3, prev_kappa, prev_w, bias,
n_steps)
sampled, h1_s, h2_s, h3_s, k_s, w_s, stop_s, stop_h = rvals
for i in range(c_mb.shape[1]):
max_ind = None
for j in range(len(stop_s)):
if np.all(stop_h[j, i] > stop_s[j, i]):
max_ind = j
if max_ind is not None:
completed_indices = completed_indices | set([i])
completed[i][:max_ind] = sampled[:max_ind, i]
max_indices[i] = max_ind
# if most samples meet the criteria call it good
if len(completed_indices) >= .8 * c_mb.shape[1]:
break
n_steps += step_inc
print("Completed auto sampling after %i steps" % n_steps)
# cut out garbage
completed = [completed[i] for i in completed_indices]
cond = c_mb[:, np.array(list(completed_indices))]
else:
fixed_steps = args.sample_length
rvals = sample_function(c_mb, c_mb_mask, prev_h1, prev_h2,
prev_h3, prev_kappa, prev_w, bias,
fixed_steps)
sampled, h1_s, h2_s, h3_s, k_s, w_s, stop_s, stop_h = rvals
completed = [sampled[:, i]
for i in range(sampled.shape[1])]
cond = c_mb
print("Completed sampling after %i steps" % fixed_steps)
rlookup = {v: k for k, v in vocabulary.items()}
for i in range(len(completed)):
ex = completed[i]
ex_str = "".join([rlookup[c]
for c in np.argmax(cond[:, i], axis=1)])
s = "gen_%s_%i.wav" % (ex_str, i)
ii = reconstruct(ex)
wavfile.write(s, fs, soundsc(ii))
valid_itr.reset()
print("Sampling complete, exiting...")
sys.exit()
else:
print("No plotting arguments, starting training mode!")
X_sym = tensor.tensor3("X_sym")
X_sym.tag.test_value = X_mb[:cut_len]
X_mask_sym = tensor.matrix("X_mask_sym")
X_mask_sym.tag.test_value = X_mb_mask[:cut_len]
c_sym = tensor.tensor3("c_sym")
c_sym.tag.test_value = c_mb
c_mask_sym = tensor.matrix("c_mask_sym")
c_mask_sym.tag.test_value = c_mb_mask
bias_sym = tensor.scalar("bias_sym")
bias_sym.tag.test_value = 0.
init_h1 = tensor.matrix("init_h1")
init_h1.tag.test_value = np_zeros((minibatch_size, n_hid))
init_h2 = tensor.matrix("init_h2")
init_h2.tag.test_value = np_zeros((minibatch_size, n_hid))
init_h3 = tensor.matrix("init_h3")
init_h3.tag.test_value = np_zeros((minibatch_size, n_hid))
init_kappa = tensor.matrix("init_kappa")
init_kappa.tag.test_value = np_zeros((minibatch_size, att_size))
init_w = tensor.matrix("init_w")
init_w.tag.test_value = np_zeros((minibatch_size, n_chars))
params = []
cell1 = GRU(input_dim, n_hid, random_state)
cell2 = GRU(n_hid, n_hid, random_state)
cell3 = GRU(n_hid, n_hid, random_state)
params += cell1.get_params()
params += cell2.get_params()
params += cell3.get_params()
v_cell1 = GRU(1, n_v_hid, random_state)
params += v_cell1.get_params()
# Use GRU classes only to fork 1 inp to 2 inp:gate pairs
inp_to_h1 = GRUFork(input_dim, n_hid, random_state)
inp_to_h2 = GRUFork(input_dim, n_hid, random_state)
inp_to_h3 = GRUFork(input_dim, n_hid, random_state)
att_to_h1 = GRUFork(n_chars, n_hid, random_state)
att_to_h2 = GRUFork(n_chars, n_hid, random_state)
att_to_h3 = GRUFork(n_chars, n_hid, random_state)
h1_to_h2 = GRUFork(n_hid, n_hid, random_state)
h1_to_h3 = GRUFork(n_hid, n_hid, random_state)
h2_to_h3 = GRUFork(n_hid, n_hid, random_state)
params += inp_to_h1.get_params()
params += inp_to_h2.get_params()
params += inp_to_h3.get_params()
params += att_to_h1.get_params()
params += att_to_h2.get_params()
params += att_to_h3.get_params()
params += h1_to_h2.get_params()
params += h1_to_h3.get_params()
params += h2_to_h3.get_params()
inp_to_v_h1 = GRUFork(1, n_v_hid, random_state)
params += inp_to_v_h1.get_params()
h1_to_att_a, h1_to_att_b, h1_to_att_k = make_weights(n_hid, 3 * [att_size],
random_state)
params += [h1_to_att_a, h1_to_att_b, h1_to_att_k]
# Need a , on single results since it always returns a list
h1_to_outs, = make_weights(n_hid, [n_hid], random_state)
h2_to_outs, = make_weights(n_hid, [n_hid], random_state)
h3_to_outs, = make_weights(n_hid, [n_hid], random_state)
params += [h1_to_outs, h2_to_outs, h3_to_outs]
# 2 * for mag and phase
v_outs_to_corr_outs, = make_weights(n_v_hid, [1], random_state)
corr_outs_to_final_outs, = make_weights(n_hid, [2 * n_density],
random_state)
params += [v_outs_to_corr_outs, corr_outs_to_final_outs]
inpt = X_sym[:-1]
target = X_sym[1:]
mask = X_mask_sym[1:]
context = c_sym * c_mask_sym.dimshuffle(0, 1, 'x')
inp_h1, inpgate_h1 = inp_to_h1.proj(inpt)
inp_h2, inpgate_h2 = inp_to_h2.proj(inpt)
inp_h3, inpgate_h3 = inp_to_h3.proj(inpt)
u = tensor.arange(c_sym.shape[0]).dimshuffle('x', 'x', 0)
u = tensor.cast(u, theano.config.floatX)
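# calc_phi evaluates the Gaussian window attention used in Graves-style
# synthesis models:
#     phi(u) = sum_k a_k * exp(-b_k * (kappa_k - u)^2)
# summed over the mixture components, giving one soft alignment weight per
# position u of the conditioning sequence.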
def calc_phi(k_t, a_t, b_t, u_c):
a_t = a_t.dimshuffle(0, 1, 'x')
b_t = b_t.dimshuffle(0, 1, 'x')
ss1 = (k_t.dimshuffle(0, 1, 'x') - u_c) ** 2
ss2 = -b_t * ss1
ss3 = a_t * tensor.exp(ss2)
ss4 = ss3.sum(axis=1)
return ss4
def step(xinp_h1_t, xgate_h1_t,
xinp_h2_t, xgate_h2_t,
xinp_h3_t, xgate_h3_t,
h1_tm1, h2_tm1, h3_tm1,
k_tm1, w_tm1, ctx):
attinp_h1, attgate_h1 = att_to_h1.proj(w_tm1)
h1_t = cell1.step(xinp_h1_t + attinp_h1, xgate_h1_t + attgate_h1,
h1_tm1)
h1inp_h2, h1gate_h2 = h1_to_h2.proj(h1_t)
h1inp_h3, h1gate_h3 = h1_to_h3.proj(h1_t)
a_t = h1_t.dot(h1_to_att_a)
b_t = h1_t.dot(h1_to_att_b)
k_t = h1_t.dot(h1_to_att_k)
a_t = tensor.exp(a_t)
b_t = tensor.exp(b_t)
k_t = k_tm1 + tensor.exp(k_t)
ss4 = calc_phi(k_t, a_t, b_t, u)
ss5 = ss4.dimshuffle(0, 1, 'x')
ss6 = ss5 * ctx.dimshuffle(1, 0, 2)
w_t = ss6.sum(axis=1)
attinp_h2, attgate_h2 = att_to_h2.proj(w_t)
attinp_h3, attgate_h3 = att_to_h3.proj(w_t)
h2_t = cell2.step(xinp_h2_t + h1inp_h2 + attinp_h2,
xgate_h2_t + h1gate_h2 + attgate_h2, h2_tm1)
h2inp_h3, h2gate_h3 = h2_to_h3.proj(h2_t)
h3_t = cell3.step(xinp_h3_t + h1inp_h3 + h2inp_h3 + attinp_h3,
xgate_h3_t + h1gate_h3 + h2gate_h3 + attgate_h3,
h3_tm1)
return h1_t, h2_t, h3_t, k_t, w_t
init_x = as_shared(np_zeros((minibatch_size, n_out)))
srng = RandomStreams(1999)
def _slice_outs(outs):
k = n_components
half = n_out // 2
outs = outs.reshape((-1, n_density))
mu = outs[:, 0:half * k].reshape((-1, half, k))
sigma = outs[:, half * k:2 * half * k].reshape(
(-1, half, k))
coeff = outs[:, 2 * half * k:]
sigma = tensor.exp(sigma - bias_sym) + 1E-6
coeff = tensor.nnet.softmax(coeff * (1. + bias_sym)) + 1E-6
return mu, sigma, coeff
# Used to calculate stopping heuristic from sections 5.3
u_max = 0. * tensor.arange(c_sym.shape[0]) + c_sym.shape[0]
u_max = u_max.dimshuffle('x', 'x', 0)
u_max = tensor.cast(u_max, theano.config.floatX)
def sample_out_step(x_tm1, v_h1_tm1):
vinp_h1_t, vgate_h1_t = inp_to_v_h1.proj(x_tm1)
v_h1_t = v_cell1.step(vinp_h1_t, vgate_h1_t, v_h1_tm1)
return v_h1_t
def sample_step(x_tm1, h1_tm1, h2_tm1, h3_tm1, k_tm1, w_tm1, ctx):
xinp_h1_t, xgate_h1_t = inp_to_h1.proj(x_tm1)
xinp_h2_t, xgate_h2_t = inp_to_h2.proj(x_tm1)
xinp_h3_t, xgate_h3_t = inp_to_h3.proj(x_tm1)
attinp_h1, attgate_h1 = att_to_h1.proj(w_tm1)
h1_t = cell1.step(xinp_h1_t + attinp_h1, xgate_h1_t + attgate_h1,
h1_tm1)
h1inp_h2, h1gate_h2 = h1_to_h2.proj(h1_t)
h1inp_h3, h1gate_h3 = h1_to_h3.proj(h1_t)
a_t = h1_t.dot(h1_to_att_a)
b_t = h1_t.dot(h1_to_att_b)
k_t = h1_t.dot(h1_to_att_k)
a_t = tensor.exp(a_t)
b_t = tensor.exp(b_t)
k_t = k_tm1 + tensor.exp(k_t)
ss_t = calc_phi(k_t, a_t, b_t, u)
# calculate and return stopping criteria
sh_t = calc_phi(k_t, a_t, b_t, u_max)
ss5 = ss_t.dimshuffle(0, 1, 'x')
ss6 = ss5 * ctx.dimshuffle(1, 0, 2)
w_t = ss6.sum(axis=1)
attinp_h2, attgate_h2 = att_to_h2.proj(w_t)
attinp_h3, attgate_h3 = att_to_h3.proj(w_t)
h2_t = cell2.step(xinp_h2_t + h1inp_h2 + attinp_h2,
xgate_h2_t + h1gate_h2 + attgate_h2, h2_tm1)
h2inp_h3, h2gate_h3 = h2_to_h3.proj(h2_t)
h3_t = cell3.step(xinp_h3_t + h1inp_h3 + h2inp_h3 + attinp_h3,
xgate_h3_t + h1gate_h3 + h2gate_h3 + attgate_h3,
h3_tm1)
out_t = h1_t.dot(h1_to_outs) + h2_t.dot(h2_to_outs) + h3_t.dot(
h3_to_outs)
out_t = out_t.dimshuffle(1, 0, 'x')
# vertical scan
init_v_out = tensor.zeros((out_t.shape[1], n_v_hid))
v_out_t, updates = theano.scan(
fn=sample_out_step,
sequences=[out_t],
outputs_info=[init_v_out])
corr_out_t = v_out_t.dot(v_outs_to_corr_outs)
corr_out_t = corr_out_t[:, :, 0].dimshuffle(1, 0)
corr_out_t = corr_out_t.dot(corr_outs_to_final_outs)
split = corr_out_t.shape[-1] // 2
mag_out_t = corr_out_t[:, :split]
phase_out_t = corr_out_t[:, split:]
mu_mag, sigma_mag, coeff_mag = _slice_outs(mag_out_t)
mu_phase, sigma_phase, coeff_phase = _slice_outs(phase_out_t)
s_mag = sample_diagonal_gmm(mu_mag, sigma_mag, coeff_mag, srng)
s_phase = sample_diagonal_gmm(mu_phase, sigma_phase, coeff_phase, srng)
"""
# Set sample to debug in order to check test values
s_mag = sample_diagonal_gmm(mu_mag, sigma_mag, coeff_mag, srng,
debug=True)
s_phase = sample_diagonal_gmm(mu_phase, sigma_phase, coeff_phase, srng,
debug=True)
"""
s_phase = tensor.mod(s_phase + np.pi, 2 * np.pi) - np.pi
x_t = tensor.concatenate([s_mag, s_phase], axis=-1)
return x_t, h1_t, h2_t, h3_t, k_t, w_t, ss_t, sh_t
n_steps_sym = tensor.iscalar()
n_steps_sym.tag.test_value = 10
(sampled, h1_s, h2_s, h3_s, k_s, w_s, stop_s, stop_h), supdates = theano.scan(
fn=sample_step,
n_steps=n_steps_sym,
sequences=[],
outputs_info=[init_x, init_h1, init_h2, init_h3,
init_kappa, init_w, None, None],
non_sequences=[context])
"""
# Testing step function
r = step(inp_h1[0], inpgate_h1[0], inp_h2[0], inpgate_h2[0],
inp_h3[0], inpgate_h3[0],
init_h1, init_h2, init_h3, init_kappa, init_w, context)
r = step(inp_h1[1], inpgate_h1[1], inp_h2[1], inpgate_h2[1],
inp_h3[1], inpgate_h3[1],
r[0], r[1], r[2], r[3], r[4], context)
"""
(h1, h2, h3, kappa, w), updates = theano.scan(
fn=step,
sequences=[inp_h1, inpgate_h1,
inp_h2, inpgate_h2,
inp_h3, inpgate_h3],
outputs_info=[init_h1, init_h2, init_h3, init_kappa, init_w],
non_sequences=[context])
outs = h1.dot(h1_to_outs) + h2.dot(h2_to_outs) + h3.dot(h3_to_outs)
orig_shapes = outs.shape
outs = outs.dimshuffle(2, 1, 0)
# pre project? cutting down to 1 dim really hurts
outs = outs.reshape((orig_shapes[2], orig_shapes[1] * orig_shapes[0], 1))
def out_step(x_tm1, v_h1_tm1):
vinp_h1_t, vgate_h1_t = inp_to_v_h1.proj(x_tm1)
v_h1_t = v_cell1.step(vinp_h1_t, vgate_h1_t, v_h1_tm1)
return v_h1_t
init_v_outs = tensor.zeros((outs.shape[1], n_v_hid))
v_outs, updates = theano.scan(
fn=out_step,
sequences=[outs],
outputs_info=[init_v_outs])
corr_outs = v_outs.dot(v_outs_to_corr_outs)
corr_outs = corr_outs[:, :, 0].reshape((orig_shapes[2], orig_shapes[1],
orig_shapes[0]))
corr_outs = corr_outs.dimshuffle(2, 1, 0)
corr_outs = corr_outs.dot(corr_outs_to_final_outs)
split = corr_outs.shape[-1] // 2
mag_outs = corr_outs[:, :, :split]
phase_outs = corr_outs[:, :, split:]
mu_mag, sigma_mag, coeff_mag = _slice_outs(mag_outs)
mu_phase, sigma_phase, coeff_phase = _slice_outs(phase_outs)
target_split = n_out // 2
mag_target = target[:, :, :target_split]
phase_target = target[:, :, target_split:]
mag_cost = diagonal_gmm(
mag_target, mu_mag, sigma_mag, coeff_mag)
phase_cost = diagonal_phase_gmm(
phase_target, mu_phase, sigma_phase, coeff_phase)
cost = mag_cost + phase_cost
cost = cost * mask
cost = cost.sum() / cut_len
grads = tensor.grad(cost, params)
grads = gradient_clipping(grads, 10.)
learning_rate = 1E-3
opt = adam(params, learning_rate)
updates = opt.updates(params, grads)
train_function = theano.function([X_sym, X_mask_sym, c_sym, c_mask_sym,
init_h1, init_h2, init_h3, init_kappa,
init_w, bias_sym],
[cost, h1, h2, h3, kappa, w],
updates=updates)
cost_function = theano.function([X_sym, X_mask_sym, c_sym, c_mask_sym,
init_h1, init_h2, init_h3, init_kappa,
init_w, bias_sym],
[cost, h1, h2, h3, kappa, w])
predict_function = theano.function([X_sym, X_mask_sym, c_sym, c_mask_sym,
init_h1, init_h2, init_h3, init_kappa,
init_w, bias_sym],
[corr_outs],
on_unused_input='warn')
attention_function = theano.function([X_sym, X_mask_sym, c_sym, c_mask_sym,
init_h1, init_h2, init_h3, init_kappa,
init_w],
[kappa, w], on_unused_input='warn')
sample_function = theano.function([c_sym, c_mask_sym, init_h1, init_h2,
init_h3, init_kappa, init_w, bias_sym,
n_steps_sym],
[sampled, h1_s, h2_s, h3_s, k_s, w_s,
stop_s, stop_h],
updates=supdates)
checkpoint_dict = {}
checkpoint_dict["train_function"] = train_function
checkpoint_dict["cost_function"] = cost_function
checkpoint_dict["predict_function"] = predict_function
checkpoint_dict["attention_function"] = attention_function
checkpoint_dict["sample_function"] = sample_function
print("Beginning training loop")
train_mb_count = 0
valid_mb_count = 0
start_epoch = 0
monitor_frequency = 1000 // minibatch_size
overall_train_costs = []
overall_valid_costs = []
if args.cont is not None:
continue_path = args.cont
if not os.path.exists(continue_path):
raise ValueError("Continue model %s, path not "
"found" % continue_path)
saved_checkpoint = load_checkpoint(continue_path)
trained_weights = get_values_from_function(
saved_checkpoint["train_function"])
set_shared_variables_in_function(train_function, trained_weights)
try:
overall_train_costs = saved_checkpoint["overall_train_costs"]
overall_valid_costs = saved_checkpoint["overall_valid_costs"]
start_epoch = len(overall_train_costs)
except KeyError:
print("Key not found - model structure may have changed.")
print("Continuing anyways - statistics may not be correct!")
def _loop(function, itr):
prev_h1, prev_h2, prev_h3 = [np_zeros((minibatch_size, n_hid))
for i in range(3)]
prev_kappa = np_zeros((minibatch_size, att_size))
prev_w = np_zeros((minibatch_size, n_chars))
X_mb, X_mb_mask, c_mb, c_mb_mask = next(itr)
n_cuts = len(X_mb) // cut_len + 1
partial_costs = []
for n in range(n_cuts):
start = n * cut_len
stop = (n + 1) * cut_len
if len(X_mb[start:stop]) < cut_len:
new_len = cut_len - len(X_mb) % cut_len
zeros = np.zeros((new_len, X_mb.shape[1],
X_mb.shape[2]))
zeros = zeros.astype(X_mb.dtype)
mask_zeros = np.zeros((new_len, X_mb_mask.shape[1]))
mask_zeros = mask_zeros.astype(X_mb_mask.dtype)
X_mb = np.concatenate((X_mb, zeros), axis=0)
X_mb_mask = np.concatenate((X_mb_mask, mask_zeros), axis=0)
assert len(X_mb[start:stop]) == cut_len
assert len(X_mb_mask[start:stop]) == cut_len
bias = 0. # No bias in training
rval = function(X_mb[start:stop],
X_mb_mask[start:stop],
c_mb, c_mb_mask,
prev_h1, prev_h2, prev_h3, prev_kappa, prev_w, bias)
current_cost = rval[0]
prev_h1, prev_h2, prev_h3 = rval[1:4]
prev_h1 = prev_h1[-1]
prev_h2 = prev_h2[-1]
prev_h3 = prev_h3[-1]
prev_kappa = rval[4][-1]
prev_w = rval[5][-1]
partial_costs.append(current_cost)
return partial_costs
for e in range(start_epoch, start_epoch + n_epochs):
train_costs = []
try:
while True:
partial_train_costs = _loop(train_function, train_itr)
train_costs.append(np.mean(partial_train_costs))
if train_mb_count % monitor_frequency == 0:
print("starting train mb %i" % train_mb_count)
print("current epoch mean cost %f" % np.mean(train_costs))
train_mb_count += 1
except StopIteration:
valid_costs = []
try:
while True:
partial_valid_costs = _loop(cost_function, valid_itr)
valid_costs.append(np.mean(partial_valid_costs))
if valid_mb_count % monitor_frequency == 0:
print("starting valid mb %i" % valid_mb_count)
print("current validation mean cost %f" % np.mean(
valid_costs))
valid_mb_count += 1
except StopIteration:
pass
mean_epoch_train_cost = np.mean(train_costs)
        if np.isnan(mean_epoch_train_cost) or np.isinf(
                mean_epoch_train_cost):
print("Invalid cost detected at epoch %i" % e)
raise ValueError("Exiting...")
mean_epoch_valid_cost = np.mean(valid_costs)
overall_train_costs.append(mean_epoch_train_cost)
overall_valid_costs.append(mean_epoch_valid_cost)
checkpoint_dict["overall_train_costs"] = overall_train_costs
checkpoint_dict["overall_valid_costs"] = overall_valid_costs
print("script %s" % os.path.realpath(__file__))
print("epoch %i complete" % e)
print("epoch mean train cost %f" % mean_epoch_train_cost)
print("epoch mean valid cost %f" % mean_epoch_valid_cost)
print("overall train costs %s" % overall_train_costs[-5:])
print("overall valid costs %s" % overall_valid_costs[-5:])
if ((e % checkpoint_every_n) == 0) or (e == (n_epochs - 1)):
print("Checkpointing...")
checkpoint_save_path = "model_checkpoint_%i.pkl" % e
weights_save_path = "model_weights_%i.npz" % e
save_checkpoint(checkpoint_save_path, checkpoint_dict)
save_weights(weights_save_path, checkpoint_dict)
| bsd-3-clause |
LabMagUBO/StoneX | concepts/flood_fill/flood_fill.py | 1 | 1027 | import sys
import numpy as np
import scipy as sp
from matplotlib import pyplot as pl
import scipy.ndimage as nd
f = lambda x, y: x**2 + y**2 + 1.1*np.sin(4*y) + 1.1*np.cos(2*(x+y))**2
pos = np.array([0, 1])
n = 50
x = np.linspace(-2., 2., n)
y = np.linspace(-2., 2., n)
X, Y = np.meshgrid(x, y)
Z = f(X, Y)
fig = pl.figure()
ax = fig.add_subplot(111, aspect='equal')
#ax2 = fig.add_subplot(214, aspect='equal')
#ax.pcolormesh(X, Y, Z, cmap = pl.cm.hot)
ax.contourf(X, Y, Z, 15, alpha=0.9, cmap=pl.cm.hot)
C = ax.contour(X, Y, Z, 15, colors='black', linewidths=0.5)
ax.clabel(C, inline=1, fontsize=10)
seuil = 0.3
zones_risky = Z < seuil
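# scipy.ndimage's label() assigns a distinct integer to every connected
# region of the boolean mask (the flood-fill step this script is named
# after) and also returns the number of regions found.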
zones_labeled, zones_num = nd.measurements.label(zones_risky)
ax.contourf(X, Y, zones_risky, 10, alpha=0.5, cmap=pl.cm.gray)
print("Nombre de zones : ",zones_num)
ax.contourf(X, Y, zones_labeled, 15, alpha=0.4, cmap=pl.cm.hot)
pl.savefig('test.pdf')
#actual_label = zones_labeled(pos[0], pos[1])
#zones_actual =
#ax2.contourf(X, Y, zones_labe, 10, alpha=0.5, cmap=pl.cm.gray))
| gpl-3.0 |
OshynSong/scikit-learn | examples/classification/plot_digits_classification.py | 289 | 2397 | """
================================
Recognizing hand-written digits
================================
An example showing how scikit-learn can be used to recognize images of
hand-written digits.
This example is commented in the
:ref:`tutorial section of the user manual <introduction>`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, metrics
# The digits dataset
digits = datasets.load_digits()
# The data that we are interested in is made of 8x8 images of digits, let's
# have a look at the first 4 images, stored in the `images` attribute of the
# dataset. If we were working from image files, we could load them using
# pylab.imread. Note that each image must have the same size. For these
# images, we know which digit they represent: it is given in the 'target' of
# the dataset.
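# A minimal sketch of that alternative: reading a single digit image from
# disk with matplotlib. The filename 'my_digit.png' is only an assumed
# example; the block is skipped when no such file is present, so the rest
# of the example behaves exactly as before.
import os
if os.path.exists('my_digit.png'):
    custom_digit = plt.imread('my_digit.png')
    plt.matshow(custom_digit, cmap=plt.cm.gray_r)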
images_and_labels = list(zip(digits.images, digits.target))
for index, (image, label) in enumerate(images_and_labels[:4]):
plt.subplot(2, 4, index + 1)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Training: %i' % label)
# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
# Create a classifier: a support vector classifier
classifier = svm.SVC(gamma=0.001)
# We learn the digits on the first half of the digits
classifier.fit(data[:n_samples // 2], digits.target[:n_samples // 2])
# Now predict the value of the digit on the second half:
expected = digits.target[n_samples // 2:]
predicted = classifier.predict(data[n_samples // 2:])
print("Classification report for classifier %s:\n%s\n"
% (classifier, metrics.classification_report(expected, predicted)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))
images_and_predictions = list(zip(digits.images[n_samples / 2:], predicted))
for index, (image, prediction) in enumerate(images_and_predictions[:4]):
plt.subplot(2, 4, index + 5)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Prediction: %i' % prediction)
plt.show()
| bsd-3-clause |
imperial-genomics-facility/data-management-python | ehive/runnable/process/UpdateProjectInfo.py | 1 | 8337 | import os,subprocess
import pandas as pd
from ehive.runnable.IGFBaseProcess import IGFBaseProcess
from igf_data.utils.fileutils import get_temp_dir, remove_dir
from igf_data.utils.fileutils import copy_remote_file
from igf_data.utils.projectutils import get_project_read_count,get_seqrun_info_for_project
from igf_data.utils.gviz_utils import convert_to_gviz_json_for_display
from igf_data.utils.project_data_display_utils import convert_project_data_gviz_data,add_seqrun_path_info
from igf_data.utils.project_status_utils import Project_status
class UpdateProjectInfo(IGFBaseProcess):
'''
An ehive runnable class for updating data for project info page
'''
def param_defaults(self):
params_dict=super(UpdateProjectInfo,self).param_defaults()
params_dict.update({
'remote_project_path':None,
'remote_user':None,
'remote_host':None,
'seqruninfofile':'seqruninfofile.json',
'samplereadcountfile':'samplereadcountfile.json',
'samplereadcountcsvfile':'samplereadcountfile.csv',
'status_data_json':'status_data.json',
'pipeline_name':None,
'analysis_pipeline_name':None,
'sample_column':'sample_igf_id',
'use_ephemeral_space':0,
})
return params_dict
def run(self):
try:
seqrun_igf_id = self.param_required('seqrun_igf_id')
project_name = self.param_required('project_name')
remote_project_path = self.param_required('remote_project_path')
igf_session_class = self.param_required('igf_session_class')
remote_user = self.param_required('remote_user')
remote_host = self.param_required('remote_host')
seqruninfofile = self.param('seqruninfofile')
samplereadcountfile = self.param('samplereadcountfile')
samplereadcountcsvfile = self.param('samplereadcountcsvfile')
status_data_json = self.param('status_data_json')
pipeline_name = self.param_required('pipeline_name')
analysis_pipeline_name = self.param_required('analysis_pipeline_name')
sample_column = self.param('sample_column')
use_ephemeral_space = self.param('use_ephemeral_space')
temp_work_dir = \
get_temp_dir(use_ephemeral_space=use_ephemeral_space) # get a temp dir
temp_read_count_output = \
os.path.join(\
temp_work_dir,
samplereadcountfile) # get path for temp read count file
temp_read_count_csv_output = \
os.path.join(\
temp_work_dir,
samplereadcountcsvfile) # get path for temp read count csv file
temp_seqrun_info = \
os.path.join(\
temp_work_dir,
seqruninfofile) # get path for temp seqrun info file
raw_read_count = \
get_project_read_count(\
session_class=igf_session_class,
project_igf_id=project_name) # get raw read count for project
(description,read_count_data,column_order) = \
convert_project_data_gviz_data(input_data=raw_read_count) # convert read count to gviz requirements
convert_to_gviz_json_for_display(\
description=description,
data=read_count_data,
columns_order=column_order,
output_file=temp_read_count_output) # write data to output json file
read_count_data = pd.DataFrame(read_count_data)
if not isinstance(read_count_data,pd.DataFrame):
raise ValueError('Expecting a pandas dataframe, and got {0}'.\
format(type(read_count_data)))
read_count_data.\
set_index(sample_column).\
to_csv(\
temp_read_count_csv_output,
index=True) # create csv output for project data
seqrun_data = \
get_seqrun_info_for_project(\
session_class=igf_session_class,
project_igf_id=project_name) # fetch seqrun info for each projects
add_seqrun_path_info(\
input_data=seqrun_data,
output_file=temp_seqrun_info) # write seqrun info json
remote_project_dir = \
os.path.join(\
remote_project_path,
project_name) # get remote project directory path
self._check_and_copy_remote_file(\
remote_user=remote_user,
remote_host=remote_host,
source_file=temp_seqrun_info,
remote_file=os.path.join(remote_project_dir,
seqruninfofile)) # copy seqrun info file to remote
self._check_and_copy_remote_file(\
remote_user=remote_user,
remote_host=remote_host,
source_file=temp_read_count_output,
remote_file=os.path.join(remote_project_dir,
samplereadcountfile)) # copy file sample read count json file to remote
os.chmod(temp_read_count_csv_output, mode=0o754) # changed file permission before copy
self._check_and_copy_remote_file(\
remote_user=remote_user,
remote_host=remote_host,
source_file=temp_read_count_csv_output,
remote_file=os.path.join(remote_project_dir,
samplereadcountcsvfile)) # copy file sample read count csv file to remote
ps = Project_status(\
igf_session_class=igf_session_class,
project_igf_id=project_name)
temp_status_output = \
os.path.join(\
temp_work_dir,
status_data_json) # get path for temp status file
ps.generate_gviz_json_file(\
output_file=temp_status_output,
demultiplexing_pipeline=pipeline_name,
analysis_pipeline=analysis_pipeline_name,
active_seqrun_igf_id=seqrun_igf_id) # write data to output json file
self._check_and_copy_remote_file(\
remote_user=remote_user,
remote_host=remote_host,
source_file=temp_status_output,
remote_file=os.path.join(remote_project_dir,
status_data_json)) # copy file project status file to remote
self.param('dataflow_params',{'remote_project_info':'done'})
remove_dir(temp_work_dir) # remove temp dir
except Exception as e:
message = \
'seqrun: {2}, Error in {0}: {1}'.\
format(\
self.__class__.__name__,
e,
seqrun_igf_id)
self.warning(message)
self.post_message_to_slack(message,reaction='fail') # post msg to slack for failed jobs
self.post_message_to_ms_team(
message=message,
reaction='fail')
raise
@staticmethod
def _check_and_copy_remote_file(remote_user,remote_host,source_file,
remote_file):
'''
An internal static method for copying files to remote path
:param remote_user: Username for the remote server
:param remote_host: Hostname for the remote server
:param source_file: Source filepath
:param remote_file: Remote filepath
'''
try:
if not os.path.exists(source_file):
raise IOError('Source file {0} not found for copy'.\
format(source_file))
os.chmod(source_file, mode=0o754) # change source file permission before copy
remote_address = \
'{0}@{1}'.format(\
remote_user,
remote_host)
copy_remote_file(\
source_path=source_file,
destinationa_path=remote_file,
destination_address=remote_address) # create dir and copy file to remote
except:
      raise
| apache-2.0 |
mlperf/training_results_v0.6 | Fujitsu/benchmarks/resnet/implementations/mxnet/example/bayesian-methods/bdk_demo.py | 45 | 15837 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import mxnet as mx
import mxnet.ndarray as nd
import numpy
import logging
import time  # time.time() is used for the timing prints in run_synthetic_SGLD
import matplotlib.pyplot as plt
from scipy.stats import gaussian_kde
import argparse
from algos import *
from data_loader import *
from utils import *
class CrossEntropySoftmax(mx.operator.NumpyOp):
def __init__(self):
super(CrossEntropySoftmax, self).__init__(False)
def list_arguments(self):
return ['data', 'label']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
data_shape = in_shape[0]
label_shape = in_shape[0]
output_shape = in_shape[0]
return [data_shape, label_shape], [output_shape]
def forward(self, in_data, out_data):
x = in_data[0]
y = out_data[0]
y[:] = numpy.exp(x - x.max(axis=1).reshape((x.shape[0], 1))).astype('float32')
y /= y.sum(axis=1).reshape((x.shape[0], 1))
def backward(self, out_grad, in_data, out_data, in_grad):
l = in_data[1]
y = out_data[0]
dx = in_grad[0]
dx[:] = (y - l)
class LogSoftmax(mx.operator.NumpyOp):
def __init__(self):
super(LogSoftmax, self).__init__(False)
def list_arguments(self):
return ['data', 'label']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
data_shape = in_shape[0]
label_shape = in_shape[0]
output_shape = in_shape[0]
return [data_shape, label_shape], [output_shape]
def forward(self, in_data, out_data):
x = in_data[0]
y = out_data[0]
y[:] = (x - x.max(axis=1, keepdims=True)).astype('float32')
y -= numpy.log(numpy.exp(y).sum(axis=1, keepdims=True)).astype('float32')
# y[:] = numpy.exp(x - x.max(axis=1).reshape((x.shape[0], 1)))
# y /= y.sum(axis=1).reshape((x.shape[0], 1))
def backward(self, out_grad, in_data, out_data, in_grad):
l = in_data[1]
y = out_data[0]
dx = in_grad[0]
dx[:] = (numpy.exp(y) - l).astype('float32')
def classification_student_grad(student_outputs, teacher_pred):
return [student_outputs[0] - teacher_pred]
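# The hand-coded gradients below are those of the expected Gaussian negative
# log-likelihood of the teacher's predictive distribution (precision
# teacher_noise_precision), with student_outputs[1] treated as a
# log-variance; differentiating w.r.t. the student mean and log-variance
# gives the two expressions returned.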
def regression_student_grad(student_outputs, teacher_pred, teacher_noise_precision):
student_mean = student_outputs[0]
student_var = student_outputs[1]
grad_mean = nd.exp(-student_var) * (student_mean - teacher_pred)
grad_var = (1 - nd.exp(-student_var) * (nd.square(student_mean - teacher_pred)
+ 1.0 / teacher_noise_precision)) / 2
return [grad_mean, grad_var]
def get_mnist_sym(output_op=None, num_hidden=400):
net = mx.symbol.Variable('data')
net = mx.symbol.FullyConnected(data=net, name='mnist_fc1', num_hidden=num_hidden)
net = mx.symbol.Activation(data=net, name='mnist_relu1', act_type="relu")
net = mx.symbol.FullyConnected(data=net, name='mnist_fc2', num_hidden=num_hidden)
net = mx.symbol.Activation(data=net, name='mnist_relu2', act_type="relu")
net = mx.symbol.FullyConnected(data=net, name='mnist_fc3', num_hidden=10)
if output_op is None:
net = mx.symbol.SoftmaxOutput(data=net, name='softmax')
else:
net = output_op(data=net, name='softmax')
return net
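# synthetic_grad returns the gradient of the negative log-posterior for the
# two-parameter Gaussian-mixture example of the SGLD paper (see the argparse
# help at the bottom): a mixture likelihood over X plus independent Gaussian
# priors on theta1 and theta2, with rescale_grad compensating for minibatch
# subsampling of the data term.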
def synthetic_grad(X, theta, sigma1, sigma2, sigmax, rescale_grad=1.0, grad=None):
if grad is None:
grad = nd.empty(theta.shape, theta.context)
theta1 = theta.asnumpy()[0]
theta2 = theta.asnumpy()[1]
v1 = sigma1 ** 2
v2 = sigma2 ** 2
vx = sigmax ** 2
denominator = numpy.exp(-(X - theta1) ** 2 / (2 * vx)) + numpy.exp(
-(X - theta1 - theta2) ** 2 / (2 * vx))
grad_npy = numpy.zeros(theta.shape)
grad_npy[0] = -rescale_grad * ((numpy.exp(-(X - theta1) ** 2 / (2 * vx)) * (X - theta1) / vx
+ numpy.exp(-(X - theta1 - theta2) ** 2 / (2 * vx)) * (
X - theta1 - theta2) / vx) / denominator).sum() \
+ theta1 / v1
grad_npy[1] = -rescale_grad * ((numpy.exp(-(X - theta1 - theta2) ** 2 / (2 * vx)) * (
X - theta1 - theta2) / vx) / denominator).sum() \
+ theta2 / v2
grad[:] = grad_npy
return grad
def get_toy_sym(teacher=True, teacher_noise_precision=None):
if teacher:
net = mx.symbol.Variable('data')
net = mx.symbol.FullyConnected(data=net, name='teacher_fc1', num_hidden=100)
net = mx.symbol.Activation(data=net, name='teacher_relu1', act_type="relu")
net = mx.symbol.FullyConnected(data=net, name='teacher_fc2', num_hidden=1)
net = mx.symbol.LinearRegressionOutput(data=net, name='teacher_output',
grad_scale=teacher_noise_precision)
else:
net = mx.symbol.Variable('data')
net = mx.symbol.FullyConnected(data=net, name='student_fc1', num_hidden=100)
net = mx.symbol.Activation(data=net, name='student_relu1', act_type="relu")
student_mean = mx.symbol.FullyConnected(data=net, name='student_mean', num_hidden=1)
student_var = mx.symbol.FullyConnected(data=net, name='student_var', num_hidden=1)
net = mx.symbol.Group([student_mean, student_var])
return net
def dev():
return mx.gpu()
def run_mnist_SGD(training_num=50000):
X, Y, X_test, Y_test = load_mnist(training_num)
minibatch_size = 100
net = get_mnist_sym()
data_shape = (minibatch_size,) + X.shape[1::]
data_inputs = {'data': nd.zeros(data_shape, ctx=dev()),
'softmax_label': nd.zeros((minibatch_size,), ctx=dev())}
initializer = mx.init.Xavier(factor_type="in", magnitude=2.34)
exe, exe_params, _ = SGD(sym=net, dev=dev(), data_inputs=data_inputs, X=X, Y=Y,
X_test=X_test, Y_test=Y_test,
total_iter_num=1000000,
initializer=initializer,
lr=5E-6, prior_precision=1.0, minibatch_size=100)
def run_mnist_SGLD(training_num=50000):
X, Y, X_test, Y_test = load_mnist(training_num)
minibatch_size = 100
net = get_mnist_sym()
data_shape = (minibatch_size,) + X.shape[1::]
data_inputs = {'data': nd.zeros(data_shape, ctx=dev()),
'softmax_label': nd.zeros((minibatch_size,), ctx=dev())}
initializer = mx.init.Xavier(factor_type="in", magnitude=2.34)
exe, sample_pool = SGLD(sym=net, dev=dev(), data_inputs=data_inputs, X=X, Y=Y,
X_test=X_test, Y_test=Y_test,
total_iter_num=1000000,
initializer=initializer,
learning_rate=4E-6, prior_precision=1.0, minibatch_size=100,
thin_interval=100, burn_in_iter_num=1000)
def run_mnist_DistilledSGLD(training_num=50000):
X, Y, X_test, Y_test = load_mnist(training_num)
minibatch_size = 100
if training_num >= 10000:
num_hidden = 800
total_iter_num = 1000000
teacher_learning_rate = 1E-6
student_learning_rate = 0.0001
teacher_prior = 1
student_prior = 0.1
perturb_deviation = 0.1
else:
num_hidden = 400
total_iter_num = 20000
teacher_learning_rate = 4E-5
student_learning_rate = 0.0001
teacher_prior = 1
student_prior = 0.1
perturb_deviation = 0.001
teacher_net = get_mnist_sym(num_hidden=num_hidden)
logsoftmax = LogSoftmax()
student_net = get_mnist_sym(output_op=logsoftmax, num_hidden=num_hidden)
data_shape = (minibatch_size,) + X.shape[1::]
teacher_data_inputs = {'data': nd.zeros(data_shape, ctx=dev()),
'softmax_label': nd.zeros((minibatch_size,), ctx=dev())}
student_data_inputs = {'data': nd.zeros(data_shape, ctx=dev()),
'softmax_label': nd.zeros((minibatch_size, 10), ctx=dev())}
teacher_initializer = BiasXavier(factor_type="in", magnitude=1)
student_initializer = BiasXavier(factor_type="in", magnitude=1)
student_exe, student_params, _ = \
DistilledSGLD(teacher_sym=teacher_net, student_sym=student_net,
teacher_data_inputs=teacher_data_inputs,
student_data_inputs=student_data_inputs,
X=X, Y=Y, X_test=X_test, Y_test=Y_test, total_iter_num=total_iter_num,
student_initializer=student_initializer,
teacher_initializer=teacher_initializer,
student_optimizing_algorithm="adam",
teacher_learning_rate=teacher_learning_rate,
student_learning_rate=student_learning_rate,
teacher_prior_precision=teacher_prior, student_prior_precision=student_prior,
perturb_deviation=perturb_deviation, minibatch_size=100, dev=dev())
def run_toy_SGLD():
X, Y, X_test, Y_test = load_toy()
minibatch_size = 1
teacher_noise_precision = 1.0 / 9.0
net = get_toy_sym(True, teacher_noise_precision)
data_shape = (minibatch_size,) + X.shape[1::]
data_inputs = {'data': nd.zeros(data_shape, ctx=dev()),
'teacher_output_label': nd.zeros((minibatch_size, 1), ctx=dev())}
initializer = mx.init.Uniform(0.07)
exe, params, _ = \
SGLD(sym=net, data_inputs=data_inputs,
X=X, Y=Y, X_test=X_test, Y_test=Y_test, total_iter_num=50000,
initializer=initializer,
learning_rate=1E-4,
# lr_scheduler=mx.lr_scheduler.FactorScheduler(100000, 0.5),
prior_precision=0.1,
burn_in_iter_num=1000,
thin_interval=10,
task='regression',
minibatch_size=minibatch_size, dev=dev())
def run_toy_DistilledSGLD():
X, Y, X_test, Y_test = load_toy()
minibatch_size = 1
teacher_noise_precision = 1.0
teacher_net = get_toy_sym(True, teacher_noise_precision)
student_net = get_toy_sym(False)
data_shape = (minibatch_size,) + X.shape[1::]
teacher_data_inputs = {'data': nd.zeros(data_shape, ctx=dev()),
'teacher_output_label': nd.zeros((minibatch_size, 1), ctx=dev())}
student_data_inputs = {'data': nd.zeros(data_shape, ctx=dev())}
# 'softmax_label': nd.zeros((minibatch_size, 10), ctx=dev())}
teacher_initializer = mx.init.Uniform(0.07)
student_initializer = mx.init.Uniform(0.07)
student_grad_f = lambda student_outputs, teacher_pred: \
regression_student_grad(student_outputs, teacher_pred, teacher_noise_precision)
student_exe, student_params, _ = \
DistilledSGLD(teacher_sym=teacher_net, student_sym=student_net,
teacher_data_inputs=teacher_data_inputs,
student_data_inputs=student_data_inputs,
X=X, Y=Y, X_test=X_test, Y_test=Y_test, total_iter_num=80000,
teacher_initializer=teacher_initializer,
student_initializer=student_initializer,
teacher_learning_rate=1E-4, student_learning_rate=0.01,
# teacher_lr_scheduler=mx.lr_scheduler.FactorScheduler(100000, 0.5),
student_lr_scheduler=mx.lr_scheduler.FactorScheduler(8000, 0.8),
student_grad_f=student_grad_f,
teacher_prior_precision=0.1, student_prior_precision=0.001,
perturb_deviation=0.1, minibatch_size=minibatch_size, task='regression',
dev=dev())
def run_toy_HMC():
X, Y, X_test, Y_test = load_toy()
minibatch_size = Y.shape[0]
noise_precision = 1 / 9.0
net = get_toy_sym(True, noise_precision)
data_shape = (minibatch_size,) + X.shape[1::]
data_inputs = {'data': nd.zeros(data_shape, ctx=dev()),
'teacher_output_label': nd.zeros((minibatch_size, 1), ctx=dev())}
initializer = mx.init.Uniform(0.07)
sample_pool = HMC(net, data_inputs=data_inputs, X=X, Y=Y, X_test=X_test, Y_test=Y_test,
sample_num=300000, initializer=initializer, prior_precision=1.0,
learning_rate=1E-3, L=10, dev=dev())
def run_synthetic_SGLD():
theta1 = 0
theta2 = 1
sigma1 = numpy.sqrt(10)
sigma2 = 1
sigmax = numpy.sqrt(2)
X = load_synthetic(theta1=theta1, theta2=theta2, sigmax=sigmax, num=100)
minibatch_size = 1
total_iter_num = 1000000
lr_scheduler = SGLDScheduler(begin_rate=0.01, end_rate=0.0001, total_iter_num=total_iter_num,
factor=0.55)
optimizer = mx.optimizer.create('sgld',
learning_rate=None,
rescale_grad=1.0,
lr_scheduler=lr_scheduler,
wd=0)
updater = mx.optimizer.get_updater(optimizer)
theta = mx.random.normal(0, 1, (2,), mx.cpu())
grad = nd.empty((2,), mx.cpu())
samples = numpy.zeros((2, total_iter_num))
start = time.time()
for i in xrange(total_iter_num):
if (i + 1) % 100000 == 0:
end = time.time()
print("Iter:%d, Time spent: %f" % (i + 1, end - start))
start = time.time()
ind = numpy.random.randint(0, X.shape[0])
synthetic_grad(X[ind], theta, sigma1, sigma2, sigmax, rescale_grad=
X.shape[0] / float(minibatch_size), grad=grad)
updater('theta', grad, theta)
samples[:, i] = theta.asnumpy()
plt.hist2d(samples[0, :], samples[1, :], (200, 200), cmap=plt.cm.jet)
plt.colorbar()
plt.show()
if __name__ == '__main__':
numpy.random.seed(100)
mx.random.seed(100)
parser = argparse.ArgumentParser(
description="Examples in the paper [NIPS2015]Bayesian Dark Knowledge and "
"[ICML2011]Bayesian Learning via Stochastic Gradient Langevin Dynamics")
parser.add_argument("-d", "--dataset", type=int, default=1,
help="Dataset to use. 0 --> TOY, 1 --> MNIST, 2 --> Synthetic Data in "
"the SGLD paper")
parser.add_argument("-l", "--algorithm", type=int, default=2,
help="Type of algorithm to use. 0 --> SGD, 1 --> SGLD, other-->DistilledSGLD")
parser.add_argument("-t", "--training", type=int, default=50000,
help="Number of training samples")
args = parser.parse_args()
training_num = args.training
if args.dataset == 1:
if 0 == args.algorithm:
run_mnist_SGD(training_num)
elif 1 == args.algorithm:
run_mnist_SGLD(training_num)
else:
run_mnist_DistilledSGLD(training_num)
elif args.dataset == 0:
if 1 == args.algorithm:
run_toy_SGLD()
elif 2 == args.algorithm:
run_toy_DistilledSGLD()
elif 3 == args.algorithm:
run_toy_HMC()
else:
run_synthetic_SGLD()
| apache-2.0 |
yaukwankiu/armor | tests/modifiedMexicanHatTest7a.py | 1 | 2450 | # supplementing modifiedMexicanHatTest5.py
# outputing the charts, given the results
import numpy as np
import matplotlib.pyplot as plt
from armor import pattern
from armor import defaultParameters as dp
dbz = pattern.DBZ
DS = pattern.DBZstream
dataFolder = dp.root + "labLogs/2014-5-2-modifiedMexicanHatTest5/"
outputFolder= dataFolder
WRFnames = [ "WRF"+("0"+str(v))[-2:] for v in range(1,21)]
sigmas = [1, 2, 4, 5, 8 ,10 ,16, 20, 32, 40, 64, 80, 128, 160, 256, 320,]
allWRFsStreamMean = 0.
dbzCount = 0
for WRFname in WRFnames:
ds = DS(dataFolder=dataFolder,
name="rainband_march2014" + WRFname,
outputFolder="",
imageFolder="",
key1=WRFname, # keywords to pick out specific files
key2="LOGspec.dat",
key3="WRF_Rainband", #safety check
preload=True,
imageExtension = '.png', #added 2013-09-27
dataExtension = '.dat',
)
print "\n==================\nSaving histograms for ", ds.name
for dbzpattern in ds:
dbzCount += 1
streamMeanUpdate = np.array([(dbzpattern.matrix==v).sum() for v in sigmas])
allWRFsStreamMean = 1.* ((allWRFsStreamMean*(dbzCount -1)) + streamMeanUpdate ) / dbzCount
histogramName = "kongreywrf" + dbzpattern.dataTime + WRFname + "_LOGspec_histogram"+ ds.imageExtension
print dbzpattern.name, "->", histogramName
plt.clf()
dbzpattern.histogram(display=False, outputPath=outputFolder+histogramName)
plt.close()
plt.plot(sigmas, allWRFsStreamMean)
plt.title(ds.name + ' - average Laplacian-of-Gaussian max-response spectrum for ' + str(dbzCount) + ' WRF patterns')
plt.savefig(outputFolder + ds.name + "_all_wrfs_average_LoG_max_response spectrum.png")
plt.close()
"""
# run modifiedMexicanHatTest6a.py and then:
allWRFsStreamMean = array([ 2562.4375, 655.5625, 526.15 , 741.51 , 858.6425,
1457.79 , 1710.095 , 2971.355 , 3561.9125, 4406.915 ,
1488.0375, 59.5925, 0. , 0. , 0. , 0. ])
streamMeanCOMPREF = streamMean
sigmas = np.array(sigmas)
plt.close()
plt.plot(sigmas, streamMeanCOMPREF)
plt.plot(sigmas[:-4]*4, allWRFsStreamMean[:-4]*16)
plt.title("COMPREF and WRFs mean max-response LOG spectra from Kong-Rey data")
plt.show()
"""
| cc0-1.0 |
MartinDelzant/scikit-learn | examples/svm/plot_svm_anova.py | 250 | 2000 | """
=================================================
SVM-Anova: SVM with univariate feature selection
=================================================
This example shows how to perform univariate feature selection before running an SVC
(support vector classifier) to improve the classification scores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets, feature_selection, cross_validation
from sklearn.pipeline import Pipeline
###############################################################################
# Import some data to play with
digits = datasets.load_digits()
y = digits.target
# Throw away data, to be in the curse of dimension settings
y = y[:200]
X = digits.data[:200]
n_samples = len(y)
X = X.reshape((n_samples, -1))
# add 200 non-informative features
X = np.hstack((X, 2 * np.random.random((n_samples, 200))))
###############################################################################
# Create a feature-selection transform and an instance of SVM that we
# combine together to have a full-blown estimator
transform = feature_selection.SelectPercentile(feature_selection.f_classif)
clf = Pipeline([('anova', transform), ('svc', svm.SVC(C=1.0))])
###############################################################################
# Plot the cross-validation score as a function of percentile of features
score_means = list()
score_stds = list()
percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100)
for percentile in percentiles:
clf.set_params(anova__percentile=percentile)
# Compute cross-validation score using all CPUs
this_scores = cross_validation.cross_val_score(clf, X, y, n_jobs=1)
score_means.append(this_scores.mean())
score_stds.append(this_scores.std())
plt.errorbar(percentiles, score_means, np.array(score_stds))
plt.title(
'Performance of the SVM-Anova varying the percentile of features selected')
plt.xlabel('Percentile')
plt.ylabel('Prediction rate')
plt.axis('tight')
plt.show()
| bsd-3-clause |
jaidevd/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_01_language_train_model.py | 103 | 2017 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequence of 1 to 3
# characters instead of word tokens
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
# TASK: Fit the pipeline on the training set
# TASK: Predict the outcome on the testing set in a variable named y_predicted
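# One possible way to fill in the TASK blocks above, sketched here so that
# the variables `clf` and `y_predicted` used below are defined; the analyzer
# settings and the Perceptron classifier are illustrative choices, not the
# only valid answer.
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
                             use_idf=False)
clf = Pipeline([
    ('vec', vectorizer),
    ('clf', Perceptron()),
])
clf.fit(docs_train, y_train)
y_predicted = clf.predict(docs_test)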
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import matplotlib.pyplot as plt
#plt.matshow(cm, cmap=plt.cm.jet)
#plt.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
llhe/tensorflow | tensorflow/contrib/learn/python/learn/estimators/dnn_test.py | 31 | 60315 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DNNEstimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import dnn
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.feature_column import feature_column as fc_core
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import monitored_session
from tensorflow.python.training import server_lib
class EmbeddingMultiplierTest(test.TestCase):
"""dnn_model_fn tests."""
def testRaisesNonEmbeddingColumn(self):
one_hot_language = feature_column.one_hot_column(
feature_column.sparse_column_with_hash_bucket('language', 10))
params = {
'feature_columns': [one_hot_language],
'head': head_lib.multi_class_head(2),
'hidden_units': [1],
# Set lr mult to 0. to keep embeddings constant.
'embedding_lr_multipliers': {
one_hot_language: 0.0
},
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
with self.assertRaisesRegexp(ValueError,
'can only be defined for embedding columns'):
dnn._dnn_model_fn(features, labels, model_fn.ModeKeys.TRAIN, params)
def testMultipliesGradient(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
embedding_wire = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('wire', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
params = {
'feature_columns': [embedding_language, embedding_wire],
'head': head_lib.multi_class_head(2),
'hidden_units': [1],
# Set lr mult to 0. to keep embeddings constant.
'embedding_lr_multipliers': {
embedding_language: 0.0
},
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
'wire':
sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
model_ops = dnn._dnn_model_fn(features, labels, model_fn.ModeKeys.TRAIN,
params)
with monitored_session.MonitoredSession() as sess:
language_var = dnn_linear_combined._get_embedding_variable(
embedding_language, 'dnn', 'dnn/input_from_feature_columns')
wire_var = dnn_linear_combined._get_embedding_variable(
embedding_wire, 'dnn', 'dnn/input_from_feature_columns')
for _ in range(2):
_, language_value, wire_value = sess.run(
[model_ops.train_op, language_var, wire_var])
initial_value = np.full_like(language_value, 0.1)
self.assertTrue(np.all(np.isclose(language_value, initial_value)))
self.assertFalse(np.all(np.isclose(wire_value, initial_value)))
class ActivationFunctionTest(test.TestCase):
def _getModelForActivation(self, activation_fn):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
params = {
'feature_columns': [embedding_language],
'head': head_lib.multi_class_head(2),
'hidden_units': [1],
'activation_fn': activation_fn,
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
return dnn._dnn_model_fn(features, labels, model_fn.ModeKeys.TRAIN, params)
def testValidActivation(self):
_ = self._getModelForActivation('relu')
def testRaisesOnBadActivationName(self):
with self.assertRaisesRegexp(ValueError,
'Activation name should be one of'):
self._getModelForActivation('max_pool')
class DNNEstimatorTest(test.TestCase):
def _assertInRange(self, expected_min, expected_max, actual):
self.assertLessEqual(expected_min, actual)
self.assertGreaterEqual(expected_max, actual)
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=dnn.DNNClassifier(
n_classes=3,
feature_columns=[
feature_column.real_valued_column(
'feature', dimension=4)
],
hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_multiclass_fn,
eval_input_fn=test_data.iris_input_multiclass_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, dnn.DNNEstimator)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relatively higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
dnn_estimator = dnn.DNNEstimator(
head=head_lib.multi_class_head(2, weight_column_name='w'),
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
dnn_estimator.fit(input_fn=_input_fn_train, steps=5)
scores = dnn_estimator.evaluate(input_fn=_input_fn_eval, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
class DNNClassifierTest(test.TestCase):
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=dnn.DNNClassifier(
n_classes=3,
feature_columns=[
feature_column.real_valued_column(
'feature', dimension=4)
],
hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_multiclass_fn,
eval_input_fn=test_data.iris_input_multiclass_fn)
exp.test()
def _assertInRange(self, expected_min, expected_max, actual):
self.assertLessEqual(expected_min, actual)
self.assertGreaterEqual(expected_max, actual)
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, dnn.DNNClassifier)
def testEmbeddingMultiplier(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
classifier = dnn.DNNClassifier(
feature_columns=[embedding_language],
hidden_units=[3, 3],
embedding_lr_multipliers={embedding_language: 0.8})
self.assertEqual({
embedding_language: 0.8
}, classifier.params['embedding_lr_multipliers'])
def testInputPartitionSize(self):
def _input_fn_float_label(num_epochs=None):
features = {
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(language_column, dimension=1),
]
# Set num_ps_replica to be 10 and the min slice size to be extremely small,
# so as to ensure that there'll be 10 partitions produced.
config = run_config.RunConfig(tf_random_seed=1)
config._num_ps_replicas = 10
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[3, 3],
optimizer='Adagrad',
config=config,
input_layer_min_slice_size=1)
# Ensure the param is passed in.
self.assertEqual(1, classifier.params['input_layer_min_slice_size'])
# Ensure the partition count is 10.
classifier.fit(input_fn=_input_fn_float_label, steps=50)
partition_count = 0
for name in classifier.get_variable_names():
if 'language_embedding' in name and 'Adagrad' in name:
partition_count += 1
self.assertEqual(10, partition_count)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_logistic_fn
classifier.fit(input_fn=input_fn, steps=5)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testLogisticRegression_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [100] instead of [100, 1]."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testLogisticRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column.real_valued_column('', dimension=4)]
classifier = dnn.DNNClassifier(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(x=train_x, y=train_y, steps=5)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def _assertBinaryPredictions(self, expected_len, predictions):
self.assertEqual(expected_len, len(predictions))
for prediction in predictions:
self.assertIn(prediction, (0, 1))
def _assertProbabilities(self, expected_batch_size, expected_n_classes,
probabilities):
self.assertEqual(expected_batch_size, len(probabilities))
for b in range(expected_batch_size):
self.assertEqual(expected_n_classes, len(probabilities[b]))
for i in range(expected_n_classes):
self._assertInRange(0.0, 1.0, probabilities[b][i])
def testEstimatorWithCoreFeatureColumns(self):
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
language_column = fc_core.categorical_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
fc_core.embedding_column(language_column, dimension=1),
fc_core.numeric_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[10, 10],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(input_fn=predict_input_fn, as_iterable=True))
self._assertBinaryPredictions(3, predicted_classes)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLogisticRegression_TensorData(self):
"""Tests binary classification using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[10, 10],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self._assertBinaryPredictions(3, predicted_classes)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLogisticRegression_FloatLabel(self):
"""Tests binary classification with float labels."""
def _input_fn_float_label(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[50], [20], [10]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_float_label, steps=50)
predict_input_fn = functools.partial(_input_fn_float_label, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self._assertBinaryPredictions(3, predicted_classes)
predictions = list(
classifier.predict(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
predictions_proba = list(
classifier.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
self._assertProbabilities(3, 2, predictions_proba)
def testMultiClass_MatrixData(self):
"""Tests multi-class classification using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=200)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testMultiClass_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [150] instead of [150, 1]."""
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=200)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testMultiClass_NpMatrixData(self):
"""Tests multi-class classification using numpy matrix data as input."""
iris = base.load_iris()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column.real_valued_column('', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(x=train_x, y=train_y, steps=200)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testMultiClassLabelKeys(self):
"""Tests n_classes > 2 with label_keys vocabulary for labels."""
# Byte literals needed for python3 test to pass.
label_keys = [b'label0', b'label1', b'label2']
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant(
[[label_keys[1]], [label_keys[0]], [label_keys[0]]],
dtype=dtypes.string)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[10, 10],
label_keys=label_keys,
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self.assertEqual(3, len(predicted_classes))
for pred in predicted_classes:
self.assertIn(pred, label_keys)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
labels = constant_op.constant([[1], [0], [0], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
classifier = dnn.DNNClassifier(
weight_column_name='w',
n_classes=2,
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relatively higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = dnn.DNNClassifier(
weight_column_name='w',
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testPredict_AsIterableFalse(self):
"""Tests predict and predict_prob methods with as_iterable=False."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1)
]
n_classes = 3
classifier = dnn.DNNClassifier(
n_classes=n_classes,
feature_columns=feature_columns,
hidden_units=[10, 10],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predicted_classes = classifier.predict_classes(
input_fn=_input_fn, as_iterable=False)
self._assertBinaryPredictions(3, predicted_classes)
predictions = classifier.predict(input_fn=_input_fn, as_iterable=False)
self.assertAllEqual(predicted_classes, predictions)
probabilities = classifier.predict_proba(
input_fn=_input_fn, as_iterable=False)
self._assertProbabilities(3, n_classes, probabilities)
def testPredict_AsIterable(self):
"""Tests predict and predict_prob methods with as_iterable=True."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=200)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self.assertListEqual(predicted_classes, [1, 0, 0])
predictions = list(
classifier.predict(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
predicted_proba = list(
classifier.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(
predicted_proba, [[0., 1., 0.], [1., 0., 0.], [1., 0., 0.]], atol=0.3)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
labels = math_ops.to_float(labels)
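# strided_slice with end_mask=1 ignores the end bound on axis 0 (keeping all
# rows) and takes columns [1, 2), i.e. the positive-class probability.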
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
labels = math_ops.cast(labels, predictions.dtype)
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
classifier = dnn.DNNClassifier(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=5,
metrics={
'my_accuracy':
MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='classes'),
'my_precision':
MetricSpec(
metric_fn=metric_ops.streaming_precision,
prediction_key='classes'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(classifier.predict_classes(
input_fn=predict_input_fn)))
self.assertEqual(
_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
# Test the case where the prediction_key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=5,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1)
]
model_dir = tempfile.mkdtemp()
classifier = dnn.DNNClassifier(
model_dir=model_dir,
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions1 = classifier.predict_classes(input_fn=predict_input_fn)
del classifier
classifier2 = dnn.DNNClassifier(
model_dir=model_dir,
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
predictions2 = classifier2.predict_classes(input_fn=predict_input_fn)
self.assertEqual(list(predictions1), list(predictions2))
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1)
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=config)
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testExport(self):
"""Tests export model for servo."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
feature_columns = [
feature_column.real_valued_column('age'),
feature_column.embedding_column(
language, dimension=1)
]
classifier = dnn.DNNClassifier(
feature_columns=feature_columns, hidden_units=[3, 3])
classifier.fit(input_fn=input_fn, steps=5)
export_dir = tempfile.mkdtemp()
classifier.export(export_dir)
def testEnableCenteredBias(self):
"""Tests that we can enable centered bias."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
enable_centered_bias=True,
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=5)
self.assertIn('dnn/multi_class_head/centered_bias_weight',
classifier.get_variable_names())
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=5)
self.assertNotIn('centered_bias_weight', classifier.get_variable_names())
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
class DNNRegressorTest(test.TestCase):
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=dnn.DNNRegressor(
feature_columns=[
feature_column.real_valued_column(
'feature', dimension=4)
],
hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, dnn.DNNRegressor)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
regressor = dnn.DNNRegressor(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_logistic_fn
regressor.fit(input_fn=input_fn, steps=200)
scores = regressor.evaluate(input_fn=input_fn, steps=1)
self.assertIn('loss', scores)
def testRegression_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [100] instead of [100, 1]."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
regressor = dnn.DNNRegressor(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column.real_valued_column('', dimension=4)]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(x=train_x, y=train_y, steps=200)
scores = regressor.evaluate(x=train_x, y=train_y, steps=1)
self.assertIn('loss', scores)
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
regressor = dnn.DNNRegressor(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn.DNNRegressor(
weight_column_name='w',
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relatively higher weight of the first row.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1.], [1.], [1.], [1.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn.DNNRegressor(
weight_column_name='w',
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
predicted_scores = regressor.predict_scores(
input_fn=_input_fn, as_iterable=False)
self.assertAllClose(labels, predicted_scores, atol=0.2)
predictions = regressor.predict(input_fn=_input_fn, as_iterable=False)
self.assertAllClose(predicted_scores, predictions)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_scores = list(
regressor.predict_scores(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(labels, predicted_scores, atol=0.2)
predictions = list(
regressor.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(predicted_scores, predictions)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn.DNNRegressor(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error': metric_ops.streaming_mean_squared_error,
('my_metric', 'scores'): _my_metric_op
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case that the 2nd element of the key is not "scores".
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('my_error', 'predictions'):
metric_ops.streaming_mean_squared_error
})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('bad_length_name', 'scores', 'bad_length'):
metric_ops.streaming_mean_squared_error
})
def testCustomMetricsWithMetricSpec(self):
"""Tests custom evaluation metrics that use MetricSpec."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn.DNNRegressor(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error':
MetricSpec(
metric_fn=metric_ops.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
model_dir = tempfile.mkdtemp()
regressor = dnn.DNNRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
del regressor
regressor2 = dnn.DNNRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
predictions2 = list(regressor2.predict_scores(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
regressor = dnn.DNNRegressor(
feature_columns=feature_columns, hidden_units=[3, 3], config=config)
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testEnableCenteredBias(self):
"""Tests that we can enable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
enable_centered_bias=True,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
self.assertIn('dnn/regression_head/centered_bias_weight',
regressor.get_variable_names())
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
self.assertNotIn('centered_bias_weight', regressor.get_variable_names())
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def boston_input_fn():
boston = base.load_boston()
features = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.data), [-1, 13]),
dtypes.float32)
labels = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.target), [-1, 1]),
dtypes.float32)
return features, labels
class FeatureColumnTest(test.TestCase):
def testTrain(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
boston_input_fn)
est = dnn.DNNRegressor(feature_columns=feature_columns, hidden_units=[3, 3])
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_input_fn, steps=1)
if __name__ == '__main__':
test.main()
| apache-2.0 |
pyramania/scipy | scipy/signal/filter_design.py | 1 | 129497 | """Filter design.
"""
from __future__ import division, print_function, absolute_import
import warnings
import math
import numpy
import numpy as np
from numpy import (atleast_1d, poly, polyval, roots, real, asarray,
resize, pi, absolute, logspace, r_, sqrt, tan, log10,
arctan, arcsinh, sin, exp, cosh, arccosh, ceil, conjugate,
zeros, sinh, append, concatenate, prod, ones, array,
mintypecode)
from numpy.polynomial.polynomial import polyval as npp_polyval
from scipy import special, optimize
from scipy.special import comb, factorial
__all__ = ['findfreqs', 'freqs', 'freqz', 'tf2zpk', 'zpk2tf', 'normalize',
'lp2lp', 'lp2hp', 'lp2bp', 'lp2bs', 'bilinear', 'iirdesign',
'iirfilter', 'butter', 'cheby1', 'cheby2', 'ellip', 'bessel',
'band_stop_obj', 'buttord', 'cheb1ord', 'cheb2ord', 'ellipord',
'buttap', 'cheb1ap', 'cheb2ap', 'ellipap', 'besselap',
'BadCoefficients',
'tf2sos', 'sos2tf', 'zpk2sos', 'sos2zpk', 'group_delay',
'sosfreqz', 'iirnotch', 'iirpeak']
class BadCoefficients(UserWarning):
"""Warning about badly conditioned filter coefficients"""
pass
abs = absolute
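# group_delay() below references EPSILON, which does not appear earlier in
# this excerpt; assuming it is not defined elsewhere in the module, a
# machine-epsilon-scale constant such as this keeps the code runnable
# (assumed value, not confirmed against the original source).
EPSILON = 2e-16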
def findfreqs(num, den, N):
"""
Find array of frequencies for computing the response of an analog filter.
Parameters
----------
num, den : array_like, 1-D
The polynomial coefficients of the numerator and denominator of the
transfer function of the filter or LTI system. The coefficients are
ordered from highest to lowest degree.
N : int
The length of the array to be computed.
Returns
-------
w : (N,) ndarray
A 1-D array of frequencies, logarithmically spaced.
Examples
--------
Find a set of nine frequencies that span the "interesting part" of the
frequency response for the filter with the transfer function
H(s) = s / (s^2 + 8s + 25)
>>> from scipy import signal
>>> signal.findfreqs([1, 0], [1, 8, 25], N=9)
array([ 1.00000000e-02, 3.16227766e-02, 1.00000000e-01,
3.16227766e-01, 1.00000000e+00, 3.16227766e+00,
1.00000000e+01, 3.16227766e+01, 1.00000000e+02])
"""
ep = atleast_1d(roots(den)) + 0j
tz = atleast_1d(roots(num)) + 0j
if len(ep) == 0:
ep = atleast_1d(-1000) + 0j
ez = r_['-1',
numpy.compress(ep.imag >= 0, ep, axis=-1),
numpy.compress((abs(tz) < 1e5) & (tz.imag >= 0), tz, axis=-1)]
integ = abs(ez) < 1e-10
hfreq = numpy.around(numpy.log10(numpy.max(3 * abs(ez.real + integ) +
1.5 * ez.imag)) + 0.5)
lfreq = numpy.around(numpy.log10(0.1 * numpy.min(abs(real(ez + integ)) +
2 * ez.imag)) - 0.5)
w = logspace(lfreq, hfreq, N)
return w
def freqs(b, a, worN=None, plot=None):
"""
Compute frequency response of analog filter.
Given the M-order numerator `b` and N-order denominator `a` of an analog
filter, compute its frequency response::
b[0]*(jw)**M + b[1]*(jw)**(M-1) + ... + b[M]
H(w) = ----------------------------------------------
a[0]*(jw)**N + a[1]*(jw)**(N-1) + ... + a[N]
Parameters
----------
b : array_like
Numerator of a linear filter.
a : array_like
Denominator of a linear filter.
worN : {None, int, array_like}, optional
If None, then compute at 200 frequencies around the interesting parts
of the response curve (determined by pole-zero locations). If a single
integer, then compute at that many frequencies. Otherwise, compute the
response at the angular frequencies (e.g. rad/s) given in `worN`.
plot : callable, optional
A callable that takes two arguments. If given, the return parameters
`w` and `h` are passed to plot. Useful for plotting the frequency
response inside `freqs`.
Returns
-------
w : ndarray
The angular frequencies at which `h` was computed.
h : ndarray
The frequency response.
See Also
--------
freqz : Compute the frequency response of a digital filter.
Notes
-----
Using Matplotlib's "plot" function as the callable for `plot` produces
unexpected results, since it plots the real part of the complex transfer
function, not the magnitude. Try ``lambda w, h: plot(w, abs(h))``.
Examples
--------
>>> from scipy.signal import freqs, iirfilter
>>> b, a = iirfilter(4, [1, 10], 1, 60, analog=True, ftype='cheby1')
>>> w, h = freqs(b, a, worN=np.logspace(-1, 2, 1000))
>>> import matplotlib.pyplot as plt
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.xlabel('Frequency')
>>> plt.ylabel('Amplitude response [dB]')
>>> plt.grid()
>>> plt.show()
"""
if worN is None:
w = findfreqs(b, a, 200)
elif isinstance(worN, int):
N = worN
w = findfreqs(b, a, N)
else:
w = worN
w = atleast_1d(w)
s = 1j * w
h = polyval(b, s) / polyval(a, s)
if plot is not None:
plot(w, h)
return w, h
def freqz(b, a=1, worN=None, whole=False, plot=None):
"""
Compute the frequency response of a digital filter.
Given the M-order numerator `b` and N-order denominator `a` of a digital
filter, compute its frequency response::
jw -jw -jwM
jw B(e ) b[0] + b[1]e + .... + b[M]e
H(e ) = ---- = -----------------------------------
jw -jw -jwN
A(e ) a[0] + a[1]e + .... + a[N]e
Parameters
----------
b : array_like
numerator of a linear filter
a : array_like
denominator of a linear filter
worN : {None, int, array_like}, optional
If None (default), then compute at 512 frequencies equally spaced
around the unit circle.
If a single integer, then compute at that many frequencies.
If an array_like, compute the response at the frequencies given (in
radians/sample).
whole : bool, optional
Normally, frequencies are computed from 0 to the Nyquist frequency,
pi radians/sample (upper-half of unit-circle). If `whole` is True,
compute frequencies from 0 to 2*pi radians/sample.
plot : callable
A callable that takes two arguments. If given, the return parameters
`w` and `h` are passed to plot. Useful for plotting the frequency
response inside `freqz`.
Returns
-------
w : ndarray
The normalized frequencies at which `h` was computed, in
radians/sample.
h : ndarray
The frequency response, as complex numbers.
See Also
--------
sosfreqz
Notes
-----
Using Matplotlib's "plot" function as the callable for `plot` produces
unexpected results, since it plots the real part of the complex transfer
function, not the magnitude. Try ``lambda w, h: plot(w, abs(h))``.
Examples
--------
>>> from scipy import signal
>>> b = signal.firwin(80, 0.5, window=('kaiser', 8))
>>> w, h = signal.freqz(b)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.title('Digital filter frequency response')
>>> ax1 = fig.add_subplot(111)
>>> plt.plot(w, 20 * np.log10(abs(h)), 'b')
>>> plt.ylabel('Amplitude [dB]', color='b')
>>> plt.xlabel('Frequency [rad/sample]')
>>> ax2 = ax1.twinx()
>>> angles = np.unwrap(np.angle(h))
>>> plt.plot(w, angles, 'g')
>>> plt.ylabel('Angle (radians)', color='g')
>>> plt.grid()
>>> plt.axis('tight')
>>> plt.show()
"""
b, a = map(atleast_1d, (b, a))
if whole:
lastpoint = 2 * pi
else:
lastpoint = pi
if worN is None:
N = 512
w = numpy.linspace(0, lastpoint, N, endpoint=False)
elif isinstance(worN, int):
N = worN
w = numpy.linspace(0, lastpoint, N, endpoint=False)
else:
w = worN
w = atleast_1d(w)
zm1 = exp(-1j * w)
h = polyval(b[::-1], zm1) / polyval(a[::-1], zm1)
if plot is not None:
plot(w, h)
return w, h
def group_delay(system, w=None, whole=False):
r"""Compute the group delay of a digital filter.
The group delay measures by how many samples amplitude envelopes of
various spectral components of a signal are delayed by a filter.
It is formally defined as the derivative of continuous (unwrapped) phase::
d jw
D(w) = - -- arg H(e)
dw
Parameters
----------
system : tuple of array_like (b, a)
Numerator and denominator coefficients of a filter transfer function.
w : {None, int, array-like}, optional
If None (default), then compute at 512 frequencies equally spaced
around the unit circle.
If a single integer, then compute at that many frequencies.
If array, compute the delay at the frequencies given
(in radians/sample).
whole : bool, optional
Normally, frequencies are computed from 0 to the Nyquist frequency,
pi radians/sample (upper-half of unit-circle). If `whole` is True,
compute frequencies from 0 to ``2*pi`` radians/sample.
Returns
-------
w : ndarray
The normalized frequencies at which the group delay was computed,
in radians/sample.
gd : ndarray
The group delay.
Notes
-----
The corresponding function in MATLAB is called `grpdelay`.
If the transfer function :math:`H(z)` has zeros or poles on the unit
circle, the group delay at corresponding frequencies is undefined.
When such a case arises, a warning is raised and the group delay
is set to 0 at those frequencies.
For the details of numerical computation of the group delay refer to [1]_.
.. versionadded:: 0.16.0
See Also
--------
freqz : Frequency response of a digital filter
References
----------
.. [1] Richard G. Lyons, "Understanding Digital Signal Processing,
3rd edition", p. 830.
Examples
--------
>>> from scipy import signal
>>> b, a = signal.iirdesign(0.1, 0.3, 5, 50, ftype='cheby1')
>>> w, gd = signal.group_delay((b, a))
>>> import matplotlib.pyplot as plt
>>> plt.title('Digital filter group delay')
>>> plt.plot(w, gd)
>>> plt.ylabel('Group delay [samples]')
>>> plt.xlabel('Frequency [rad/sample]')
>>> plt.show()
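As a quick sanity check (filter parameters chosen only for illustration), a
linear-phase FIR filter of length M has a constant group delay of
``(M - 1) / 2`` samples:
>>> b = signal.firwin(31, 0.25)
>>> w, gd = signal.group_delay((b, 1))
>>> np.allclose(gd, 15)
True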
"""
if w is None:
w = 512
if isinstance(w, int):
if whole:
w = np.linspace(0, 2 * pi, w, endpoint=False)
else:
w = np.linspace(0, pi, w, endpoint=False)
w = np.atleast_1d(w)
b, a = map(np.atleast_1d, system)
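# Per the reference in the docstring: form c = b * reversed(a), whose group
# delay on the unit circle is gd(B) - gd(A) + (len(a) - 1); compute it with
# the FIR formula Re{ sum(n*c[n]*z**-n) / sum(c[n]*z**-n) } and subtract the
# extra delay introduced by reversing ``a``.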
c = np.convolve(b, a[::-1])
cr = c * np.arange(c.size)
z = np.exp(-1j * w)
num = np.polyval(cr[::-1], z)
den = np.polyval(c[::-1], z)
singular = np.absolute(den) < 10 * EPSILON
if np.any(singular):
warnings.warn(
"The group delay is singular at frequencies [{0}], setting to 0".
format(", ".join("{0:.3f}".format(ws) for ws in w[singular]))
)
gd = np.zeros_like(w)
gd[~singular] = np.real(num[~singular] / den[~singular]) - a.size + 1
return w, gd
def _validate_sos(sos):
"""Helper to validate a SOS input"""
sos = np.atleast_2d(sos)
if sos.ndim != 2:
raise ValueError('sos array must be 2D')
n_sections, m = sos.shape
if m != 6:
raise ValueError('sos array must be shape (n_sections, 6)')
if not (sos[:, 3] == 1).all():
raise ValueError('sos[:, 3] should be all ones')
return sos, n_sections
def sosfreqz(sos, worN=None, whole=False):
"""
Compute the frequency response of a digital filter in SOS format.
Given `sos`, an array with shape (n, 6) of second order sections of
a digital filter, compute the frequency response of the system function::
           B0(z)   B1(z)         B{n-1}(z)
    H(z) = ----- * ----- * ... * ---------
           A0(z)   A1(z)         A{n-1}(z)
for z = exp(omega*1j), where B{k}(z) and A{k}(z) are numerator and
denominator of the transfer function of the k-th second order section.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. Each row corresponds to a second-order
section, with the first three columns providing the numerator
coefficients and the last three providing the denominator
coefficients.
worN : {None, int, array_like}, optional
If None (default), then compute at 512 frequencies equally spaced
around the unit circle.
If a single integer, then compute at that many frequencies.
If an array_like, compute the response at the frequencies given (in
radians/sample).
whole : bool, optional
Normally, frequencies are computed from 0 to the Nyquist frequency,
pi radians/sample (upper-half of unit-circle). If `whole` is True,
compute frequencies from 0 to 2*pi radians/sample.
Returns
-------
w : ndarray
The normalized frequencies at which `h` was computed, in
radians/sample.
h : ndarray
The frequency response, as complex numbers.
See Also
--------
freqz, sosfilt
Notes
-----
.. versionadded:: 0.19.0
Examples
--------
Design a 15th-order bandpass filter in SOS format.
>>> from scipy import signal
>>> sos = signal.ellip(15, 0.5, 60, (0.2, 0.4), btype='bandpass',
... output='sos')
Compute the frequency response at 1500 points from DC to Nyquist.
>>> w, h = signal.sosfreqz(sos, worN=1500)
Plot the response.
>>> import matplotlib.pyplot as plt
>>> plt.subplot(2, 1, 1)
>>> db = 20*np.log10(np.abs(h))
>>> plt.plot(w/np.pi, db)
>>> plt.ylim(-75, 5)
>>> plt.grid(True)
>>> plt.yticks([0, -20, -40, -60])
>>> plt.ylabel('Gain [dB]')
>>> plt.title('Frequency Response')
>>> plt.subplot(2, 1, 2)
>>> plt.plot(w/np.pi, np.angle(h))
>>> plt.grid(True)
>>> plt.yticks([-np.pi, -0.5*np.pi, 0, 0.5*np.pi, np.pi],
... ['$-\pi$', '$-\pi/2$', '0', '$\pi/2$', '$\pi$'])
>>> plt.ylabel('Phase [rad]')
>>> plt.xlabel('Normalized frequency (1.0 = Nyquist)')
>>> plt.show()
If the same filter is implemented as a single transfer function,
numerical error corrupts the frequency response:
>>> b, a = signal.ellip(15, 0.5, 60, (0.2, 0.4), btype='bandpass',
... output='ba')
>>> w, h = signal.freqz(b, a, worN=1500)
>>> plt.subplot(2, 1, 1)
>>> db = 20*np.log10(np.abs(h))
>>> plt.plot(w/np.pi, db)
>>> plt.subplot(2, 1, 2)
>>> plt.plot(w/np.pi, np.angle(h))
>>> plt.show()
"""
sos, n_sections = _validate_sos(sos)
if n_sections == 0:
raise ValueError('Cannot compute frequencies with no sections')
h = 1.
for row in sos:
w, rowh = freqz(row[:3], row[3:], worN=worN, whole=whole)
h *= rowh
return w, h
def _cplxreal(z, tol=None):
"""
Split into complex and real parts, combining conjugate pairs.
The 1D input vector `z` is split up into its complex (`zc`) and real (`zr`)
elements. Every complex element must be part of a complex-conjugate pair,
which are combined into a single number (with positive imaginary part) in
the output. Two complex numbers are considered a conjugate pair if their
real and imaginary parts differ in magnitude by less than ``tol * abs(z)``.
Parameters
----------
z : array_like
Vector of complex numbers to be sorted and split
tol : float, optional
Relative tolerance for testing realness and conjugate equality.
Default is ``100 * spacing(1)`` of `z`'s data type (i.e. 2e-14 for
float64)
Returns
-------
zc : ndarray
Complex elements of `z`, with each pair represented by a single value
having positive imaginary part, sorted first by real part, and then
by magnitude of imaginary part. The pairs are averaged when combined
to reduce error.
zr : ndarray
Real elements of `z` (those having imaginary part less than
`tol` times their magnitude), sorted by value.
Raises
------
ValueError
If there are any complex numbers in `z` for which a conjugate
cannot be found.
See Also
--------
_cplxpair
Examples
--------
>>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j]
>>> zc, zr = _cplxreal(a)
>>> print(zc)
[ 1.+1.j 2.+1.j 2.+1.j 2.+2.j]
>>> print(zr)
[ 1. 3. 4.]
"""
z = atleast_1d(z)
if z.size == 0:
return z, z
elif z.ndim != 1:
raise ValueError('_cplxreal only accepts 1D input')
if tol is None:
# Get tolerance from dtype of input
tol = 100 * np.finfo((1.0 * z).dtype).eps
# Sort by real part, magnitude of imaginary part (speed up further sorting)
z = z[np.lexsort((abs(z.imag), z.real))]
# Split reals from conjugate pairs
real_indices = abs(z.imag) <= tol * abs(z)
zr = z[real_indices].real
if len(zr) == len(z):
# Input is entirely real
return array([]), zr
# Split positive and negative halves of conjugates
z = z[~real_indices]
zp = z[z.imag > 0]
zn = z[z.imag < 0]
if len(zp) != len(zn):
raise ValueError('Array contains complex value with no matching '
'conjugate.')
# Find runs of (approximately) the same real part
same_real = np.diff(zp.real) <= tol * abs(zp[:-1])
diffs = numpy.diff(concatenate(([0], same_real, [0])))
run_starts = numpy.where(diffs > 0)[0]
run_stops = numpy.where(diffs < 0)[0]
# Sort each run by their imaginary parts
for i in range(len(run_starts)):
start = run_starts[i]
stop = run_stops[i] + 1
for chunk in (zp[start:stop], zn[start:stop]):
chunk[...] = chunk[np.lexsort([abs(chunk.imag)])]
# Check that negatives match positives
if any(abs(zp - zn.conj()) > tol * abs(zn)):
raise ValueError('Array contains complex value with no matching '
'conjugate.')
# Average out numerical inaccuracy in real vs imag parts of pairs
zc = (zp + zn.conj()) / 2
return zc, zr
def _cplxpair(z, tol=None):
"""
Sort into pairs of complex conjugates.
Complex conjugates in `z` are sorted by increasing real part. In each
pair, the number with negative imaginary part appears first.
If pairs have identical real parts, they are sorted by increasing
imaginary magnitude.
Two complex numbers are considered a conjugate pair if their real and
imaginary parts differ in magnitude by less than ``tol * abs(z)``. The
pairs are forced to be exact complex conjugates by averaging the positive
and negative values.
Purely real numbers are also sorted, but placed after the complex
conjugate pairs. A number is considered real if its imaginary part is
smaller than `tol` times the magnitude of the number.
Parameters
----------
z : array_like
1-dimensional input array to be sorted.
tol : float, optional
Relative tolerance for testing realness and conjugate equality.
Default is ``100 * spacing(1)`` of `z`'s data type (i.e. 2e-14 for
float64)
Returns
-------
y : ndarray
Complex conjugate pairs followed by real numbers.
Raises
------
ValueError
If there are any complex numbers in `z` for which a conjugate
cannot be found.
See Also
--------
_cplxreal
Examples
--------
>>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j]
>>> z = _cplxpair(a)
>>> print(z)
[ 1.-1.j 1.+1.j 2.-1.j 2.+1.j 2.-1.j 2.+1.j 2.-2.j 2.+2.j 1.+0.j
3.+0.j 4.+0.j]
"""
z = atleast_1d(z)
if z.size == 0 or np.isrealobj(z):
return np.sort(z)
if z.ndim != 1:
raise ValueError('z must be 1-dimensional')
zc, zr = _cplxreal(z, tol)
# Interleave complex values and their conjugates, with negative imaginary
# parts first in each pair
zc = np.dstack((zc.conj(), zc)).flatten()
z = np.append(zc, zr)
return z
def tf2zpk(b, a):
r"""Return zero, pole, gain (z, p, k) representation from a numerator,
denominator representation of a linear filter.
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
Returns
-------
z : ndarray
Zeros of the transfer function.
p : ndarray
Poles of the transfer function.
k : float
System gain.
Notes
-----
If some values of `b` are too close to 0, they are removed. In that case,
a BadCoefficients warning is emitted.
The `b` and `a` arrays are interpreted as coefficients for positive,
descending powers of the transfer function variable. So the inputs
:math:`b = [b_0, b_1, ..., b_M]` and :math:`a =[a_0, a_1, ..., a_N]`
can represent an analog filter of the form:
.. math::
H(s) = \frac
{b_0 s^M + b_1 s^{(M-1)} + \cdots + b_M}
{a_0 s^N + a_1 s^{(N-1)} + \cdots + a_N}
or a discrete-time filter of the form:
.. math::
H(z) = \frac
{b_0 z^M + b_1 z^{(M-1)} + \cdots + b_M}
{a_0 z^N + a_1 z^{(N-1)} + \cdots + a_N}
This "positive powers" form is found more commonly in controls
engineering. If `M` and `N` are equal (which is true for all filters
generated by the bilinear transform), then this happens to be equivalent
to the "negative powers" discrete-time form preferred in DSP:
.. math::
H(z) = \frac
{b_0 + b_1 z^{-1} + \cdots + b_M z^{-M}}
{a_0 + a_1 z^{-1} + \cdots + a_N z^{-N}}
Although this is true for common filters, remember that this is not true
in the general case. If `M` and `N` are not equal, the discrete-time
transfer function coefficients must first be converted to the "positive
powers" form before finding the poles and zeros.
"""
b, a = normalize(b, a)
b = (b + 0.0) / a[0]
a = (a + 0.0) / a[0]
k = b[0]
b /= b[0]
z = roots(b)
p = roots(a)
return z, p, k
def zpk2tf(z, p, k):
"""
Return polynomial transfer function representation from zeros and poles
Parameters
----------
z : array_like
Zeros of the transfer function.
p : array_like
Poles of the transfer function.
k : float
System gain.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients.
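Examples
--------
A minimal sketch (values chosen arbitrarily), inverting the `tf2zpk` example
of a single zero at -0.5 and a single pole at 0.3:
>>> from scipy import signal
>>> b, a = signal.zpk2tf([-0.5], [0.3], 1.0)
>>> np.allclose(b, [1, 0.5]) and np.allclose(a, [1, -0.3])
True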
"""
z = atleast_1d(z)
k = atleast_1d(k)
if len(z.shape) > 1:
temp = poly(z[0])
b = zeros((z.shape[0], z.shape[1] + 1), temp.dtype.char)
if len(k) == 1:
k = [k[0]] * z.shape[0]
for i in range(z.shape[0]):
b[i] = k[i] * poly(z[i])
else:
b = k * poly(z)
a = atleast_1d(poly(p))
# Use real output if possible. Copied from numpy.poly, since
# we can't depend on a specific version of numpy.
if issubclass(b.dtype.type, numpy.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = numpy.asarray(z, complex)
pos_roots = numpy.compress(roots.imag > 0, roots)
neg_roots = numpy.conjugate(numpy.compress(roots.imag < 0, roots))
if len(pos_roots) == len(neg_roots):
if numpy.all(numpy.sort_complex(neg_roots) ==
numpy.sort_complex(pos_roots)):
b = b.real.copy()
if issubclass(a.dtype.type, numpy.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = numpy.asarray(p, complex)
pos_roots = numpy.compress(roots.imag > 0, roots)
neg_roots = numpy.conjugate(numpy.compress(roots.imag < 0, roots))
if len(pos_roots) == len(neg_roots):
if numpy.all(numpy.sort_complex(neg_roots) ==
numpy.sort_complex(pos_roots)):
a = a.real.copy()
return b, a
def tf2sos(b, a, pairing='nearest'):
"""
Return second-order sections from transfer function representation
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
pairing : {'nearest', 'keep_odd'}, optional
The method to use to combine pairs of poles and zeros into sections.
See `zpk2sos`.
Returns
-------
sos : ndarray
Array of second-order filter coefficients, with shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
See Also
--------
zpk2sos, sosfilt
Notes
-----
It is generally discouraged to convert from TF to SOS format, since doing
so usually will not reduce numerical precision errors. Instead, consider
designing filters in ZPK format and converting directly to SOS. TF is
converted to SOS by first converting to ZPK format, then converting
ZPK to SOS.
.. versionadded:: 0.16.0
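Examples
--------
A minimal sketch (filter parameters chosen only for illustration): a
4th-order digital Butterworth lowpass converts to two second-order sections:
>>> from scipy import signal
>>> b, a = signal.butter(4, 0.25)
>>> signal.tf2sos(b, a).shape
(2, 6)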
"""
return zpk2sos(*tf2zpk(b, a), pairing=pairing)
def sos2tf(sos):
"""
Return a single transfer function from a series of second-order sections
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients.
Notes
-----
.. versionadded:: 0.16.0
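Examples
--------
A minimal round-trip sketch (filter parameters chosen only for illustration):
>>> from scipy import signal
>>> b, a = signal.butter(4, 0.25)
>>> b2, a2 = signal.sos2tf(signal.tf2sos(b, a))
>>> np.allclose(b, b2) and np.allclose(a, a2)
True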
"""
sos = np.asarray(sos)
b = [1.]
a = [1.]
n_sections = sos.shape[0]
for section in range(n_sections):
b = np.polymul(b, sos[section, :3])
a = np.polymul(a, sos[section, 3:])
return b, a
def sos2zpk(sos):
"""
Return zeros, poles, and gain of a series of second-order sections
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
Returns
-------
z : ndarray
Zeros of the transfer function.
p : ndarray
Poles of the transfer function.
k : float
System gain.
Notes
-----
.. versionadded:: 0.16.0
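Examples
--------
A minimal sketch (filter parameters chosen only for illustration): each
second-order section contributes two zeros and two poles:
>>> from scipy import signal
>>> sos = signal.butter(4, 0.25, output='sos')
>>> z, p, k = signal.sos2zpk(sos)
>>> len(z), len(p)
(4, 4)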
"""
sos = np.asarray(sos)
n_sections = sos.shape[0]
z = np.empty(n_sections*2, np.complex128)
p = np.empty(n_sections*2, np.complex128)
k = 1.
for section in range(n_sections):
zpk = tf2zpk(sos[section, :3], sos[section, 3:])
z[2*section:2*(section+1)] = zpk[0]
p[2*section:2*(section+1)] = zpk[1]
k *= zpk[2]
return z, p, k
def _nearest_real_complex_idx(fro, to, which):
"""Get the next closest real or complex element based on distance"""
assert which in ('real', 'complex')
order = np.argsort(np.abs(fro - to))
mask = np.isreal(fro[order])
if which == 'complex':
mask = ~mask
return order[np.where(mask)[0][0]]
def zpk2sos(z, p, k, pairing='nearest'):
"""
Return second-order sections from zeros, poles, and gain of a system
Parameters
----------
z : array_like
Zeros of the transfer function.
p : array_like
Poles of the transfer function.
k : float
System gain.
pairing : {'nearest', 'keep_odd'}, optional
The method to use to combine pairs of poles and zeros into sections.
See Notes below.
Returns
-------
sos : ndarray
Array of second-order filter coefficients, with shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
See Also
--------
sosfilt
Notes
-----
The algorithm used to convert ZPK to SOS format is designed to
minimize errors due to numerical precision issues. The pairing
algorithm attempts to minimize the peak gain of each biquadratic
section. This is done by pairing poles with the nearest zeros, starting
with the poles closest to the unit circle.
*Algorithms*
The current algorithms are designed specifically for use with digital
filters. (The output coefficients are not correct for analog filters.)
The steps in the ``pairing='nearest'`` and ``pairing='keep_odd'``
algorithms are mostly shared. The ``nearest`` algorithm attempts to
minimize the peak gain, while ``'keep_odd'`` minimizes peak gain under
the constraint that odd-order systems should retain one section
as first order. The algorithm steps are as follows:
As a pre-processing step, add poles or zeros to the origin as
necessary to obtain the same number of poles and zeros for pairing.
If ``pairing == 'nearest'`` and there are an odd number of poles,
add an additional pole and a zero at the origin.
The following steps are then iterated over until no more poles or
zeros remain:
1. Take the (next remaining) pole (complex or real) closest to the
unit circle to begin a new filter section.
2. If the pole is real and there are no other remaining real poles [#]_,
add the closest real zero to the section and leave it as a first
order section. Note that after this step we are guaranteed to be
left with an even number of real poles, complex poles, real zeros,
and complex zeros for subsequent pairing iterations.
3. Else:
1. If the pole is complex and the zero is the only remaining real
zero, then pair the pole with the *next* closest zero
(guaranteed to be complex). This is necessary to ensure that
there will be a real zero remaining to eventually create a
first-order section (thus keeping the odd order).
2. Else pair the pole with the closest remaining zero (complex or
real).
3. Proceed to complete the second-order section by adding another
pole and zero to the current pole and zero in the section:
1. If the current pole and zero are both complex, add their
conjugates.
2. Else if the pole is complex and the zero is real, add the
conjugate pole and the next closest real zero.
3. Else if the pole is real and the zero is complex, add the
conjugate zero and the real pole closest to those zeros.
4. Else (we must have a real pole and real zero) add the next
real pole closest to the unit circle, and then add the real
zero closest to that pole.
.. [#] This conditional can only be met for specific odd-order inputs
with the ``pairing == 'keep_odd'`` method.
.. versionadded:: 0.16.0
Examples
--------
Design a 6th order low-pass elliptic digital filter for a system with a
sampling rate of 8000 Hz that has a pass-band corner frequency of
1000 Hz. The ripple in the pass-band should not exceed 0.087 dB, and
the attenuation in the stop-band should be at least 90 dB.
In the following call to `signal.ellip`, we could use ``output='sos'``,
but for this example, we'll use ``output='zpk'``, and then convert to SOS
format with `zpk2sos`:
>>> from scipy import signal
>>> z, p, k = signal.ellip(6, 0.087, 90, 1000/(0.5*8000), output='zpk')
Now convert to SOS format.
>>> sos = signal.zpk2sos(z, p, k)
The coefficients of the numerators of the sections:
>>> sos[:, :3]
array([[ 0.0014154 , 0.00248707, 0.0014154 ],
[ 1. , 0.72965193, 1. ],
[ 1. , 0.17594966, 1. ]])
The symmetry in the coefficients occurs because all the zeros are on the
unit circle.
The coefficients of the denominators of the sections:
>>> sos[:, 3:]
array([[ 1. , -1.32543251, 0.46989499],
[ 1. , -1.26117915, 0.6262586 ],
[ 1. , -1.25707217, 0.86199667]])
The next example shows the effect of the `pairing` option. We have a
system with three poles and three zeros, so the SOS array will have
shape (2, 6). This means there is, in effect, an extra pole and an extra
zero at the origin in the SOS representation.
>>> z1 = np.array([-1, -0.5-0.5j, -0.5+0.5j])
>>> p1 = np.array([0.75, 0.8+0.1j, 0.8-0.1j])
With ``pairing='nearest'`` (the default), we obtain
>>> signal.zpk2sos(z1, p1, 1)
array([[ 1. , 1. , 0.5 , 1. , -0.75, 0. ],
[ 1. , 1. , 0. , 1. , -1.6 , 0.65]])
The first section has the zeros {-0.5-0.5j, -0.5+0.5j} and the poles
{0, 0.75}, and the second section has the zeros {-1, 0} and poles
{0.8+0.1j, 0.8-0.1j}. Note that the extra pole and zero at the origin
have been assigned to different sections.
With ``pairing='keep_odd'``, we obtain:
>>> signal.zpk2sos(z1, p1, 1, pairing='keep_odd')
array([[ 1. , 1. , 0. , 1. , -0.75, 0. ],
[ 1. , 1. , 0.5 , 1. , -1.6 , 0.65]])
The extra pole and zero at the origin are in the same section.
The first section is, in effect, a first-order section.
"""
# TODO in the near future:
# 1. Add SOS capability to `filtfilt`, `freqz`, etc. somehow (#3259).
# 2. Make `decimate` use `sosfilt` instead of `lfilter`.
# 3. Make sosfilt automatically simplify sections to first order
# when possible. Note this might make `sosfiltfilt` a bit harder (ICs).
# 4. Further optimizations of the section ordering / pole-zero pairing.
# See the wiki for other potential issues.
valid_pairings = ['nearest', 'keep_odd']
if pairing not in valid_pairings:
raise ValueError('pairing must be one of %s, not %s'
% (valid_pairings, pairing))
if len(z) == len(p) == 0:
return array([[k, 0., 0., 1., 0., 0.]])
# ensure we have the same number of poles and zeros, and make copies
p = np.concatenate((p, np.zeros(max(len(z) - len(p), 0))))
z = np.concatenate((z, np.zeros(max(len(p) - len(z), 0))))
n_sections = (max(len(p), len(z)) + 1) // 2
sos = zeros((n_sections, 6))
if len(p) % 2 == 1 and pairing == 'nearest':
p = np.concatenate((p, [0.]))
z = np.concatenate((z, [0.]))
assert len(p) == len(z)
# Ensure we have complex conjugate pairs
# (note that _cplxreal only gives us one element of each complex pair):
z = np.concatenate(_cplxreal(z))
p = np.concatenate(_cplxreal(p))
p_sos = np.zeros((n_sections, 2), np.complex128)
z_sos = np.zeros_like(p_sos)
for si in range(n_sections):
# Select the next "worst" pole
p1_idx = np.argmin(np.abs(1 - np.abs(p)))
p1 = p[p1_idx]
p = np.delete(p, p1_idx)
# Pair that pole with a zero
if np.isreal(p1) and np.isreal(p).sum() == 0:
# Special case to set a first-order section
z1_idx = _nearest_real_complex_idx(z, p1, 'real')
z1 = z[z1_idx]
z = np.delete(z, z1_idx)
p2 = z2 = 0
else:
if not np.isreal(p1) and np.isreal(z).sum() == 1:
# Special case to ensure we choose a complex zero to pair
# with so later (setting up a first-order section)
z1_idx = _nearest_real_complex_idx(z, p1, 'complex')
assert not np.isreal(z[z1_idx])
else:
# Pair the pole with the closest zero (real or complex)
z1_idx = np.argmin(np.abs(p1 - z))
z1 = z[z1_idx]
z = np.delete(z, z1_idx)
# Now that we have p1 and z1, figure out what p2 and z2 need to be
if not np.isreal(p1):
if not np.isreal(z1): # complex pole, complex zero
p2 = p1.conj()
z2 = z1.conj()
else: # complex pole, real zero
p2 = p1.conj()
z2_idx = _nearest_real_complex_idx(z, p1, 'real')
z2 = z[z2_idx]
assert np.isreal(z2)
z = np.delete(z, z2_idx)
else:
if not np.isreal(z1): # real pole, complex zero
z2 = z1.conj()
p2_idx = _nearest_real_complex_idx(p, z1, 'real')
p2 = p[p2_idx]
assert np.isreal(p2)
else: # real pole, real zero
# pick the next "worst" pole to use
idx = np.where(np.isreal(p))[0]
assert len(idx) > 0
p2_idx = idx[np.argmin(np.abs(np.abs(p[idx]) - 1))]
p2 = p[p2_idx]
# find a real zero to match the added pole
assert np.isreal(p2)
z2_idx = _nearest_real_complex_idx(z, p2, 'real')
z2 = z[z2_idx]
assert np.isreal(z2)
z = np.delete(z, z2_idx)
p = np.delete(p, p2_idx)
p_sos[si] = [p1, p2]
z_sos[si] = [z1, z2]
assert len(p) == len(z) == 0 # we've consumed all poles and zeros
del p, z
# Construct the system, reversing order so the "worst" are last
p_sos = np.reshape(p_sos[::-1], (n_sections, 2))
z_sos = np.reshape(z_sos[::-1], (n_sections, 2))
gains = np.ones(n_sections)
gains[0] = k
for si in range(n_sections):
x = zpk2tf(z_sos[si], p_sos[si], gains[si])
sos[si] = np.concatenate(x)
return sos
def _align_nums(nums):
"""Aligns the shapes of multiple numerators.
Given an array of numerator coefficient arrays [[a_1, a_2,...,
a_n],..., [b_1, b_2,..., b_m]], this function pads shorter numerator
arrays with zeros so that all numerators have the same length. Such
alignment is necessary for functions like 'tf2ss', which needs the
alignment when dealing with SIMO transfer functions.
Parameters
----------
nums: array_like
Numerator or list of numerators. Not necessarily of the same length.
Returns
-------
nums: array
The numerator. If `nums` input was a list of numerators then a 2d
array with padded zeros for shorter numerators is returned. Otherwise
returns ``np.asarray(nums)``.
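Examples
--------
A minimal sketch with two hypothetical numerators of different lengths:
>>> out = _align_nums([[1, 2], [1, 2, 3]])
>>> out.shape
(2, 3)
>>> np.allclose(out[0], [0, 1, 2])
True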
"""
try:
# The statement can throw a ValueError if one
# of the numerators is a scalar and another
# is array-like e.g. if nums = [5, [1, 2, 3]]
nums = asarray(nums)
if not np.issubdtype(nums.dtype, np.number):
raise ValueError("dtype of numerator is non-numeric")
return nums
except ValueError:
nums = [np.atleast_1d(num) for num in nums]
max_width = max(num.size for num in nums)
# pre-allocate
aligned_nums = np.zeros((len(nums), max_width))
# Create numerators with padded zeros
for index, num in enumerate(nums):
aligned_nums[index, -num.size:] = num
return aligned_nums
def normalize(b, a):
"""Normalize numerator/denominator of a continuous-time transfer function.
If values of `b` are too close to 0, they are removed. In that case, a
BadCoefficients warning is emitted.
Parameters
----------
b: array_like
Numerator of the transfer function. Can be a 2d array to normalize
multiple transfer functions.
a: array_like
Denominator of the transfer function. At most 1d.
Returns
-------
num: array
The numerator of the normalized transfer function. At least a 1d
array. A 2d-array if the input `num` is a 2d array.
den: 1d-array
The denominator of the normalized transfer function.
Notes
-----
Coefficients for both the numerator and denominator should be specified in
descending exponent order (e.g., ``s^2 + 3s + 5`` would be represented as
``[1, 3, 5]``).
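Examples
--------
A minimal sketch with arbitrary coefficients; the leading denominator
coefficient is scaled to 1:
>>> from scipy import signal
>>> b, a = signal.normalize([2.0, 4.0], [2.0, 1.0, 0.5])
>>> np.allclose(b, [1.0, 2.0]) and np.allclose(a, [1.0, 0.5, 0.25])
True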
"""
num, den = b, a
den = np.atleast_1d(den)
num = np.atleast_2d(_align_nums(num))
if den.ndim != 1:
raise ValueError("Denominator polynomial must be rank-1 array.")
if num.ndim > 2:
raise ValueError("Numerator polynomial must be rank-1 or"
" rank-2 array.")
if np.all(den == 0):
raise ValueError("Denominator must have at least on nonzero element.")
# Trim leading zeros in denominator, leave at least one.
den = np.trim_zeros(den, 'f')
# Normalize transfer function
num, den = num / den[0], den / den[0]
# Count numerator columns that are all zero
leading_zeros = 0
for col in num.T:
if np.allclose(col, 0, atol=1e-14):
leading_zeros += 1
else:
break
# Trim leading zeros of numerator
if leading_zeros > 0:
warnings.warn("Badly conditioned filter coefficients (numerator): the "
"results may be meaningless", BadCoefficients)
# Make sure at least one column remains
if leading_zeros == num.shape[1]:
leading_zeros -= 1
num = num[:, leading_zeros:]
# Squeeze first dimension if singular
if num.shape[0] == 1:
num = num[0, :]
return num, den
def lp2lp(b, a, wo=1.0):
"""
Transform a lowpass filter prototype to a different frequency.
Return an analog low-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency, in
transfer function ('ba') representation.
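Examples
--------
A minimal sketch: moving the prototype ``H(s) = 1 / (s + 1)`` to a cutoff of
2 rad/s gives ``H(s) = 2 / (s + 2)``:
>>> from scipy import signal
>>> b, a = signal.lp2lp([1.0], [1.0, 1.0], wo=2.0)
>>> np.allclose(b, [2.0]) and np.allclose(a, [1.0, 2.0])
True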
"""
a, b = map(atleast_1d, (a, b))
try:
wo = float(wo)
except TypeError:
wo = float(wo[0])
d = len(a)
n = len(b)
M = max((d, n))
pwo = pow(wo, numpy.arange(M - 1, -1, -1))
start1 = max((n - d, 0))
start2 = max((d - n, 0))
b = b * pwo[start1] / pwo[start2:]
a = a * pwo[start1] / pwo[start1:]
return normalize(b, a)
def lp2hp(b, a, wo=1.0):
"""
Transform a lowpass filter prototype to a highpass filter.
Return an analog high-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency, in
transfer function ('ba') representation.
"""
a, b = map(atleast_1d, (a, b))
try:
wo = float(wo)
except TypeError:
wo = float(wo[0])
d = len(a)
n = len(b)
if wo != 1:
pwo = pow(wo, numpy.arange(max((d, n))))
else:
pwo = numpy.ones(max((d, n)), b.dtype.char)
if d >= n:
outa = a[::-1] * pwo
outb = resize(b, (d,))
outb[n:] = 0.0
outb[:n] = b[::-1] * pwo[:n]
else:
outb = b[::-1] * pwo
outa = resize(a, (n,))
outa[d:] = 0.0
outa[:d] = a[::-1] * pwo[:d]
return normalize(outb, outa)
def lp2bp(b, a, wo=1.0, bw=1.0):
"""
Transform a lowpass filter prototype to a bandpass filter.
Return an analog band-pass filter with center frequency `wo` and
bandwidth `bw` from an analog low-pass filter prototype with unity
cutoff frequency, in transfer function ('ba') representation.
"""
a, b = map(atleast_1d, (a, b))
D = len(a) - 1
N = len(b) - 1
artype = mintypecode((a, b))
ma = max([N, D])
Np = N + ma
Dp = D + ma
bprime = numpy.zeros(Np + 1, artype)
aprime = numpy.zeros(Dp + 1, artype)
wosq = wo * wo
for j in range(Np + 1):
val = 0.0
for i in range(0, N + 1):
for k in range(0, i + 1):
if ma - i + 2 * k == j:
val += comb(i, k) * b[N - i] * (wosq) ** (i - k) / bw ** i
bprime[Np - j] = val
for j in range(Dp + 1):
val = 0.0
for i in range(0, D + 1):
for k in range(0, i + 1):
if ma - i + 2 * k == j:
val += comb(i, k) * a[D - i] * (wosq) ** (i - k) / bw ** i
aprime[Dp - j] = val
return normalize(bprime, aprime)
def lp2bs(b, a, wo=1.0, bw=1.0):
"""
Transform a lowpass filter prototype to a bandstop filter.
Return an analog band-stop filter with center frequency `wo` and
bandwidth `bw` from an analog low-pass filter prototype with unity
cutoff frequency, in transfer function ('ba') representation.
"""
a, b = map(atleast_1d, (a, b))
D = len(a) - 1
N = len(b) - 1
artype = mintypecode((a, b))
M = max([N, D])
Np = M + M
Dp = M + M
bprime = numpy.zeros(Np + 1, artype)
aprime = numpy.zeros(Dp + 1, artype)
wosq = wo * wo
for j in range(Np + 1):
val = 0.0
for i in range(0, N + 1):
for k in range(0, M - i + 1):
if i + 2 * k == j:
val += (comb(M - i, k) * b[N - i] *
(wosq) ** (M - i - k) * bw ** i)
bprime[Np - j] = val
for j in range(Dp + 1):
val = 0.0
for i in range(0, D + 1):
for k in range(0, M - i + 1):
if i + 2 * k == j:
val += (comb(M - i, k) * a[D - i] *
(wosq) ** (M - i - k) * bw ** i)
aprime[Dp - j] = val
return normalize(bprime, aprime)
def bilinear(b, a, fs=1.0):
"""Return a digital filter from an analog one using a bilinear transform.
The bilinear transform substitutes ``2*fs*(z-1) / (z+1)`` for ``s``.
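For example (coefficients chosen only for illustration), mapping the one-pole
analog lowpass ``H(s) = 1 / (s + 1)`` with ``fs = 0.5`` (so ``2*fs = 1``)
gives the digital filter ``H(z) = 0.5 + 0.5*z**-1``:
>>> from scipy import signal
>>> b, a = signal.bilinear([1.0], [1.0, 1.0], fs=0.5)
>>> np.allclose(b, [0.5, 0.5]) and np.allclose(a, [1.0, 0.0])
True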
"""
fs = float(fs)
a, b = map(atleast_1d, (a, b))
D = len(a) - 1
N = len(b) - 1
artype = float
M = max([N, D])
Np = M
Dp = M
bprime = numpy.zeros(Np + 1, artype)
aprime = numpy.zeros(Dp + 1, artype)
for j in range(Np + 1):
val = 0.0
for i in range(N + 1):
for k in range(i + 1):
for l in range(M - i + 1):
if k + l == j:
val += (comb(i, k) * comb(M - i, l) * b[N - i] *
pow(2 * fs, i) * (-1) ** k)
bprime[j] = real(val)
for j in range(Dp + 1):
val = 0.0
for i in range(D + 1):
for k in range(i + 1):
for l in range(M - i + 1):
if k + l == j:
val += (comb(i, k) * comb(M - i, l) * a[D - i] *
pow(2 * fs, i) * (-1) ** k)
aprime[j] = real(val)
return normalize(bprime, aprime)
def iirdesign(wp, ws, gpass, gstop, analog=False, ftype='ellip', output='ba'):
"""Complete IIR digital and analog filter design.
Given passband and stopband frequencies and gains, construct an analog or
digital IIR filter of minimum order for a given basic type. Return the
output in numerator, denominator ('ba'), pole-zero ('zpk') or second order
sections ('sos') form.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
ftype : str, optional
The type of IIR filter to design:
- Butterworth : 'butter'
- Chebyshev I : 'cheby1'
- Chebyshev II : 'cheby2'
- Cauer/elliptic: 'ellip'
- Bessel/Thomson: 'bessel'
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
butter : Filter design using order and critical points
cheby1, cheby2, ellip, bessel
buttord : Find order and critical points from passband and stopband spec
cheb1ord, cheb2ord, ellipord
iirfilter : General filter design using order and critical frequencies
Notes
-----
The ``'sos'`` output parameter was added in 0.16.0.
"""
try:
ordfunc = filter_dict[ftype][1]
except KeyError:
raise ValueError("Invalid IIR filter type: %s" % ftype)
except IndexError:
raise ValueError(("%s does not have order selection. Use "
"iirfilter function.") % ftype)
wp = atleast_1d(wp)
ws = atleast_1d(ws)
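# Infer the band type from the inputs: scalar edges give lowpass/highpass,
# length-2 edges give bandstop/bandpass; wp[0] >= ws[0] selects the
# highpass/bandpass variant, otherwise lowpass/bandstop.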
band_type = 2 * (len(wp) - 1)
band_type += 1
if wp[0] >= ws[0]:
band_type += 1
btype = {1: 'lowpass', 2: 'highpass',
3: 'bandstop', 4: 'bandpass'}[band_type]
N, Wn = ordfunc(wp, ws, gpass, gstop, analog=analog)
return iirfilter(N, Wn, rp=gpass, rs=gstop, analog=analog, btype=btype,
ftype=ftype, output=output)
def iirfilter(N, Wn, rp=None, rs=None, btype='band', analog=False,
ftype='butter', output='ba'):
"""
IIR digital and analog filter design given order and critical points.
Design an Nth-order digital or analog filter and return the filter
coefficients.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
rp : float, optional
For Chebyshev and elliptic filters, provides the maximum ripple
in the passband. (dB)
rs : float, optional
For Chebyshev and elliptic filters, provides the minimum attenuation
in the stop band. (dB)
btype : {'bandpass', 'lowpass', 'highpass', 'bandstop'}, optional
The type of filter. Default is 'bandpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
ftype : str, optional
The type of IIR filter to design:
- Butterworth : 'butter'
- Chebyshev I : 'cheby1'
- Chebyshev II : 'cheby2'
- Cauer/elliptic: 'ellip'
- Bessel/Thomson: 'bessel'
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
butter : Filter design using order and critical points
cheby1, cheby2, ellip, bessel
buttord : Find order and critical points from passband and stopband spec
cheb1ord, cheb2ord, ellipord
iirdesign : General filter design using passband and stopband spec
Notes
-----
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Generate a 17th-order Chebyshev II bandpass filter and plot the frequency
response:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.iirfilter(17, [50, 200], rs=60, btype='band',
... analog=True, ftype='cheby2')
>>> w, h = signal.freqs(b, a, 1000)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.semilogx(w, 20 * np.log10(abs(h)))
>>> ax.set_title('Chebyshev Type II bandpass frequency response')
>>> ax.set_xlabel('Frequency [radians / second]')
>>> ax.set_ylabel('Amplitude [dB]')
>>> ax.axis((10, 1000, -100, 10))
>>> ax.grid(which='both', axis='both')
>>> plt.show()
"""
ftype, btype, output = [x.lower() for x in (ftype, btype, output)]
Wn = asarray(Wn)
try:
btype = band_dict[btype]
except KeyError:
raise ValueError("'%s' is an invalid bandtype for filter." % btype)
try:
typefunc = filter_dict[ftype][0]
except KeyError:
raise ValueError("'%s' is not a valid basic IIR filter." % ftype)
if output not in ['ba', 'zpk', 'sos']:
raise ValueError("'%s' is not a valid output form." % output)
if rp is not None and rp < 0:
raise ValueError("passband ripple (rp) must be positive")
if rs is not None and rs < 0:
raise ValueError("stopband attenuation (rs) must be positive")
# Get analog lowpass prototype
if typefunc == buttap:
z, p, k = typefunc(N)
elif typefunc == besselap:
z, p, k = typefunc(N, norm=bessel_norms[ftype])
elif typefunc == cheb1ap:
if rp is None:
raise ValueError("passband ripple (rp) must be provided to "
"design a Chebyshev I filter.")
z, p, k = typefunc(N, rp)
elif typefunc == cheb2ap:
if rs is None:
raise ValueError("stopband attenuation (rs) must be provided to "
"design an Chebyshev II filter.")
z, p, k = typefunc(N, rs)
elif typefunc == ellipap:
if rs is None or rp is None:
raise ValueError("Both rp and rs must be provided to design an "
"elliptic filter.")
z, p, k = typefunc(N, rp, rs)
else:
raise NotImplementedError("'%s' not implemented in iirfilter." % ftype)
# Pre-warp frequencies for digital filter design
if not analog:
if numpy.any(Wn < 0) or numpy.any(Wn > 1):
raise ValueError("Digital filter critical frequencies "
"must be 0 <= Wn <= 1")
fs = 2.0
warped = 2 * fs * tan(pi * Wn / fs)
else:
warped = Wn
# transform to lowpass, bandpass, highpass, or bandstop
if btype in ('lowpass', 'highpass'):
if numpy.size(Wn) != 1:
raise ValueError('Must specify a single critical frequency Wn')
if btype == 'lowpass':
z, p, k = _zpklp2lp(z, p, k, wo=warped)
elif btype == 'highpass':
z, p, k = _zpklp2hp(z, p, k, wo=warped)
elif btype in ('bandpass', 'bandstop'):
try:
bw = warped[1] - warped[0]
wo = sqrt(warped[0] * warped[1])
except IndexError:
raise ValueError('Wn must specify start and stop frequencies')
if btype == 'bandpass':
z, p, k = _zpklp2bp(z, p, k, wo=wo, bw=bw)
elif btype == 'bandstop':
z, p, k = _zpklp2bs(z, p, k, wo=wo, bw=bw)
else:
raise NotImplementedError("'%s' not implemented in iirfilter." % btype)
# Find discrete equivalent if necessary
if not analog:
z, p, k = _zpkbilinear(z, p, k, fs=fs)
# Transform to proper out type (pole-zero, state-space, numer-denom)
if output == 'zpk':
return z, p, k
elif output == 'ba':
return zpk2tf(z, p, k)
elif output == 'sos':
return zpk2sos(z, p, k)
def _relative_degree(z, p):
"""
Return relative degree of transfer function from zeros and poles
"""
degree = len(p) - len(z)
if degree < 0:
raise ValueError("Improper transfer function. "
"Must have at least as many poles as zeros.")
else:
return degree
# TODO: merge these into existing functions or make public versions
def _zpkbilinear(z, p, k, fs):
"""
Return a digital filter from an analog one using a bilinear transform.
Transform a set of poles and zeros from the analog s-plane to the digital
z-plane using Tustin's method, which substitutes ``2*fs*(z-1) / (z+1)`` for
``s``, maintaining the shape of the frequency response.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
fs : float
Sample rate, as ordinary frequency (e.g. hertz). No prewarping is
done in this function.
Returns
-------
z : ndarray
Zeros of the transformed digital filter transfer function.
p : ndarray
Poles of the transformed digital filter transfer function.
k : float
System gain of the transformed digital filter.
"""
z = atleast_1d(z)
p = atleast_1d(p)
degree = _relative_degree(z, p)
fs2 = 2*fs
# Bilinear transform the poles and zeros
z_z = (fs2 + z) / (fs2 - z)
p_z = (fs2 + p) / (fs2 - p)
# Any zeros that were at infinity get moved to the Nyquist frequency
z_z = append(z_z, -ones(degree))
# Compensate for gain change
k_z = k * real(prod(fs2 - z) / prod(fs2 - p))
return z_z, p_z, k_z
def _zpklp2lp(z, p, k, wo=1.0):
r"""
Transform a lowpass filter prototype to a different frequency.
Return an analog low-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency,
using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired cutoff, as angular frequency (e.g. rad/s).
Defaults to no change.
Returns
-------
z : ndarray
Zeros of the transformed low-pass filter transfer function.
p : ndarray
Poles of the transformed low-pass filter transfer function.
k : float
System gain of the transformed low-pass filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{s}{\omega_0}
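Examples
--------
A minimal sketch: a single pole at ``s = -1`` with unity gain moves to
``s = -2`` for ``wo = 2``, and the gain is rescaled so the DC gain is
unchanged:
>>> z, p, k = _zpklp2lp([], [-1.0], 1.0, wo=2.0)
>>> np.allclose(p, [-2.0]) and k == 2.0
True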
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo) # Avoid int wraparound
degree = _relative_degree(z, p)
# Scale all points radially from origin to shift cutoff frequency
z_lp = wo * z
p_lp = wo * p
# Each shifted pole decreases gain by wo, each shifted zero increases it.
# Cancel out the net change to keep overall gain the same
k_lp = k * wo**degree
return z_lp, p_lp, k_lp
def _zpklp2hp(z, p, k, wo=1.0):
r"""
Transform a lowpass filter prototype to a highpass filter.
Return an analog high-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency,
using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired cutoff, as angular frequency (e.g. rad/s).
Defaults to no change.
Returns
-------
z : ndarray
Zeros of the transformed high-pass filter transfer function.
p : ndarray
Poles of the transformed high-pass filter transfer function.
k : float
System gain of the transformed high-pass filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{\omega_0}{s}
This maintains symmetry of the lowpass and highpass responses on a
logarithmic scale.
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo)
degree = _relative_degree(z, p)
# Invert positions radially about unit circle to convert LPF to HPF
# Scale all points radially from origin to shift cutoff frequency
z_hp = wo / z
p_hp = wo / p
# If lowpass had zeros at infinity, inverting moves them to origin.
z_hp = append(z_hp, zeros(degree))
# Cancel out gain change caused by inversion
k_hp = k * real(prod(-z) / prod(-p))
return z_hp, p_hp, k_hp
def _zpklp2bp(z, p, k, wo=1.0, bw=1.0):
r"""
Transform a lowpass filter prototype to a bandpass filter.
Return an analog band-pass filter with center frequency `wo` and
bandwidth `bw` from an analog low-pass filter prototype with unity
cutoff frequency, using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired passband center, as angular frequency (e.g. rad/s).
Defaults to no change.
bw : float
Desired passband width, as angular frequency (e.g. rad/s).
Defaults to 1.
Returns
-------
z : ndarray
Zeros of the transformed band-pass filter transfer function.
p : ndarray
Poles of the transformed band-pass filter transfer function.
k : float
System gain of the transformed band-pass filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{s^2 + {\omega_0}^2}{s \cdot \mathrm{BW}}
This is the "wideband" transformation, producing a passband with
geometric (log frequency) symmetry about `wo`.
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo)
bw = float(bw)
degree = _relative_degree(z, p)
# Scale poles and zeros to desired bandwidth
z_lp = z * bw/2
p_lp = p * bw/2
# Square root needs to produce complex result, not NaN
z_lp = z_lp.astype(complex)
p_lp = p_lp.astype(complex)
# Duplicate poles and zeros and shift from baseband to +wo and -wo
z_bp = concatenate((z_lp + sqrt(z_lp**2 - wo**2),
z_lp - sqrt(z_lp**2 - wo**2)))
p_bp = concatenate((p_lp + sqrt(p_lp**2 - wo**2),
p_lp - sqrt(p_lp**2 - wo**2)))
# Move degree zeros to origin, leaving degree zeros at infinity for BPF
z_bp = append(z_bp, zeros(degree))
# Cancel out gain change from frequency scaling
k_bp = k * bw**degree
return z_bp, p_bp, k_bp
def _zpklp2bs(z, p, k, wo=1.0, bw=1.0):
r"""
Transform a lowpass filter prototype to a bandstop filter.
Return an analog band-stop filter with center frequency `wo` and
stopband width `bw` from an analog low-pass filter prototype with unity
cutoff frequency, using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired stopband center, as angular frequency (e.g. rad/s).
Defaults to no change.
bw : float
Desired stopband width, as angular frequency (e.g. rad/s).
Defaults to 1.
Returns
-------
z : ndarray
Zeros of the transformed band-stop filter transfer function.
p : ndarray
Poles of the transformed band-stop filter transfer function.
k : float
System gain of the transformed band-stop filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{s \cdot \mathrm{BW}}{s^2 + {\omega_0}^2}
This is the "wideband" transformation, producing a stopband with
geometric (log frequency) symmetry about `wo`.
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo)
bw = float(bw)
degree = _relative_degree(z, p)
# Invert to a highpass filter with desired bandwidth
z_hp = (bw/2) / z
p_hp = (bw/2) / p
# Square root needs to produce complex result, not NaN
z_hp = z_hp.astype(complex)
p_hp = p_hp.astype(complex)
# Duplicate poles and zeros and shift from baseband to +wo and -wo
z_bs = concatenate((z_hp + sqrt(z_hp**2 - wo**2),
z_hp - sqrt(z_hp**2 - wo**2)))
p_bs = concatenate((p_hp + sqrt(p_hp**2 - wo**2),
p_hp - sqrt(p_hp**2 - wo**2)))
# Move any zeros that were at infinity to the center of the stopband
z_bs = append(z_bs, +1j*wo * ones(degree))
z_bs = append(z_bs, -1j*wo * ones(degree))
# Cancel out gain change caused by inversion
k_bs = k * real(prod(-z) / prod(-p))
return z_bs, p_bs, k_bs
def butter(N, Wn, btype='low', analog=False, output='ba'):
"""
Butterworth digital and analog filter design.
Design an Nth-order digital or analog Butterworth filter and return
the filter coefficients.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For a Butterworth filter, this is the point at which the gain
drops to 1/sqrt(2) that of the passband (the "-3 dB point").
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
buttord, buttap
Notes
-----
The Butterworth filter has maximally flat frequency response in the
passband.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.butter(4, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Butterworth filter frequency response')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.show()
"""
return iirfilter(N, Wn, btype=btype, analog=analog,
output=output, ftype='butter')
def cheby1(N, rp, Wn, btype='low', analog=False, output='ba'):
"""
Chebyshev type I digital and analog filter design.
Design an Nth-order digital or analog Chebyshev type I filter and
return the filter coefficients.
Parameters
----------
N : int
The order of the filter.
rp : float
The maximum ripple allowed below unity gain in the passband.
Specified in decibels, as a positive number.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For Type I filters, this is the point in the transition band at which
the gain first drops below -`rp`.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
cheb1ord, cheb1ap
Notes
-----
The Chebyshev type I filter maximizes the rate of cutoff between the
frequency response's passband and stopband, at the expense of ripple in
the passband and increased ringing in the step response.
Type I filters roll off faster than Type II (`cheby2`), but Type II
filters do not have any ripple in the passband.
The equiripple passband has N maxima or minima (for example, a
5th-order filter has 3 maxima and 2 minima). Consequently, the DC gain is
unity for odd-order filters, or -rp dB for even-order filters.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.cheby1(4, 5, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev Type I frequency response (rp=5)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-5, color='green') # rp
>>> plt.show()
"""
return iirfilter(N, Wn, rp=rp, btype=btype, analog=analog,
output=output, ftype='cheby1')
def cheby2(N, rs, Wn, btype='low', analog=False, output='ba'):
"""
Chebyshev type II digital and analog filter design.
Design an Nth-order digital or analog Chebyshev type II filter and
return the filter coefficients.
Parameters
----------
N : int
The order of the filter.
rs : float
The minimum attenuation required in the stop band.
Specified in decibels, as a positive number.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For Type II filters, this is the point in the transition band at which
the gain first reaches -`rs`.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
cheb2ord, cheb2ap
Notes
-----
The Chebyshev type II filter maximizes the rate of cutoff between the
frequency response's passband and stopband, at the expense of ripple in
the stopband and increased ringing in the step response.
Type II filters do not roll off as fast as Type I (`cheby1`).
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.cheby2(4, 40, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev Type II frequency response (rs=40)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-40, color='green') # rs
>>> plt.show()
"""
return iirfilter(N, Wn, rs=rs, btype=btype, analog=analog,
output=output, ftype='cheby2')
def ellip(N, rp, rs, Wn, btype='low', analog=False, output='ba'):
"""
Elliptic (Cauer) digital and analog filter design.
Design an Nth-order digital or analog elliptic filter and return
the filter coefficients.
Parameters
----------
N : int
The order of the filter.
rp : float
The maximum ripple allowed below unity gain in the passband.
Specified in decibels, as a positive number.
rs : float
The minimum attenuation required in the stop band.
Specified in decibels, as a positive number.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For elliptic filters, this is the point in the transition band at
which the gain first drops below -`rp`.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
ellipord, ellipap
Notes
-----
Also known as Cauer or Zolotarev filters, the elliptical filter maximizes
the rate of transition between the frequency response's passband and
stopband, at the expense of ripple in both, and increased ringing in the
step response.
As `rp` approaches 0, the elliptical filter becomes a Chebyshev
type II filter (`cheby2`). As `rs` approaches 0, it becomes a Chebyshev
type I filter (`cheby1`). As both approach 0, it becomes a Butterworth
filter (`butter`).
The equiripple passband has N maxima or minima (for example, a
5th-order filter has 3 maxima and 2 minima). Consequently, the DC gain is
unity for odd-order filters, or -rp dB for even-order filters.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.ellip(4, 5, 40, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Elliptic filter frequency response (rp=5, rs=40)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-40, color='green') # rs
>>> plt.axhline(-5, color='green') # rp
>>> plt.show()
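As a quick numerical check of the DC-gain behaviour described in the Notes
(a sketch added for illustration; any even order would behave the same way),
the constant terms of the analog transfer function give the gain at DC:
>>> b, a = signal.ellip(4, 5, 40, 100, 'low', analog=True)
>>> 20 * np.log10(b[-1] / a[-1])  # DC gain of this even-order filter, about -5 dB (-rp)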
"""
return iirfilter(N, Wn, rs=rs, rp=rp, btype=btype, analog=analog,
output=output, ftype='elliptic')
def bessel(N, Wn, btype='low', analog=False, output='ba', norm='phase'):
"""
Bessel/Thomson digital and analog filter design.
Design an Nth-order digital or analog Bessel filter and return the
filter coefficients.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies (defined
by the `norm` parameter).
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned. (See Notes.)
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
norm : {'phase', 'delay', 'mag'}, optional
Critical frequency normalization:
``phase``
The filter is normalized such that the phase response reaches its
midpoint at angular (e.g. rad/s) frequency `Wn`. This happens for
both low-pass and high-pass filters, so this is the
"phase-matched" case.
The magnitude response asymptotes are the same as a Butterworth
filter of the same order with a cutoff of `Wn`.
This is the default, and matches MATLAB's implementation.
``delay``
The filter is normalized such that the group delay in the passband
is 1/`Wn` (e.g. seconds). This is the "natural" type obtained by
solving Bessel polynomials.
``mag``
The filter is normalized such that the gain magnitude is -3 dB at
angular frequency `Wn`.
.. versionadded:: 0.18.0
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
Notes
-----
Also known as a Thomson filter, the analog Bessel filter has maximally
flat group delay and maximally linear phase response, with very little
ringing in the step response. [1]_
The Bessel is inherently an analog filter. This function generates digital
Bessel filters using the bilinear transform, which does not preserve the
phase response of the analog filter. As such, it is only approximately
correct at frequencies below about fs/4. To get maximally-flat group
delay at higher frequencies, the analog Bessel filter must be transformed
using phase-preserving techniques.
See `besselap` for implementation details and references.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the phase-normalized frequency response, showing the relationship
to the Butterworth cutoff frequency (green):
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.butter(4, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(np.abs(h)), color='silver', ls='dashed')
>>> b, a = signal.bessel(4, 100, 'low', analog=True, norm='phase')
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(np.abs(h)))
>>> plt.title('Bessel filter magnitude response (with Butterworth)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.show()
and the phase midpoint:
>>> plt.figure()
>>> plt.semilogx(w, np.unwrap(np.angle(h)))
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-np.pi, color='red') # phase midpoint
>>> plt.title('Bessel filter phase response')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Phase [radians]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.show()
Plot the magnitude-normalized frequency response, showing the -3 dB cutoff:
>>> b, a = signal.bessel(3, 10, 'low', analog=True, norm='mag')
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(np.abs(h)))
>>> plt.axhline(-3, color='red') # -3 dB magnitude
>>> plt.axvline(10, color='green') # cutoff frequency
>>> plt.title('Magnitude-normalized Bessel filter frequency response')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.show()
Plot the delay-normalized filter, showing the maximally-flat group delay
at 0.1 seconds:
>>> b, a = signal.bessel(5, 1/0.1, 'low', analog=True, norm='delay')
>>> w, h = signal.freqs(b, a)
>>> plt.figure()
>>> plt.semilogx(w[1:], -np.diff(np.unwrap(np.angle(h)))/np.diff(w))
>>> plt.axhline(0.1, color='red') # 0.1 seconds group delay
>>> plt.title('Bessel filter group delay')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Group delay [seconds]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.show()
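As an additional sketch (the order and cutoff are illustrative, not part of
the original example), a digital Bessel filter can be designed as
second-order sections and used to smooth a signal; per the Notes, its phase
behaviour is only approximately preserved below about fs/4:
>>> sos = signal.bessel(4, 0.2, 'low', output='sos')
>>> t = np.linspace(0, 1, 1000, False)  # 1 second sampled at 1 kHz
>>> x = np.sin(2*np.pi*5*t) + 0.3*np.random.randn(1000)
>>> smoothed = signal.sosfilt(sos, x)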
References
----------
.. [1] Thomson, W.E., "Delay Networks having Maximally Flat Frequency
Characteristics", Proceedings of the Institution of Electrical
Engineers, Part III, November 1949, Vol. 96, No. 44, pp. 487-490.
"""
return iirfilter(N, Wn, btype=btype, analog=analog,
output=output, ftype='bessel_'+norm)
def maxflat():
pass
def yulewalk():
pass
def band_stop_obj(wp, ind, passb, stopb, gpass, gstop, type):
"""
Band Stop Objective Function for order minimization.
Returns the non-integer order for an analog band stop filter.
Parameters
----------
wp : scalar
Edge of passband `passb`.
ind : int, {0, 1}
Index specifying which `passb` edge to vary (0 or 1).
passb : ndarray
Two element sequence of fixed passband edges.
stopb : ndarray
Two element sequence of fixed stopband edges.
gpass : float
Amount of ripple in the passband in dB.
gstop : float
Amount of attenuation in stopband in dB.
type : {'butter', 'cheby', 'ellip'}
Type of filter.
Returns
-------
n : scalar
Filter order (possibly non-integer).
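A minimal usage sketch (added for illustration; this helper is normally only
called internally by the ``*ord`` functions through
`scipy.optimize.fminbound`, and the edge values below are arbitrary):
>>> import numpy as np
>>> from scipy.signal import band_stop_obj
>>> passb = np.array([10.0, 60.0])  # fixed analog passband edges, rad/s
>>> stopb = np.array([20.0, 45.0])  # fixed analog stopband edges, rad/s
>>> band_stop_obj(12.0, 0, passb, stopb, gpass=3, gstop=40, type='butter')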
"""
passbC = passb.copy()
passbC[ind] = wp
nat = (stopb * (passbC[0] - passbC[1]) /
(stopb ** 2 - passbC[0] * passbC[1]))
nat = min(abs(nat))
if type == 'butter':
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
n = (log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * log10(nat)))
elif type == 'cheby':
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
n = arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) / arccosh(nat)
elif type == 'ellip':
GSTOP = 10 ** (0.1 * gstop)
GPASS = 10 ** (0.1 * gpass)
arg1 = sqrt((GPASS - 1.0) / (GSTOP - 1.0))
arg0 = 1.0 / nat
d0 = special.ellipk([arg0 ** 2, 1 - arg0 ** 2])
d1 = special.ellipk([arg1 ** 2, 1 - arg1 ** 2])
n = (d0[0] * d1[1] / (d0[1] * d1[0]))
else:
raise ValueError("Incorrect type: %s" % type)
return n
def buttord(wp, ws, gpass, gstop, analog=False):
"""Butterworth filter order selection.
Return the order of the lowest order digital or analog Butterworth filter
that loses no more than `gpass` dB in the passband and has at least
`gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
Returns
-------
ord : int
The lowest order for a Butterworth filter which meets specs.
wn : ndarray or float
The Butterworth natural frequency (i.e. the "3dB frequency"). Should
be used with `butter` to give filter results.
See Also
--------
butter : Filter design using order and critical points
cheb1ord : Find order and critical points from passband and stopband spec
cheb2ord, ellipord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
Design an analog bandpass filter with passband within 3 dB from 20 to
50 rad/s, while attenuating by at least 40 dB below 14 and above 60 rad/s.
Plot its frequency response, showing the passband and stopband
constraints in gray.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.buttord([20, 50], [14, 60], 3, 40, True)
>>> b, a = signal.butter(N, Wn, 'band', True)
>>> w, h = signal.freqs(b, a, np.logspace(1, 2, 500))
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Butterworth bandpass filter fit to constraints')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
>>> plt.fill([1, 14, 14, 1], [-40, -40, 99, 99], '0.9', lw=0) # stop
>>> plt.fill([20, 20, 50, 50], [-99, -3, -3, -99], '0.9', lw=0) # pass
>>> plt.fill([60, 60, 1e9, 1e9], [99, -40, -40, 99], '0.9', lw=0) # stop
>>> plt.axis([10, 100, -60, 3])
>>> plt.show()
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
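# Encode the response type from the edge layout:
# 1 = lowpass, 2 = highpass, 3 = bandstop, 4 = bandpass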
filter_type = 2 * (len(wp) - 1)
filter_type += 1
if wp[0] >= ws[0]:
filter_type += 1
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop,
'butter'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop,
'butter'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
ord = int(ceil(log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * log10(nat))))
# Find the Butterworth natural frequency WN (or the "3dB frequency")
# to give exactly gpass at passb.
try:
W0 = (GPASS - 1.0) ** (-1.0 / (2.0 * ord))
except ZeroDivisionError:
W0 = 1.0
print("Warning, order is zero...check input parameters.")
# now convert this frequency back from lowpass prototype
# to the original analog filter
if filter_type == 1: # low
WN = W0 * passb
elif filter_type == 2: # high
WN = passb / W0
elif filter_type == 3: # stop
WN = numpy.zeros(2, float)
discr = sqrt((passb[1] - passb[0]) ** 2 +
4 * W0 ** 2 * passb[0] * passb[1])
WN[0] = ((passb[1] - passb[0]) + discr) / (2 * W0)
WN[1] = ((passb[1] - passb[0]) - discr) / (2 * W0)
WN = numpy.sort(abs(WN))
elif filter_type == 4: # pass
W0 = numpy.array([-W0, W0], float)
WN = (-W0 * (passb[1] - passb[0]) / 2.0 +
sqrt(W0 ** 2 / 4.0 * (passb[1] - passb[0]) ** 2 +
passb[0] * passb[1]))
WN = numpy.sort(abs(WN))
else:
raise ValueError("Bad type: %s" % filter_type)
if not analog:
wn = (2.0 / pi) * arctan(WN)
else:
wn = WN
if len(wn) == 1:
wn = wn[0]
return ord, wn
def cheb1ord(wp, ws, gpass, gstop, analog=False):
"""Chebyshev type I filter order selection.
Return the order of the lowest order digital or analog Chebyshev Type I
filter that loses no more than `gpass` dB in the passband and has at
least `gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
Returns
-------
ord : int
The lowest order for a Chebyshev type I filter that meets specs.
wn : ndarray or float
The Chebyshev natural frequency (the "3dB frequency") for use with
`cheby1` to give filter results.
See Also
--------
cheby1 : Filter design using order and critical points
buttord : Find order and critical points from passband and stopband spec
cheb2ord, ellipord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
Design a digital lowpass filter such that the passband is within 3 dB up
to 0.2*(fs/2), while attenuating by at least 40 dB above 0.3*(fs/2). Plot its
frequency response, showing the passband and stopband constraints in gray.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.cheb1ord(0.2, 0.3, 3, 40)
>>> b, a = signal.cheby1(N, 3, Wn, 'low')
>>> w, h = signal.freqz(b, a)
>>> plt.semilogx(w / np.pi, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev I lowpass filter fit to constraints')
>>> plt.xlabel('Normalized frequency')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
>>> plt.fill([.01, 0.2, 0.2, .01], [-3, -3, -99, -99], '0.9', lw=0) # pass
>>> plt.fill([0.3, 0.3, 2, 2], [ 9, -40, -40, 9], '0.9', lw=0) # stop
>>> plt.axis([0.08, 1, -60, 3])
>>> plt.show()
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2 * (len(wp) - 1)
if wp[0] < ws[0]:
filter_type += 1
else:
filter_type += 2
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
ord = int(ceil(arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) /
arccosh(nat)))
# Natural frequencies are just the passband edges
if not analog:
wn = (2.0 / pi) * arctan(passb)
else:
wn = passb
if len(wn) == 1:
wn = wn[0]
return ord, wn
def cheb2ord(wp, ws, gpass, gstop, analog=False):
"""Chebyshev type II filter order selection.
Return the order of the lowest order digital or analog Chebyshev Type II
filter that loses no more than `gpass` dB in the passband and has at least
`gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
Returns
-------
ord : int
The lowest order for a Chebyshev type II filter that meets specs.
wn : ndarray or float
The Chebyshev natural frequency (the "3dB frequency") for use with
`cheby2` to give filter results.
See Also
--------
cheby2 : Filter design using order and critical points
buttord : Find order and critical points from passband and stopband spec
cheb1ord, ellipord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
Design a digital bandstop filter that attenuates by at least 60 dB from 0.2*(fs/2) to
0.5*(fs/2), while staying within 3 dB below 0.1*(fs/2) or above
0.6*(fs/2). Plot its frequency response, showing the passband and
stopband constraints in gray.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.cheb2ord([0.1, 0.6], [0.2, 0.5], 3, 60)
>>> b, a = signal.cheby2(N, 60, Wn, 'stop')
>>> w, h = signal.freqz(b, a)
>>> plt.semilogx(w / np.pi, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev II bandstop filter fit to constraints')
>>> plt.xlabel('Normalized frequency')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
>>> plt.fill([.01, .1, .1, .01], [-3, -3, -99, -99], '0.9', lw=0) # pass
>>> plt.fill([.2, .2, .5, .5], [ 9, -60, -60, 9], '0.9', lw=0) # stop
>>> plt.fill([.6, .6, 2, 2], [-99, -3, -3, -99], '0.9', lw=0) # pass
>>> plt.axis([0.06, 1, -80, 3])
>>> plt.show()
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2 * (len(wp) - 1)
if wp[0] < ws[0]:
filter_type += 1
else:
filter_type += 2
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
ord = int(ceil(arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) /
arccosh(nat)))
# Find frequency where analog response is -gpass dB.
# Then convert back from low-pass prototype to the original filter.
new_freq = cosh(1.0 / ord * arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))))
new_freq = 1.0 / new_freq
if filter_type == 1:
nat = passb / new_freq
elif filter_type == 2:
nat = passb * new_freq
elif filter_type == 3:
nat = numpy.zeros(2, float)
nat[0] = (new_freq / 2.0 * (passb[0] - passb[1]) +
sqrt(new_freq ** 2 * (passb[1] - passb[0]) ** 2 / 4.0 +
passb[1] * passb[0]))
nat[1] = passb[1] * passb[0] / nat[0]
elif filter_type == 4:
nat = numpy.zeros(2, float)
nat[0] = (1.0 / (2.0 * new_freq) * (passb[0] - passb[1]) +
sqrt((passb[1] - passb[0]) ** 2 / (4.0 * new_freq ** 2) +
passb[1] * passb[0]))
nat[1] = passb[0] * passb[1] / nat[0]
if not analog:
wn = (2.0 / pi) * arctan(nat)
else:
wn = nat
if len(wn) == 1:
wn = wn[0]
return ord, wn
def ellipord(wp, ws, gpass, gstop, analog=False):
"""Elliptic (Cauer) filter order selection.
Return the order of the lowest order digital or analog elliptic filter
that loses no more than `gpass` dB in the passband and has at least
`gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
Returns
-------
ord : int
The lowest order for an Elliptic (Cauer) filter that meets specs.
wn : ndarray or float
The Chebyshev natural frequency (the "3dB frequency") for use with
`ellip` to give filter results.
See Also
--------
ellip : Filter design using order and critical points
buttord : Find order and critical points from passband and stopband spec
cheb1ord, cheb2ord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
Design an analog highpass filter such that the passband is within 3 dB
above 30 rad/s, while attenuating by at least 60 dB at 10 rad/s. Plot its
frequency response, showing the passband and stopband constraints in gray.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.ellipord(30, 10, 3, 60, True)
>>> b, a = signal.ellip(N, 3, 60, Wn, 'high', True)
>>> w, h = signal.freqs(b, a, np.logspace(0, 3, 500))
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Elliptical highpass filter fit to constraints')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
>>> plt.fill([.1, 10, 10, .1], [1e4, 1e4, -60, -60], '0.9', lw=0) # stop
>>> plt.fill([30, 30, 1e9, 1e9], [-99, -3, -3, -99], '0.9', lw=0) # pass
>>> plt.axis([1, 300, -80, 3])
>>> plt.show()
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2 * (len(wp) - 1)
filter_type += 1
if wp[0] >= ws[0]:
filter_type += 1
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop, 'ellip'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop, 'ellip'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
GSTOP = 10 ** (0.1 * gstop)
GPASS = 10 ** (0.1 * gpass)
arg1 = sqrt((GPASS - 1.0) / (GSTOP - 1.0))
arg0 = 1.0 / nat
d0 = special.ellipk([arg0 ** 2, 1 - arg0 ** 2])
d1 = special.ellipk([arg1 ** 2, 1 - arg1 ** 2])
ord = int(ceil(d0[0] * d1[1] / (d0[1] * d1[0])))
if not analog:
wn = arctan(passb) * 2.0 / pi
else:
wn = passb
if len(wn) == 1:
wn = wn[0]
return ord, wn
def buttap(N):
"""Return (z,p,k) for analog prototype of Nth-order Butterworth filter.
The filter will have an angular (e.g. rad/s) cutoff frequency of 1.
See Also
--------
butter : Filter design function using this prototype
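Examples
--------
A short sketch (added here for illustration): convert the prototype to
transfer-function form and rescale it to an arbitrary cutoff with `lp2lp`:
>>> from scipy import signal
>>> z, p, k = signal.buttap(4)
>>> b, a = signal.zpk2tf(z, p, k)
>>> b, a = signal.lp2lp(b, a, wo=100)  # move the cutoff to 100 rad/s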
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
z = numpy.array([])
m = numpy.arange(-N+1, N, 2)
# Middle value is 0 to ensure an exactly real pole
p = -numpy.exp(1j * pi * m / (2 * N))
k = 1
return z, p, k
def cheb1ap(N, rp):
"""
Return (z,p,k) for Nth-order Chebyshev type I analog lowpass filter.
The returned filter prototype has `rp` decibels of ripple in the passband.
The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
defined as the point at which the gain first drops below ``-rp``.
See Also
--------
cheby1 : Filter design function using this prototype
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
elif N == 0:
# Avoid divide-by-zero error
# Even order filters have DC gain of -rp dB
return numpy.array([]), numpy.array([]), 10**(-rp/20)
z = numpy.array([])
# Ripple factor (epsilon)
eps = numpy.sqrt(10 ** (0.1 * rp) - 1.0)
mu = 1.0 / N * arcsinh(1 / eps)
# Arrange poles in an ellipse on the left half of the S-plane
m = numpy.arange(-N+1, N, 2)
theta = pi * m / (2*N)
p = -sinh(mu + 1j*theta)
k = numpy.prod(-p, axis=0).real
if N % 2 == 0:
k = k / sqrt((1 + eps * eps))
return z, p, k
def cheb2ap(N, rs):
"""
Return (z,p,k) for Nth-order Chebyshev type II analog lowpass filter.
The returned filter prototype has `rs` decibels of ripple in the stopband.
The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
defined as the point at which the gain first reaches ``-rs``.
See Also
--------
cheby2 : Filter design function using this prototype
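Examples
--------
A quick numerical sketch (added for illustration) of the normalization
described above: the gain at the normalized edge ``w = 1`` is ``-rs`` dB:
>>> import numpy as np
>>> from scipy import signal
>>> z, p, k = signal.cheb2ap(5, 40)
>>> b, a = signal.zpk2tf(z, p, k)
>>> w, h = signal.freqs(b, a, worN=[1.0])
>>> 20 * np.log10(np.abs(h))  # approximately -40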
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
elif N == 0:
# Avoid divide-by-zero warning
return numpy.array([]), numpy.array([]), 1
# Ripple factor (epsilon)
de = 1.0 / sqrt(10 ** (0.1 * rs) - 1)
mu = arcsinh(1.0 / de) / N
if N % 2:
m = numpy.concatenate((numpy.arange(-N+1, 0, 2),
numpy.arange(2, N, 2)))
else:
m = numpy.arange(-N+1, N, 2)
z = -conjugate(1j / sin(m * pi / (2.0 * N)))
# Poles around the unit circle like Butterworth
p = -exp(1j * pi * numpy.arange(-N+1, N, 2) / (2 * N))
# Warp into Chebyshev II
p = sinh(mu) * p.real + 1j * cosh(mu) * p.imag
p = 1.0 / p
k = (numpy.prod(-p, axis=0) / numpy.prod(-z, axis=0)).real
return z, p, k
EPSILON = 2e-16
def _vratio(u, ineps, mp):
[s, c, d, phi] = special.ellipj(u, mp)
ret = abs(ineps - s / c)
return ret
def _kratio(m, k_ratio):
m = float(m)
if m < 0:
m = 0.0
if m > 1:
m = 1.0
if abs(m) > EPSILON and (abs(m) + EPSILON) < 1:
k = special.ellipk([m, 1 - m])
r = k[0] / k[1] - k_ratio
elif abs(m) > EPSILON:
r = -k_ratio
else:
r = 1e20
return abs(r)
def ellipap(N, rp, rs):
"""Return (z,p,k) of Nth-order elliptic analog lowpass filter.
The filter is a normalized prototype that has `rp` decibels of ripple
in the passband and a stopband `rs` decibels down.
The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
defined as the point at which the gain first drops below ``-rp``.
See Also
--------
ellip : Filter design function using this prototype
References
----------
.. [1] Lutova, Tosic, and Evans, "Filter Design for Signal Processing",
Chapters 5 and 12.
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
elif N == 0:
# Avoid divide-by-zero warning
# Even order filters have DC gain of -rp dB
return numpy.array([]), numpy.array([]), 10**(-rp/20)
elif N == 1:
p = -sqrt(1.0 / (10 ** (0.1 * rp) - 1.0))
k = -p
z = []
return asarray(z), asarray(p), k
eps = numpy.sqrt(10 ** (0.1 * rp) - 1)
ck1 = eps / numpy.sqrt(10 ** (0.1 * rs) - 1)
ck1p = numpy.sqrt(1 - ck1 * ck1)
if ck1p == 1:
raise ValueError("Cannot design a filter with given rp and rs"
" specifications.")
val = special.ellipk([ck1 * ck1, ck1p * ck1p])
if abs(1 - ck1p * ck1p) < EPSILON:
krat = 0
else:
krat = N * val[0] / val[1]
m = optimize.fmin(_kratio, [0.5], args=(krat,), maxfun=250, maxiter=250,
disp=0)
if m < 0 or m > 1:
m = optimize.fminbound(_kratio, 0, 1, args=(krat,), maxfun=250,
maxiter=250, disp=0)
capk = special.ellipk(m)
j = numpy.arange(1 - N % 2, N, 2)
jj = len(j)
[s, c, d, phi] = special.ellipj(j * capk / N, m * numpy.ones(jj))
snew = numpy.compress(abs(s) > EPSILON, s, axis=-1)
z = 1.0 / (sqrt(m) * snew)
z = 1j * z
z = numpy.concatenate((z, conjugate(z)))
r = optimize.fmin(_vratio, special.ellipk(m), args=(1. / eps, ck1p * ck1p),
maxfun=250, maxiter=250, disp=0)
v0 = capk * r / (N * val[0])
[sv, cv, dv, phi] = special.ellipj(v0, 1 - m)
p = -(c * d * sv * cv + 1j * s * dv) / (1 - (d * sv) ** 2.0)
if N % 2:
newp = numpy.compress(abs(p.imag) > EPSILON *
numpy.sqrt(numpy.sum(p * numpy.conjugate(p),
axis=0).real),
p, axis=-1)
p = numpy.concatenate((p, conjugate(newp)))
else:
p = numpy.concatenate((p, conjugate(p)))
k = (numpy.prod(-p, axis=0) / numpy.prod(-z, axis=0)).real
if N % 2 == 0:
k = k / numpy.sqrt((1 + eps * eps))
return z, p, k
# TODO: Make this a real public function scipy.misc.ff
def _falling_factorial(x, n):
r"""
Return the factorial of `x` to the `n` falling.
This is defined as:
.. math:: x^\underline n = (x)_n = x (x-1) \cdots (x-n+1)
This can more efficiently calculate ratios of factorials, since:
n!/m! == falling_factorial(n, n-m)
where n >= m
skipping the factors that cancel out
the usual factorial n! == ff(n, n)
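For example (illustrative only):
>>> _falling_factorial(6, 3)  # 6 * 5 * 4
120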
"""
val = 1
for k in range(x - n + 1, x + 1):
val *= k
return val
def _bessel_poly(n, reverse=False):
"""
Return the coefficients of Bessel polynomial of degree `n`
If `reverse` is true, a reverse Bessel polynomial is output.
Output is a list of coefficients:
[1] = 1
[1, 1] = 1*s + 1
[1, 3, 3] = 1*s^2 + 3*s + 3
[1, 6, 15, 15] = 1*s^3 + 6*s^2 + 15*s + 15
[1, 10, 45, 105, 105] = 1*s^4 + 10*s^3 + 45*s^2 + 105*s + 105
etc.
Output is a Python list of arbitrary precision long ints, so n is only
limited by your hardware's memory.
Sequence is http://oeis.org/A001498 , and output can be confirmed to
match http://oeis.org/A001498/b001498.txt :
>>> i = 0
>>> for n in range(51):
... for x in _bessel_poly(n, reverse=True):
... print(i, x)
... i += 1
"""
if abs(int(n)) != n:
raise ValueError("Polynomial order must be a nonnegative integer")
else:
n = int(n) # np.int32 doesn't work, for instance
out = []
for k in range(n + 1):
num = _falling_factorial(2*n - k, n)
den = 2**(n - k) * factorial(k, exact=True)
out.append(num // den)
if reverse:
return out[::-1]
else:
return out
def _campos_zeros(n):
"""
Return approximate zero locations of Bessel polynomials y_n(x) for order
`n` using polynomial fit (Campos-Calderon 2011)
"""
if n == 1:
return asarray([-1+0j])
s = npp_polyval(n, [0, 0, 2, 0, -3, 1])
b3 = npp_polyval(n, [16, -8]) / s
b2 = npp_polyval(n, [-24, -12, 12]) / s
b1 = npp_polyval(n, [8, 24, -12, -2]) / s
b0 = npp_polyval(n, [0, -6, 0, 5, -1]) / s
r = npp_polyval(n, [0, 0, 2, 1])
a1 = npp_polyval(n, [-6, -6]) / r
a2 = 6 / r
k = np.arange(1, n+1)
x = npp_polyval(k, [0, a1, a2])
y = npp_polyval(k, [b0, b1, b2, b3])
return x + 1j*y
def _aberth(f, fp, x0, tol=1e-15, maxiter=50):
"""
Given a function `f`, its first derivative `fp`, and a set of initial
guesses `x0`, simultaneously find the roots of the polynomial using the
Aberth-Ehrlich method.
``len(x0)`` should equal the number of roots of `f`.
(This is not a complete implementation of Bini's algorithm.)
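A tiny illustrative sketch (not part of the original docstring): find both
roots of ``x**2 - 1`` from rough complex starting guesses:
>>> import numpy as np
>>> roots = _aberth(lambda x: x**2 - 1, lambda x: 2*x,
...                 np.array([0.5 + 0.2j, -0.5 - 0.2j]))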
"""
N = len(x0)
x = array(x0, complex)
beta = np.empty_like(x0)
for iteration in range(maxiter):
alpha = -f(x) / fp(x) # Newton's method
# Model "repulsion" between zeros
for k in range(N):
beta[k] = np.sum(1/(x[k] - x[k+1:]))
beta[k] += np.sum(1/(x[k] - x[:k]))
x += alpha / (1 + alpha * beta)
if not all(np.isfinite(x)):
raise RuntimeError('Root-finding calculation failed')
# Mekwi: The iterative process can be stopped when |hn| has become
# less than the largest error one is willing to permit in the root.
if all(abs(alpha) <= tol):
break
else:
raise Exception('Zeros failed to converge')
return x
def _bessel_zeros(N):
"""
Find zeros of ordinary Bessel polynomial of order `N`, by root-finding of
modified Bessel function of the second kind
"""
if N == 0:
return asarray([])
# Generate starting points
x0 = _campos_zeros(N)
# Zeros are the same for exp(1/x)*K_{N+0.5}(1/x) and Nth-order ordinary
# Bessel polynomial y_N(x)
def f(x):
return special.kve(N+0.5, 1/x)
# First derivative of above
def fp(x):
return (special.kve(N-0.5, 1/x)/(2*x**2) -
special.kve(N+0.5, 1/x)/(x**2) +
special.kve(N+1.5, 1/x)/(2*x**2))
# Starting points converge to true zeros
x = _aberth(f, fp, x0)
# Improve precision using Newton's method on each
for i in range(len(x)):
x[i] = optimize.newton(f, x[i], fp, tol=1e-15)
# Average complex conjugates to make them exactly symmetrical
x = np.mean((x, x[::-1].conj()), 0)
# Zeros should sum to -1
if abs(np.sum(x) + 1) > 1e-15:
raise RuntimeError('Generated zeros are inaccurate')
return x
def _norm_factor(p, k):
"""
Numerically find frequency shift to apply to delay-normalized filter such
that -3 dB point is at 1 rad/sec.
`p` is an array_like of polynomial poles
`k` is a float gain
First 10 values are listed in "Bessel Scale Factors" table,
"Bessel Filters Polynomials, Poles and Circuit Elements 2003, C. Bond."
"""
p = asarray(p, dtype=complex)
def G(w):
"""
Gain of filter
"""
return abs(k / prod(1j*w - p))
def cutoff(w):
"""
When gain = -3 dB, return 0
"""
return G(w) - 1/np.sqrt(2)
return optimize.newton(cutoff, 1.5)
def besselap(N, norm='phase'):
"""
Return (z,p,k) for analog prototype of an Nth-order Bessel filter.
Parameters
----------
N : int
The order of the filter.
norm : {'phase', 'delay', 'mag'}, optional
Frequency normalization:
``phase``
The filter is normalized such that the phase response reaches its
midpoint at an angular (e.g. rad/s) cutoff frequency of 1. This
happens for both low-pass and high-pass filters, so this is the
"phase-matched" case. [6]_
The magnitude response asymptotes are the same as a Butterworth
filter of the same order with a cutoff of `Wn`.
This is the default, and matches MATLAB's implementation.
``delay``
The filter is normalized such that the group delay in the passband
is 1 (e.g. 1 second). This is the "natural" type obtained by
solving Bessel polynomials
``mag``
The filter is normalized such that the gain magnitude is -3 dB at
angular frequency 1. This is called "frequency normalization" by
Bond. [1]_
.. versionadded:: 0.18.0
Returns
-------
z : ndarray
Zeros of the transfer function; this is always an empty array.
p : ndarray
Poles of the transfer function.
k : scalar
Gain of the transfer function. For phase-normalized, this is always 1.
See Also
--------
bessel : Filter design function using this prototype
Notes
-----
To find the pole locations, approximate starting points are generated [2]_
for the zeros of the ordinary Bessel polynomial [3]_, then the
Aberth-Ehrlich method [4]_ [5]_ is used on the Kv(x) Bessel function to
calculate more accurate zeros, and these locations are then inverted about
the unit circle.
References
----------
.. [1] C.R. Bond, "Bessel Filter Constants",
http://www.crbond.com/papers/bsf.pdf
.. [2] Campos and Calderon, "Approximate closed-form formulas for the
zeros of the Bessel Polynomials", :arXiv:`1105.0957`.
.. [3] Thomson, W.E., "Delay Networks having Maximally Flat Frequency
Characteristics", Proceedings of the Institution of Electrical
Engineers, Part III, November 1949, Vol. 96, No. 44, pp. 487-490.
.. [4] Aberth, "Iteration Methods for Finding all Zeros of a Polynomial
Simultaneously", Mathematics of Computation, Vol. 27, No. 122,
April 1973
.. [5] Ehrlich, "A modified Newton method for polynomials", Communications
of the ACM, Vol. 10, Issue 2, pp. 107-108, Feb. 1967,
:DOI:`10.1145/363067.363115`
.. [6] Miller and Bohn, "A Bessel Filter Crossover, and Its Relation to
Others", RaneNote 147, 1998, http://www.rane.com/note147.html
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
if N == 0:
p = []
k = 1
else:
# Find roots of reverse Bessel polynomial
p = 1/_bessel_zeros(N)
a_last = _falling_factorial(2*N, N) // 2**N
# Shift them to a different normalization if required
if norm in ('delay', 'mag'):
# Normalized for group delay of 1
k = a_last
if norm == 'mag':
# -3 dB magnitude point is at 1 rad/sec
norm_factor = _norm_factor(p, k)
p /= norm_factor
k = norm_factor**-N * a_last
elif norm == 'phase':
# Phase-matched (1/2 max phase shift at 1 rad/sec)
# Asymptotes are same as Butterworth filter
p *= 10**(-math.log10(a_last)/N)
k = 1
else:
raise ValueError('normalization not understood')
return asarray([]), asarray(p, dtype=complex), float(k)
def iirnotch(w0, Q):
"""
Design second-order IIR notch digital filter.
A notch filter is a band-stop filter with a narrow bandwidth
(high quality factor). It rejects a narrow frequency band and
leaves the rest of the spectrum little changed.
Parameters
----------
w0 : float
Normalized frequency to remove from a signal. It is a
scalar that must satisfy ``0 < w0 < 1``, with ``w0 = 1``
corresponding to half of the sampling frequency.
Q : float
Quality factor. Dimensionless parameter that characterizes
notch filter -3 dB bandwidth ``bw`` relative to its center
frequency, ``Q = w0/bw``.
Returns
-------
b, a : ndarray, ndarray
Numerator (``b``) and denominator (``a``) polynomials
of the IIR filter.
See Also
--------
iirpeak
Notes
-----
.. versionadded:: 0.19.0
References
----------
.. [1] Sophocles J. Orfanidis, "Introduction To Signal Processing",
Prentice-Hall, 1996
Examples
--------
Design and plot filter to remove the 60Hz component from a
signal sampled at 200Hz, using a quality factor Q = 30
>>> from scipy import signal
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> fs = 200.0 # Sample frequency (Hz)
>>> f0 = 60.0 # Frequency to be removed from signal (Hz)
>>> Q = 30.0 # Quality factor
>>> w0 = f0/(fs/2) # Normalized Frequency
>>> # Design notch filter
>>> b, a = signal.iirnotch(w0, Q)
>>> # Frequency response
>>> w, h = signal.freqz(b, a)
>>> # Generate frequency axis
>>> freq = w*fs/(2*np.pi)
>>> # Plot
>>> fig, ax = plt.subplots(2, 1, figsize=(8, 6))
>>> ax[0].plot(freq, 20*np.log10(abs(h)), color='blue')
>>> ax[0].set_title("Frequency Response")
>>> ax[0].set_ylabel("Amplitude (dB)", color='blue')
>>> ax[0].set_xlim([0, 100])
>>> ax[0].set_ylim([-25, 10])
>>> ax[0].grid()
>>> ax[1].plot(freq, np.unwrap(np.angle(h))*180/np.pi, color='green')
>>> ax[1].set_ylabel("Angle (degrees)", color='green')
>>> ax[1].set_xlabel("Frequency (Hz)")
>>> ax[1].set_xlim([0, 100])
>>> ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90])
>>> ax[1].set_ylim([-90, 90])
>>> ax[1].grid()
>>> plt.show()
"""
return _design_notch_peak_filter(w0, Q, "notch")
def iirpeak(w0, Q):
"""
Design second-order IIR peak (resonant) digital filter.
A peak filter is a band-pass filter with a narrow bandwidth
(high quality factor). It rejects components outside a narrow
frequency band.
Parameters
----------
w0 : float
Normalized frequency to be retained in a signal. It is a
scalar that must satisfy ``0 < w0 < 1``, with ``w0 = 1`` corresponding
to half of the sampling frequency.
Q : float
Quality factor. Dimensionless parameter that characterizes
peak filter -3 dB bandwidth ``bw`` relative to its center
frequency, ``Q = w0/bw``.
Returns
-------
b, a : ndarray, ndarray
Numerator (``b``) and denominator (``a``) polynomials
of the IIR filter.
See Also
--------
iirnotch
Notes
-----
.. versionadded:: 0.19.0
References
----------
.. [1] Sophocles J. Orfanidis, "Introduction To Signal Processing",
Prentice-Hall, 1996
Examples
--------
Design and plot filter to remove the frequencies other than the 300Hz
component from a signal sampled at 1000Hz, using a quality factor Q = 30
>>> from scipy import signal
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> fs = 1000.0 # Sample frequency (Hz)
>>> f0 = 300.0 # Frequency to be retained (Hz)
>>> Q = 30.0 # Quality factor
>>> w0 = f0/(fs/2) # Normalized Frequency
>>> # Design peak filter
>>> b, a = signal.iirpeak(w0, Q)
>>> # Frequency response
>>> w, h = signal.freqz(b, a)
>>> # Generate frequency axis
>>> freq = w*fs/(2*np.pi)
>>> # Plot
>>> fig, ax = plt.subplots(2, 1, figsize=(8, 6))
>>> ax[0].plot(freq, 20*np.log10(abs(h)), color='blue')
>>> ax[0].set_title("Frequency Response")
>>> ax[0].set_ylabel("Amplitude (dB)", color='blue')
>>> ax[0].set_xlim([0, 500])
>>> ax[0].set_ylim([-50, 10])
>>> ax[0].grid()
>>> ax[1].plot(freq, np.unwrap(np.angle(h))*180/np.pi, color='green')
>>> ax[1].set_ylabel("Angle (degrees)", color='green')
>>> ax[1].set_xlabel("Frequency (Hz)")
>>> ax[1].set_xlim([0, 500])
>>> ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90])
>>> ax[1].set_ylim([-90, 90])
>>> ax[1].grid()
>>> plt.show()
"""
return _design_notch_peak_filter(w0, Q, "peak")
def _design_notch_peak_filter(w0, Q, ftype):
"""
Design notch or peak digital filter.
Parameters
----------
w0 : float
Normalized frequency to remove from a signal. It is a
scalar that must satisfy ``0 < w0 < 1``, with ``w0 = 1``
corresponding to half of the sampling frequency.
Q : float
Quality factor. Dimensionless parameter that characterizes
notch filter -3 dB bandwidth ``bw`` relative to its center
frequency, ``Q = w0/bw``.
ftype : str
The type of IIR filter to design:
- notch filter : ``notch``
- peak filter : ``peak``
Returns
-------
b, a : ndarray, ndarray
Numerator (``b``) and denominator (``a``) polynomials
of the IIR filter.
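For illustration (not part of the original docstring), `iirnotch` and
`iirpeak` are thin wrappers around this helper:
>>> b, a = _design_notch_peak_filter(0.6, 30, "notch")  # same as iirnotch(0.6, 30)
>>> b, a = _design_notch_peak_filter(0.6, 30, "peak")   # same as iirpeak(0.6, 30)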
"""
# Guarantee that the inputs are floats
w0 = float(w0)
Q = float(Q)
# Checks if w0 is within the range
if w0 > 1.0 or w0 < 0.0:
raise ValueError("w0 should be such that 0 < w0 < 1")
# Get bandwidth
bw = w0/Q
# Normalize inputs
bw = bw*np.pi
w0 = w0*np.pi
# Compute -3 dB attenuation
gb = 1/np.sqrt(2)
if ftype == "notch":
# Compute beta: formula 11.3.4 (p.575) from reference [1]
beta = (np.sqrt(1.0-gb**2.0)/gb)*np.tan(bw/2.0)
elif ftype == "peak":
# Compute beta: formula 11.3.19 (p.579) from reference [1]
beta = (gb/np.sqrt(1.0-gb**2.0))*np.tan(bw/2.0)
else:
raise ValueError("Unknown ftype.")
# Compute gain: formula 11.3.6 (p.575) from reference [1]
gain = 1.0/(1.0+beta)
# Compute numerator b and denominator a
# formulas 11.3.7 (p.575) and 11.3.21 (p.579)
# from reference [1]
if ftype == "notch":
b = gain*np.array([1.0, -2.0*np.cos(w0), 1.0])
else:
b = (1.0-gain)*np.array([1.0, 0.0, -1.0])
a = np.array([1.0, -2.0*gain*np.cos(w0), (2.0*gain-1.0)])
return b, a
filter_dict = {'butter': [buttap, buttord],
'butterworth': [buttap, buttord],
'cauer': [ellipap, ellipord],
'elliptic': [ellipap, ellipord],
'ellip': [ellipap, ellipord],
'bessel': [besselap],
'bessel_phase': [besselap],
'bessel_delay': [besselap],
'bessel_mag': [besselap],
'cheby1': [cheb1ap, cheb1ord],
'chebyshev1': [cheb1ap, cheb1ord],
'chebyshevi': [cheb1ap, cheb1ord],
'cheby2': [cheb2ap, cheb2ord],
'chebyshev2': [cheb2ap, cheb2ord],
'chebyshevii': [cheb2ap, cheb2ord],
}
band_dict = {'band': 'bandpass',
'bandpass': 'bandpass',
'pass': 'bandpass',
'bp': 'bandpass',
'bs': 'bandstop',
'bandstop': 'bandstop',
'bands': 'bandstop',
'stop': 'bandstop',
'l': 'lowpass',
'low': 'lowpass',
'lowpass': 'lowpass',
'lp': 'lowpass',
'high': 'highpass',
'highpass': 'highpass',
'h': 'highpass',
'hp': 'highpass',
}
bessel_norms = {'bessel': 'phase',
'bessel_phase': 'phase',
'bessel_delay': 'delay',
'bessel_mag': 'mag'}
| bsd-3-clause |
Caoimhinmg/PmagPy | programs/deprecated/zeq_magic2.py | 1 | 43301 | #!/usr/bin/env python
import sys
import os
import matplotlib
if matplotlib.get_backend() != "TKAgg":
matplotlib.use("TKAgg")
import pmagpy.pmagplotlib as pmagplotlib
import pmagpy.pmag as pmag
def save_redo(SpecRecs,inspec):
print "Saving changes to specimen file"
pmag.magic_write(inspec,SpecRecs,'pmag_specimens')
def main():
"""
NAME
zeq_magic.py
DESCRIPTION
reads in magic_measurements formatted file, makes plots of remanence decay
during demagnetization experiments. Reads in prior interpretations saved in
a pmag_specimens formatted file and allows re-interpretations of best-fit lines
and planes and saves (revised or new) interpretations in a pmag_specimens file.
Interpretations are saved in the coordinate system used. Also allows judicious editing of
measurements to eliminate "bad" measurements. These are marked as such in the magic_measurements
input file. they are NOT deleted, just ignored.
SYNTAX
zeq_magic.py [command line options]
OPTIONS
-h prints help message and quits
-f MEASFILE: sets magic_measurements format input file, default: magic_measurements.txt
-fsp SPECFILE: sets pmag_specimens format file with prior interpretations, default: zeq_specimens.txt
-Fp PLTFILE: sets filename for saved plot, default is name_type.fmt (where type is zijd, eqarea or decay curve)
-crd [s,g,t]: sets coordinate system, g=geographic, t=tilt adjusted, default: specimen coordinate system
-fsa SAMPFILE: sets er_samples format file with orientation information, default: er_samples.txt
-spc SPEC plots single specimen SPEC, saves plot with specified format
with optional -dir settings and quits
-dir [L,P,F][beg][end]: sets calculation type for principal component analysis, default is none
beg: starting step for PCA calculation
end: ending step for PCA calculation
[L,P,F]: calculation type for line, plane or fisher mean
must be used with -spc option
-fmt FMT: set format of saved plot [png,svg,jpg]
-A: suppresses averaging of replicate measurements, default is to average
-sav: saves all plots without review
SCREEN OUTPUT:
Specimen, N, a95, StepMin, StepMax, Dec, Inc, calculation type
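EXAMPLE
An illustrative non-interactive run (flags as documented above), plotting in
geographic coordinates and saving all plots as png files without review:
zeq_magic.py -crd g -sav -fmt png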
"""
# initialize some variables
doave,e,b=1,0,0 # average replicates, initial end and beginning step
plots,coord=0,'s'
noorient=0
version_num=pmag.get_version()
verbose=pmagplotlib.verbose
beg_pca,end_pca,direction_type="","",'l'
calculation_type,fmt="","svg"
user,spec_keys,locname="",[],''
plot_file=""
sfile=""
plot_file=""
PriorRecs=[] # empty list for prior interpretations
backup=0
specimen="" # can skip everything and just plot one specimen with bounds e,b
if '-h' in sys.argv:
print main.__doc__
sys.exit()
if '-WD' in sys.argv:
ind=sys.argv.index('-WD')
dir_path=sys.argv[ind+1]
else:
dir_path='.'
inspec=dir_path+'/'+'zeq_specimens.txt'
meas_file,geo,tilt,ask,samp_file=dir_path+'/magic_measurements.txt',0,0,0,dir_path+'/er_samples.txt'
if '-f' in sys.argv:
ind=sys.argv.index('-f')
meas_file=dir_path+'/'+sys.argv[ind+1]
if '-fsp' in sys.argv:
ind=sys.argv.index('-fsp')
inspec=dir_path+'/'+sys.argv[ind+1]
if '-fsa' in sys.argv:
ind=sys.argv.index('-fsa')
samp_file=dir_path+'/'+sys.argv[ind+1]
sfile='ok'
if '-crd' in sys.argv:
ind=sys.argv.index('-crd')
coord=sys.argv[ind+1]
if coord=='g' or coord=='t':
samp_data,file_type=pmag.magic_read(samp_file)
if file_type=='er_samples':sfile='ok'
geo=1
if coord=='t':tilt=1
if '-spc' in sys.argv:
ind=sys.argv.index('-spc')
specimen=sys.argv[ind+1]
if '-dir' in sys.argv:
ind=sys.argv.index('-dir')
direction_type=sys.argv[ind+1]
beg_pca=int(sys.argv[ind+2])
end_pca=int(sys.argv[ind+3])
if direction_type=='L':calculation_type='DE-BFL'
if direction_type=='P':calculation_type='DE-BFP'
if direction_type=='F':calculation_type='DE-FM'
if '-Fp' in sys.argv:
ind=sys.argv.index('-Fp')
plot_file=dir_path+'/'+sys.argv[ind+1]
if '-A' in sys.argv: doave=0
if '-sav' in sys.argv:
plots=1
verbose=0
if '-fmt' in sys.argv:
ind=sys.argv.index('-fmt')
fmt=sys.argv[ind+1]
#
first_save=1
meas_data,file_type=pmag.magic_read(meas_file)
changeM,changeS=0,0 # check if data or interpretations have changed
if file_type != 'magic_measurements':
print file_type
print file_type,"This is not a valid magic_measurements file "
sys.exit()
for rec in meas_data:
if "magic_method_codes" not in rec.keys(): rec["magic_method_codes"]=""
methods=""
tmp=rec["magic_method_codes"].replace(" ","").split(":")
for meth in tmp:
methods=methods+meth+":"
rec["magic_method_codes"]=methods[:-1] # get rid of annoying spaces in Anthony's export files
if "magic_instrument_codes" not in rec.keys() :rec["magic_instrument_codes"]=""
PriorSpecs=[]
PriorRecs,file_type=pmag.magic_read(inspec)
if len(PriorRecs)==0:
if verbose:print "starting new file ",inspec
for Rec in PriorRecs:
if 'magic_software_packages' not in Rec.keys():Rec['magic_software_packages']=""
if Rec['er_specimen_name'] not in PriorSpecs:
if 'specimen_comp_name' not in Rec.keys():Rec['specimen_comp_name']="A"
PriorSpecs.append(Rec['er_specimen_name'])
else:
if 'specimen_comp_name' not in Rec.keys():Rec['specimen_comp_name']="A"
if "magic_method_codes" in Rec.keys():
methods=[]
tmp=Rec["magic_method_codes"].replace(" ","").split(":")
for meth in tmp:
methods.append(meth)
if 'DE-FM' in methods:
Rec['calculation_type']='DE-FM' # this won't be imported but helps
if 'DE-BFL' in methods:
Rec['calculation_type']='DE-BFL'
if 'DE-BFL-A' in methods:
Rec['calculation_type']='DE-BFL-A'
if 'DE-BFL-O' in methods:
Rec['calculation_type']='DE-BFL-O'
if 'DE-BFP' in methods:
Rec['calculation_type']='DE-BFP'
else:
Rec['calculation_type']='DE-BFL' # default is to assume a best-fit line
#
# get list of unique specimen names
#
sids=pmag.get_specs(meas_data)
#
# set up plots, angle sets X axis to horizontal, direction_type 'l' is best-fit line
# direction_type='p' is great circle
#
#
# draw plots for sample s - default is just to step through zijderveld diagrams
#
#
# define figure numbers for equal area, zijderveld,
# and intensity vs. demagnetization step respectively
ZED={}
ZED['eqarea'],ZED['zijd'], ZED['demag']=1,2,3
pmagplotlib.plot_init(ZED['eqarea'],5,5)
pmagplotlib.plot_init(ZED['zijd'],6,5)
pmagplotlib.plot_init(ZED['demag'],5,5)
save_pca=0
if specimen=="":
k = 0
else:
k=sids.index(specimen)
angle,direction_type="",""
setangle=0
CurrRecs=[]
while k < len(sids):
CurrRecs=[]
if setangle==0:angle=""
method_codes,inst_code=[],""
s=sids[k]
PmagSpecRec={}
PmagSpecRec["er_analyst_mail_names"]=user
PmagSpecRec['magic_software_packages']=version_num
PmagSpecRec['specimen_description']=""
PmagSpecRec['magic_method_codes']=""
if verbose and s!="":print s, k , 'out of ',len(sids)
#
# collect info for the PmagSpecRec dictionary
#
s_meas=pmag.get_dictitem(meas_data,'er_specimen_name',s,'T') # fish out this specimen
s_meas=pmag.get_dictitem(s_meas,'magic_method_codes','Z','has') # fish out zero field steps
if len(s_meas)>0:
for rec in s_meas: # fix up a few things for the output record
PmagSpecRec["magic_instrument_codes"]=rec["magic_instrument_codes"] # copy over instruments
PmagSpecRec["er_citation_names"]="This study"
PmagSpecRec["er_specimen_name"]=s
PmagSpecRec["er_sample_name"]=rec["er_sample_name"]
PmagSpecRec["er_site_name"]=rec["er_site_name"]
PmagSpecRec["er_location_name"]=rec["er_location_name"]
locname=rec['er_location_name']
if 'er_expedition_name' in rec.keys(): PmagSpecRec["er_expedition_name"]=rec["er_expedition_name"]
PmagSpecRec["magic_method_codes"]=rec["magic_method_codes"]
if "magic_experiment_name" not in rec.keys():
PmagSpecRec["magic_experiment_names"]=""
else:
PmagSpecRec["magic_experiment_names"]=rec["magic_experiment_name"]
break
#
# find the data from the meas_data file for this specimen
#
data,units=pmag.find_dmag_rec(s,meas_data)
PmagSpecRec["measurement_step_unit"]= units
u=units.split(":")
if "T" in units:PmagSpecRec["magic_method_codes"]=PmagSpecRec["magic_method_codes"]+":LP-DIR-AF"
if "K" in units:PmagSpecRec["magic_method_codes"]=PmagSpecRec["magic_method_codes"]+":LP-DIR-T"
if "J" in units:PmagSpecRec["magic_method_codes"]=PmagSpecRec["magic_method_codes"]+":LP-DIR-M"
#
# find prior interpretation
#
if len(CurrRecs)==0: # check if already in
beg_pca,end_pca="",""
calculation_type=""
if inspec !="":
if verbose: print " looking up previous interpretations..."
precs=pmag.get_dictitem(PriorRecs,'er_specimen_name',s,'T') # get all the prior recs with this specimen name
precs=pmag.get_dictitem(precs,'magic_method_codes','LP-DIR','has') # get the directional data
PriorRecs=pmag.get_dictitem(PriorRecs,'er_specimen_name',s,'F') # take them all out of prior recs
# get the ones that meet the current coordinate system
for prec in precs:
if 'specimen_tilt_correction' not in prec.keys() or prec['specimen_tilt_correction']=='-1':
crd='s'
elif prec['specimen_tilt_correction']=='0':
crd='g'
elif prec['specimen_tilt_correction']=='100':
crd='t'
else:
crd='?'
CurrRec={}
for key in prec.keys():CurrRec[key]=prec[key]
CurrRecs.append(CurrRec) # put in CurrRecs
method_codes= CurrRec["magic_method_codes"].replace(" ","").split(':')
calculation_type='DE-BFL'
if 'DE-FM' in method_codes: calculation_type='DE-FM'
if 'DE-BFP' in method_codes: calculation_type='DE-BFP'
if 'DE-BFL-A' in method_codes: calculation_type='DE-BFL-A'
if 'specimen_dang' not in CurrRec.keys():
if verbose:print 'Run mk_redo.py and zeq_magic_redo.py to get the specimen_dang values'
CurrRec['specimen_dang']=-1
if calculation_type!='DE-FM' and crd==coord: # not a fisher mean
if verbose:print "Specimen N MAD DANG start end dec inc type component coordinates"
if units=='K':
if verbose:print '%s %i %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %s %s %s \n' % (CurrRec["er_specimen_name"],int(CurrRec["specimen_n"]),float(CurrRec["specimen_mad"]),float(CurrRec["specimen_dang"]),float(CurrRec["measurement_step_min"])-273,float(CurrRec["measurement_step_max"])-273,float(CurrRec["specimen_dec"]),float(CurrRec["specimen_inc"]),calculation_type,CurrRec['specimen_comp_name'],crd)
elif units=='T':
if verbose:print '%s %i %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %s %s %s \n' % (CurrRec["er_specimen_name"],int(CurrRec["specimen_n"]),float(CurrRec["specimen_mad"]),float(CurrRec["specimen_dang"]),float(CurrRec["measurement_step_min"])*1e3,float(CurrRec["measurement_step_max"])*1e3,float(CurrRec["specimen_dec"]),float(CurrRec["specimen_inc"]),calculation_type,CurrRec['specimen_comp_name'],crd)
elif 'T' in units and 'K' in units:
if float(CurrRec['measurement_step_min'])<1.0 :
min=float(CurrRec['measurement_step_min'])*1e3
else:
min=float(CurrRec['measurement_step_min'])-273
if float(CurrRec['measurement_step_max'])<1.0 :
max=float(CurrRec['measurement_step_max'])*1e3
else:
max=float(CurrRec['measurement_step_max'])-273
if verbose:print '%s %i %7.1f %i %i %7.1f %7.1f %7.1f, %s %s\n' % (CurrRec["er_specimen_name"],int(CurrRec["specimen_n"]),float(CurrRec["specimen_mad"]),float(CurrRec['specimen_dang']),min,max,float(CurrRec["specimen_dec"]),float(CurrRec["specimen_inc"]),calculation_type,crd)
elif 'J' in units:
if verbose:print '%s %i %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %s %s %s \n' % (CurrRec["er_specimen_name"],int(CurrRec["specimen_n"]),float(CurrRec["specimen_mad"]),float(CurrRec['specimen_dang']),float(CurrRec["measurement_step_min"]),float(CurrRec["measurement_step_max"]),float(CurrRec["specimen_dec"]),float(CurrRec["specimen_inc"]),calculation_type,CurrRec['specimen_comp_name'],crd)
elif calculation_type=='DE-FM' and crd==coord: # fisher mean
if verbose:print "Specimen a95 DANG start end dec inc type component coordinates"
if units=='K':
if verbose:print '%s %i %7.1f %7.1f %7.1f %7.1f %7.1f %s %s %s \n' % (CurrRec["er_specimen_name"],int(CurrRec["specimen_n"]),float(CurrRec["specimen_alpha95"]),float(CurrRec["measurement_step_min"])-273,float(CurrRec["measurement_step_max"])-273,float(CurrRec["specimen_dec"]),float(CurrRec["specimen_inc"]),calculation_type,CurrRec['specimen_comp_name'],crd)
elif units=='T':
if verbose:print '%s %i %7.1f %7.1f %7.1f %7.1f %7.1f %s %s %s \n' % (CurrRec["er_specimen_name"],int(CurrRec["specimen_n"]),float(CurrRec["specimen_alpha95"]),float(CurrRec["measurement_step_min"])*1e3,float(CurrRec["measurement_step_max"])*1e3,float(CurrRec["specimen_dec"]),float(CurrRec["specimen_inc"]),calculation_type,CurrRec['specimen_comp_name'],crd)
elif 'T' in units and 'K' in units:
if float(CurrRec['measurement_step_min'])<1.0 :
min=float(CurrRec['measurement_step_min'])*1e3
else:
min=float(CurrRec['measurement_step_min'])-273
if float(CurrRec['measurement_step_max'])<1.0 :
max=float(CurrRec['measurement_step_max'])*1e3
else:
max=float(CurrRec['measurement_step_max'])-273
if verbose:print '%s %i %7.1f %i %i %7.1f %7.1f %s %s \n' % (CurrRec["er_specimen_name"],int(CurrRec["specimen_n"]),float(CurrRec["specimen_alpha95"]),min,max,float(CurrRec["specimen_dec"]),float(CurrRec["specimen_inc"]),calculation_type,crd)
elif 'J' in units:
if verbose:print '%s %i %7.1f %7.1f %7.1f %7.1f %7.1f %s %s %s \n' % (CurrRec["er_specimen_name"],int(CurrRec["specimen_n"]),float(CurrRec["specimen_mad"]),float(CurrRec["measurement_step_min"]),float(CurrRec["measurement_step_max"]),float(CurrRec["specimen_dec"]),float(CurrRec["specimen_inc"]),calculation_type,CurrRec['specimen_comp_name'],crd)
if len(CurrRecs)==0:beg_pca,end_pca="",""
datablock=data
noskip=1
if len(datablock) <3:
noskip=0
if backup==0:
k+=1
else:
k-=1
if len(CurrRecs)>0:
for rec in CurrRecs:
PriorRecs.append(rec)
CurrRecs=[]
else:
backup=0
if noskip:
#
# find replicate measurements at given treatment step and average them
#
# step_meth,avedata=pmag.vspec(data)
# if len(avedata) != len(datablock):
# if doave==1:
# method_codes.append("DE-VM")
# datablock=avedata
# #
# do geo or stratigraphic correction now
#
if geo==1:
#
# find top priority orientation method
orient,az_type=pmag.get_orient(samp_data,PmagSpecRec["er_sample_name"])
if az_type=='SO-NO':
if verbose: print "no orientation data for ",s
orient["sample_azimuth"]=0
orient["sample_dip"]=0
noorient=1
method_codes.append("SO-NO")
orient["sample_azimuth"]=0
orient["sample_dip"]=0
orient["sample_bed_dip_azimuth"]=0
orient["sample_bed_dip"]=0
noorient=1
method_codes.append("SO-NO")
else:
noorient=0
#
# if stratigraphic selected, get stratigraphic correction
#
tiltblock,geoblock=[],[]
for rec in datablock:
d_geo,i_geo=pmag.dogeo(rec[1],rec[2],float(orient["sample_azimuth"]),float(orient["sample_dip"]))
geoblock.append([rec[0],d_geo,i_geo,rec[3],rec[4],rec[5],rec[6]])
if tilt==1 and "sample_bed_dip" in orient.keys() and float(orient['sample_bed_dip'])!=0:
d_tilt,i_tilt=pmag.dotilt(d_geo,i_geo,float(orient["sample_bed_dip_direction"]),float(orient["sample_bed_dip"]))
tiltblock.append([rec[0],d_tilt,i_tilt,rec[3],rec[4],rec[5],rec[6]])
if tilt==1: plotblock=tiltblock
if geo==1 and tilt==0:plotblock=geoblock
if geo==0 and tilt==0: plotblock=datablock
#
# set the end pca point to last point if not set
if e==0 or e>len(plotblock)-1: e=len(plotblock)-1
if angle=="": angle=plotblock[0][1] # rotate to NRM declination
title=s+'_s'
if geo==1 and tilt==0 and noorient!=1:title=s+'_g'
if tilt==1 and noorient!=1:title=s+'_t'
pmagplotlib.plotZED(ZED,plotblock,angle,title,units)
if verbose:pmagplotlib.drawFIGS(ZED)
if len(CurrRecs)!=0:
for prec in CurrRecs:
if 'calculation_type' not in prec.keys():
calculation_type=''
else:
calculation_type=prec["calculation_type"]
direction_type=prec["specimen_direction_type"]
if calculation_type !="":
beg_pca,end_pca="",""
for j in range(len(datablock)):
if data[j][0]==float(prec["measurement_step_min"]):beg_pca=j
if data[j][0]==float(prec["measurement_step_max"]):end_pca=j
if beg_pca=="" or end_pca=="":
if verbose:
print "something wrong with prior interpretation "
break
if calculation_type!="":
if beg_pca=="":beg_pca=0
if end_pca=="":end_pca=len(plotblock)-1
if geo==1 and tilt==0:
mpars=pmag.domean(geoblock,beg_pca,end_pca,calculation_type)
if mpars["specimen_direction_type"]!="Error":
pmagplotlib.plotDir(ZED,mpars,geoblock,angle)
if verbose:pmagplotlib.drawFIGS(ZED)
if geo==1 and tilt==1:
mpars=pmag.domean(tiltblock,beg_pca,end_pca,calculation_type)
if mpars["specimen_direction_type"]!="Error":
pmagplotlib.plotDir(ZED,mpars,tiltblock,angle)
if verbose:pmagplotlib.drawFIGS(ZED)
if geo==0 and tilt==0:
mpars=pmag.domean(datablock,beg_pca,end_pca,calculation_type)
if mpars["specimen_direction_type"]!="Error":
pmagplotlib.plotDir(ZED,mpars,plotblock,angle)
if verbose:pmagplotlib.drawFIGS(ZED)
#
# print out data for this sample to screen
#
recnum=0
for plotrec in plotblock:
if units=='T' and verbose: print '%s: %i %7.1f %s %8.3e %7.1f %7.1f %s' % (plotrec[5], recnum,plotrec[0]*1e3," mT",plotrec[3],plotrec[1],plotrec[2],plotrec[6])
if units=="K" and verbose: print '%s: %i %7.1f %s %8.3e %7.1f %7.1f %s' % (plotrec[5], recnum,plotrec[0]-273,' C',plotrec[3],plotrec[1],plotrec[2],plotrec[6])
if units=="J" and verbose: print '%s: %i %7.1f %s %8.3e %7.1f %7.1f %s' % (plotrec[5], recnum,plotrec[0],' J',plotrec[3],plotrec[1],plotrec[2],plotrec[6])
if 'K' in units and 'T' in units:
if plotrec[0]>=1. and verbose: print '%s: %i %7.1f %s %8.3e %7.1f %7.1f %s' % (plotrec[5], recnum,plotrec[0]-273,' C',plotrec[3],plotrec[1],plotrec[2],plotrec[6])
if plotrec[0]<1. and verbose: print '%s: %i %7.1f %s %8.3e %7.1f %7.1f %s' % (plotrec[5], recnum,plotrec[0]*1e3," mT",plotrec[3],plotrec[1],plotrec[2],plotrec[6])
recnum += 1
if specimen!="":
if plot_file=="":
basename=locname+'_'+s
else:
basename=plot_file
files={}
for key in ZED.keys():
files[key]=basename+'_'+key+'.'+fmt
pmagplotlib.saveP(ZED,files)
sys.exit()
else: # interactive
if plots==0:
ans='b'
k+=1
changeS=0
while ans != "":
if len(CurrRecs)==0:
print """
g/b: indicates good/bad measurement. "bad" measurements excluded from calculation
set s[a]ve plot, [b]ounds for pca and calculate, [p]revious, [s]pecimen,
change [h]orizontal projection angle, change [c]oordinate systems,
[e]dit data, [q]uit:
"""
else:
print """
g/b: indicates good/bad measurement. "bad" measurements excluded from calculation
set s[a]ve plot, [b]ounds for pca and calculate, [p]revious, [s]pecimen,
change [h]orizontal projection angle, change [c]oordinate systems,
[d]elete current interpretation(s), [e]dit data, [q]uit:
"""
ans=raw_input('<Return> for next specimen \n')
setangle=0
if ans=='d': # delete this interpretation
CurrRecs=[]
k-=1 # replot same specimen
ans=""
changeS=1
if ans=='q':
if changeM==1:
ans=raw_input('Save changes to magic_measurements.txt? y/[n] ')
if ans=='y':
pmag.magic_write(meas_file,meas_data,'magic_measurements')
print "Good bye"
sys.exit()
if ans=='a':
if plot_file=="":
basename=locname+'_'+s+'_'
else:
basename=plot_file
files={}
for key in ZED.keys():
files[key]=basename+'_'+coord+'_'+key+'.'+fmt
pmagplotlib.saveP(ZED,files)
ans=""
if ans=='p':
k-=2
ans=""
backup=1
if ans=='c':
k-=1 # replot same block
if tilt==0 and geo ==1:print "You are currently viewing geographic coordinates "
if tilt==1 and geo ==1:print "You are currently viewing stratigraphic coordinates "
if tilt==0 and geo ==0: print "You are currently viewing sample coordinates "
print "\n Which coordinate system do you wish to view? "
coord=raw_input(" <Return> specimen, [g] geographic, [t] tilt corrected ")
if coord=="g":geo,tilt=1,0
if coord=="t":
geo=1
tilt=1
if coord=="":
coord='s'
geo=0
tilt=0
if geo==1 and sfile=="":
samp_file=raw_input(" Input er_samples file for sample orientations [er_samples.txt] " )
if samp_file=="":samp_file="er_samples.txt"
samp_data,file_type=pmag.magic_read(samp_file)
if file_type != 'er_samples':
print file_type
print "This is not a valid er_samples file - coordinate system not changed"
else:
sfile="ok"
ans=""
if ans=='s':
keepon=1
sample=raw_input('Enter desired specimen name (or first part there of): ')
while keepon==1:
try:
k =sids.index(sample)
keepon=0
except:
tmplist=[]
for qq in range(len(sids)):
if sample in sids[qq]:tmplist.append(sids[qq])
print sample," not found, but this was: "
print tmplist
sample=raw_input('Select one or try again\n ')
angle,direction_type="",""
setangle=0
ans=""
if ans=='h':
k-=1
angle=raw_input("Enter desired declination for X axis 0-360 ")
angle=float(angle)
if angle==0:angle=0.001
s=sids[k]
setangle=1
ans=""
if ans=='e':
k-=1
ans=""
recnum=0
for plotrec in plotblock:
if plotrec[0]<=200 and verbose: print '%s: %i %7.1f %s %8.3e %7.1f %7.1f ' % (plotrec[5], recnum,plotrec[0]*1e3," mT",plotrec[3],plotrec[1],plotrec[2])
if plotrec[0]>200 and verbose: print '%s: %i %7.1f %s %8.3e %7.1f %7.1f ' % (plotrec[5], recnum,plotrec[0]-273,' C',plotrec[3],plotrec[1],plotrec[2])
recnum += 1
answer=raw_input('Enter index of point to change from bad to good or vice versa: ')
try:
ind=int(answer)
meas_data=pmag.mark_dmag_rec(s,ind,meas_data)
changeM=1
except:
                        print 'bad entry, try again'
if ans=='b':
if end_pca=="":end_pca=len(plotblock)-1
if beg_pca=="":beg_pca=0
k-=1 # stay on same sample until through
GoOn=0
while GoOn==0:
print 'Enter index of first point for pca: ','[',beg_pca,']'
answer=raw_input('return to keep default ')
if answer != "":
beg_pca=int(answer)
print 'Enter index of last point for pca: ','[',end_pca,']'
answer=raw_input('return to keep default ')
try:
end_pca=int(answer)
if plotblock[beg_pca][5]=='b' or plotblock[end_pca][5]=='b':
print "Can't select 'bad' measurement for PCA bounds -try again"
end_pca=len(plotblock)-1
beg_pca=0
elif beg_pca >=0 and beg_pca<=len(plotblock)-2 and end_pca>0 and end_pca<len(plotblock):
GoOn=1
else:
print beg_pca,end_pca, " are bad entry of indices - try again"
end_pca=len(plotblock)-1
beg_pca=0
except:
print beg_pca,end_pca, " are bad entry of indices - try again"
end_pca=len(plotblock)-1
beg_pca=0
GoOn=0
while GoOn==0:
if calculation_type!="":
print "Prior calculation type = ",calculation_type
ct=raw_input('Enter new Calculation Type: best-fit line, plane or fisher mean [l]/p/f : ' )
if ct=="" or ct=="l":
direction_type="l"
calculation_type="DE-BFL"
GoOn=1
elif ct=='p':
direction_type="p"
calculation_type="DE-BFP"
GoOn=1
elif ct=='f':
direction_type="l"
calculation_type="DE-FM"
GoOn=1
else:
print "bad entry of calculation type: try again. "
pmagplotlib.plotZED(ZED,plotblock,angle,s,units)
if verbose:pmagplotlib.drawFIGS(ZED)
if geo==1 and tilt==0:
mpars=pmag.domean(geoblock,beg_pca,end_pca,calculation_type)
if mpars['specimen_direction_type']=='Error':break
PmagSpecRec["specimen_dec"]='%7.1f ' %(mpars["specimen_dec"])
PmagSpecRec["specimen_inc"]='%7.1f ' %(mpars["specimen_inc"])
if "SO-NO" not in method_codes:
PmagSpecRec["specimen_tilt_correction"]='0'
method_codes.append("DA-DIR-GEO")
else:
PmagSpecRec["specimen_tilt_correction"]='-1'
pmagplotlib.plotDir(ZED,mpars,geoblock,angle)
if verbose:pmagplotlib.drawFIGS(ZED)
if geo==1 and tilt==1:
mpars=pmag.domean(tiltblock,beg_pca,end_pca,calculation_type)
if mpars['specimen_direction_type']=='Error':break
PmagSpecRec["specimen_dec"]='%7.1f ' %(mpars["specimen_dec"])
PmagSpecRec["specimen_inc"]='%7.1f ' %(mpars["specimen_inc"])
if "SO-NO" not in method_codes:
PmagSpecRec["specimen_tilt_correction"]='100'
method_codes.append("DA-DIR-TILT")
else:
PmagSpecRec["specimen_tilt_correction"]='-1'
pmagplotlib.plotDir(ZED,mpars,tiltblock,angle)
if verbose:pmagplotlib.drawFIGS(ZED)
if geo==0 and tilt==0:
mpars=pmag.domean(datablock,beg_pca,end_pca,calculation_type)
if mpars['specimen_direction_type']=='Error':break
PmagSpecRec["specimen_dec"]='%7.1f ' %(mpars["specimen_dec"])
PmagSpecRec["specimen_inc"]='%7.1f ' %(mpars["specimen_inc"])
PmagSpecRec["specimen_tilt_correction"]='-1'
pmagplotlib.plotDir(ZED,mpars,plotblock,angle)
if verbose:pmagplotlib.drawFIGS(ZED)
PmagSpecRec["measurement_step_min"]='%8.3e ' %(mpars["measurement_step_min"])
PmagSpecRec["measurement_step_max"]='%8.3e ' %(mpars["measurement_step_max"])
PmagSpecRec["specimen_correction"]='u'
PmagSpecRec["specimen_dang"]='%7.1f ' %(mpars['specimen_dang'])
print 'DANG: ',PmagSpecRec["specimen_dang"]
if calculation_type!='DE-FM':
PmagSpecRec["specimen_mad"]='%7.1f ' %(mpars["specimen_mad"])
PmagSpecRec["specimen_alpha95"]=""
else:
PmagSpecRec["specimen_alpha95"]='%7.1f ' %(mpars["specimen_alpha95"])
PmagSpecRec["specimen_mad"]=""
PmagSpecRec["specimen_n"]='%i ' %(mpars["specimen_n"])
PmagSpecRec["specimen_direction_type"]=direction_type
PmagSpecRec["calculation_type"]=calculation_type # redundant and won't be imported - just for convenience
method_codes=PmagSpecRec["magic_method_codes"].split(':')
if len(method_codes) != 0:
methstring=""
for meth in method_codes:
ctype=meth.split('-')
if 'DE' not in ctype:methstring=methstring+ ":" +meth # don't include old direction estimation methods
methstring=methstring+':'+calculation_type
PmagSpecRec["magic_method_codes"]= methstring.strip(':')
print 'Method codes: ',PmagSpecRec['magic_method_codes']
if calculation_type!='DE-FM':
if units=='K':
print '%s %i %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f, %s \n' % (PmagSpecRec["er_specimen_name"],int(PmagSpecRec["specimen_n"]),float(PmagSpecRec["specimen_mad"]),float(PmagSpecRec["specimen_dang"]),float(PmagSpecRec["measurement_step_min"])-273,float(PmagSpecRec["measurement_step_max"])-273,float(PmagSpecRec["specimen_dec"]),float(PmagSpecRec["specimen_inc"]),calculation_type)
elif units== 'T':
print '%s %i %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f, %s \n' % (PmagSpecRec["er_specimen_name"],int(PmagSpecRec["specimen_n"]),float(PmagSpecRec["specimen_mad"]),float(PmagSpecRec["specimen_dang"]),float(PmagSpecRec["measurement_step_min"])*1e3,float(PmagSpecRec["measurement_step_max"])*1e3,float(PmagSpecRec["specimen_dec"]),float(PmagSpecRec["specimen_inc"]),calculation_type)
elif 'T' in units and 'K' in units:
if float(PmagSpecRec['measurement_step_min'])<1.0 :
min=float(PmagSpecRec['measurement_step_min'])*1e3
else:
min=float(PmagSpecRec['measurement_step_min'])-273
if float(PmagSpecRec['measurement_step_max'])<1.0 :
max=float(PmagSpecRec['measurement_step_max'])*1e3
else:
max=float(PmagSpecRec['measurement_step_max'])-273
print '%s %i %7.1f %i %i %7.1f %7.1f %7.1f, %s \n' % (PmagSpecRec["er_specimen_name"],int(PmagSpecRec["specimen_n"]),float(PmagSpecRec["specimen_mad"]),float(PmagSpecRec["specimen_dang"]),min,max,float(PmagSpecRec["specimen_dec"]),float(PmagSpecRec["specimen_inc"]),calculation_type)
else:
print '%s %i %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f, %s \n' % (PmagSpecRec["er_specimen_name"],int(PmagSpecRec["specimen_n"]),float(PmagSpecRec["specimen_mad"]),float(PmagSpecRec["specimen_dang"]),float(PmagSpecRec["measurement_step_min"]),float(PmagSpecRec["measurement_step_max"]),float(PmagSpecRec["specimen_dec"]),float(PmagSpecRec["specimen_inc"]),calculation_type)
else:
if 'K' in units:
print '%s %i %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f, %s \n' % (PmagSpecRec["er_specimen_name"],int(PmagSpecRec["specimen_n"]),float(PmagSpecRec["specimen_alpha95"]),float(PmagSpecRec["specimen_dang"]),float(PmagSpecRec["measurement_step_min"])-273,float(PmagSpecRec["measurement_step_max"])-273,float(PmagSpecRec["specimen_dec"]),float(PmagSpecRec["specimen_inc"]),calculation_type)
elif 'T' in units:
print '%s %i %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f, %s \n' % (PmagSpecRec["er_specimen_name"],int(PmagSpecRec["specimen_n"]),float(PmagSpecRec["specimen_alpha95"]),float(PmagSpecRec["specimen_dang"]),float(PmagSpecRec["measurement_step_min"])*1e3,float(PmagSpecRec["measurement_step_max"])*1e3,float(PmagSpecRec["specimen_dec"]),float(PmagSpecRec["specimen_inc"]),calculation_type)
elif 'T' in units and 'K' in units:
if float(PmagSpecRec['measurement_step_min'])<1.0 :
min=float(PmagSpecRec['measurement_step_min'])*1e3
else:
min=float(PmagSpecRec['measurement_step_min'])-273
if float(PmagSpecRec['measurement_step_max'])<1.0 :
max=float(PmagSpecRec['measurement_step_max'])*1e3
else:
max=float(PmagSpecRec['measurement_step_max'])-273
print '%s %i %7.1f %i %i %7.1f %7.1f, %s \n' % (PmagSpecRec["er_specimen_name"],int(PmagSpecRec["specimen_n"]),float(PmagSpecRec["specimen_alpha95"]),min,max,float(PmagSpecRec["specimen_dec"]),float(PmagSpecRec["specimen_inc"]),calculation_type)
else:
print '%s %i %7.1f %7.1f %7.1f %7.1f %7.1f, %s \n' % (PmagSpecRec["er_specimen_name"],int(PmagSpecRec["specimen_n"]),float(PmagSpecRec["specimen_alpha95"]),float(PmagSpecRec["measurement_step_min"]),float(PmagSpecRec["measurement_step_max"]),float(PmagSpecRec["specimen_dec"]),float(PmagSpecRec["specimen_inc"]),calculation_type)
saveit=raw_input("Save this interpretation? [y]/n \n")
if saveit!="n":
changeS=1
#
# put in details
#
angle,direction_type,setangle="","",0
if len(CurrRecs)>0:
replace=raw_input(" [0] add new component, or [1] replace existing interpretation(s) [default is replace] ")
if replace=="1" or replace=="":
CurrRecs=[]
PmagSpecRec['specimen_comp_name']='A'
CurrRecs.append(PmagSpecRec)
else:
print 'These are the current component names for this specimen: '
for trec in CurrRecs:print trec['specimen_comp_name']
compnum=raw_input("Enter new component name: ")
PmagSpecRec['specimen_comp_name']=compnum
print "Adding new component: ",PmagSpecRec['specimen_comp_name']
CurrRecs.append(PmagSpecRec)
else:
PmagSpecRec['specimen_comp_name']='A'
CurrRecs.append(PmagSpecRec)
k+=1
ans=""
else:
ans=""
else: # plots=1
k+=1
files={}
                locname=locname.replace('/','-')
print PmagSpecRec
for key in ZED.keys():
files[key]="LO:_"+locname+'_SI:_'+PmagSpecRec['er_site_name']+'_SA:_'+PmagSpecRec['er_sample_name']+'_SP:_'+s+'_CO:_'+coord+'_TY:_'+key+'_.'+fmt
if pmagplotlib.isServer:
black = '#000000'
purple = '#800080'
titles={}
titles['demag']='DeMag Plot'
titles['zijd']='Zijderveld Plot'
titles['eqarea']='Equal Area Plot'
ZED = pmagplotlib.addBorders(ZED,titles,black,purple)
pmagplotlib.saveP(ZED,files)
if len(CurrRecs)>0:
for rec in CurrRecs: PriorRecs.append(rec)
if changeS==1:
if len(PriorRecs)>0:
save_redo(PriorRecs,inspec)
else:
os.system('rm '+inspec)
CurrRecs,beg_pca,end_pca=[],"","" # next up
changeS=0
else: k+=1 # skip record - not enough data
if changeM==1:
pmag.magic_write(meas_file,meas_data,'magic_measurements')
if __name__ == "__main__":
main()
| bsd-3-clause |
openfisca/openfisca-survey-manager | openfisca_survey_manager/utils.py | 1 | 8315 |
import logging
import os
import pandas as pd
from openfisca_core import periods
from openfisca_core.parameters import ParameterNode, Scale
log = logging.getLogger(__name__)
def inflate_parameters(parameters, inflator, base_year, last_year = None, ignore_missing_units = False):
if (last_year is not None) and (last_year > base_year + 1):
for year in range(base_year + 1, last_year + 1):
inflate_parameters(parameters, inflator, year - 1, last_year = year, ignore_missing_units = ignore_missing_units)
else:
if last_year is None:
last_year = base_year + 1
assert last_year == base_year + 1
for sub_parameter in parameters.children.values():
if isinstance(sub_parameter, ParameterNode):
inflate_parameters(sub_parameter, inflator, base_year, last_year, ignore_missing_units = ignore_missing_units)
else:
acceptable_units = [
'rate_unit',
'threshold_unit',
'unit',
]
if ignore_missing_units:
if not hasattr(sub_parameter, 'metadata'):
continue
                # Empty intersection: no unit present in metadata
if not bool(set(sub_parameter.metadata.keys()) & set(acceptable_units)):
continue
assert hasattr(sub_parameter, 'metadata'), "{} doesn't have metadata".format(sub_parameter.name)
unit_types = set(sub_parameter.metadata.keys()).intersection(set(acceptable_units))
assert unit_types, "No admissible unit in metadata for parameter {}. You may consider using the option 'ignore_missing_units' from the inflate_paramaters() function.".format(
sub_parameter.name)
if len(unit_types) > 1:
assert unit_types == set(['threshold_unit', 'rate_unit']), \
"Too much admissible units in metadata for parameter {}".format(
sub_parameter.name)
unit_by_type = dict([
(unit_type, sub_parameter.metadata[unit_type]) for unit_type in unit_types
])
for unit_type in unit_by_type.keys():
if sub_parameter.metadata[unit_type].startswith("currency"):
inflate_parameter_leaf(sub_parameter, base_year, inflator, unit_type = unit_type)
def inflate_parameter_leaf(sub_parameter, base_year, inflator, unit_type = 'unit'):
"""
    Inflate a Parameter leaf according to its unit type.
    The basic unit type is assumed by default; other admissible unit types
    are threshold_unit and rate_unit.
"""
if isinstance(sub_parameter, Scale):
if unit_type == 'threshold_unit':
for bracket in sub_parameter.brackets:
threshold = bracket.children['threshold']
inflate_parameter_leaf(threshold, base_year, inflator)
return
else:
# Remove new values for year > base_year
kept_instants_str = [
parameter_at_instant.instant_str
for parameter_at_instant in sub_parameter.values_list
if periods.instant(parameter_at_instant.instant_str).year <= base_year
]
if not kept_instants_str:
return
last_admissible_instant_str = max(kept_instants_str)
sub_parameter.update(
start = last_admissible_instant_str,
value = sub_parameter(last_admissible_instant_str)
)
restricted_to_base_year_value_list = [
parameter_at_instant for parameter_at_instant in sub_parameter.values_list
if periods.instant(parameter_at_instant.instant_str).year == base_year
]
# When value is changed in the base year
if restricted_to_base_year_value_list:
for parameter_at_instant in reversed(restricted_to_base_year_value_list):
if parameter_at_instant.instant_str.startswith(str(base_year)):
value = (
parameter_at_instant.value * (1 + inflator)
if parameter_at_instant.value is not None
else None
)
sub_parameter.update(
start = parameter_at_instant.instant_str.replace(
str(base_year), str(base_year + 1)
),
value = value,
)
        # Or use the value at that instant even when it is defined earlier than the base year
else:
value = (
sub_parameter("{}-12-31".format(base_year)) * (1 + inflator)
if sub_parameter("{}-12-31".format(base_year)) is not None
else None
)
sub_parameter.update(
start = "{}-01-01".format(base_year + 1),
value = value
)
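# Illustrative sketch (added; not part of the original module): the compounding
# that inflate_parameters() applies, year after year, to a currency-denominated
# value. The helper name and the numbers in the comment below are hypothetical.
def _compound_inflation(value, inflator, base_year, last_year):
    # one (1 + inflator) update per year, mirroring the recursion above
    for _ in range(base_year, last_year):
        value = value * (1 + inflator) if value is not None else None
    return value
# e.g. _compound_inflation(100.0, 0.02, 2020, 2023) -> 100.0 * 1.02 ** 3, about 106.12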
def asof(tax_benefit_system, instant):
parameters = tax_benefit_system.parameters
parameters_asof(parameters, instant)
variables_asof(tax_benefit_system, instant)
def leaf_asof(sub_parameter, instant):
kept_instants_str = [
parameter_at_instant.instant_str
for parameter_at_instant in sub_parameter.values_list
if periods.instant(parameter_at_instant.instant_str) <= instant
]
if not kept_instants_str:
sub_parameter.values_list = []
return
last_admissible_instant_str = max(kept_instants_str)
sub_parameter.update(
start = last_admissible_instant_str,
value = sub_parameter(last_admissible_instant_str)
)
return
def parameters_asof(parameters, instant):
if isinstance(instant, str):
instant = periods.instant(instant)
assert isinstance(instant, periods.Instant)
for sub_parameter in parameters.children.values():
if isinstance(sub_parameter, ParameterNode):
parameters_asof(sub_parameter, instant)
else:
if isinstance(sub_parameter, Scale):
for bracket in sub_parameter.brackets:
threshold = bracket.children['threshold']
rate = bracket.children.get('rate')
amount = bracket.children.get('amount')
leaf_asof(threshold, instant)
if rate:
leaf_asof(rate, instant)
if amount:
leaf_asof(amount, instant)
else:
leaf_asof(sub_parameter, instant)
def variables_asof(tax_benefit_system, instant, variables_list = []):
if isinstance(instant, str):
instant = periods.instant(instant)
assert isinstance(instant, periods.Instant)
if variables_list == []:
variables_list = tax_benefit_system.variables.keys()
for variable_name, variable in tax_benefit_system.variables.items():
if variable_name in variables_list:
formulas = variable.formulas
for instant_str in list(formulas.keys()):
if periods.instant(instant_str) > instant:
del formulas[instant_str]
if variable.end is not None:
if periods.instant(variable.end) >= instant:
variable.end = None
def stata_files_to_data_frames(data, period = None):
assert period is not None
period = periods.period(period)
stata_file_by_entity = data.get('stata_file_by_entity')
if stata_file_by_entity is None:
return
variables_from_stata_files = list()
input_data_frame_by_entity_by_period = dict()
input_data_frame_by_entity_by_period[periods.period(period)] = input_data_frame_by_entity = dict()
for entity, file_path in stata_file_by_entity.items():
assert os.path.exists(file_path), "Invalid file path: {}".format(file_path)
entity_data_frame = input_data_frame_by_entity[entity] = pd.read_stata(file_path)
variables_from_stata_files += list(entity_data_frame.columns)
data['input_data_frame_by_entity_by_period'] = input_data_frame_by_entity_by_period
return variables_from_stata_files
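# Hypothetical illustration (added; not in the original module) of the `data`
# mapping expected by stata_files_to_data_frames(): one Stata file per entity.
# The entity names and file paths below are placeholders.
_example_data = {
    'stata_file_by_entity': {
        'household': '/path/to/households.dta',
        'person': '/path/to/persons.dta',
    }
}
# stata_files_to_data_frames(_example_data, period = 2020) would read each file
# with pandas.read_stata() and store the resulting frames under
# _example_data['input_data_frame_by_entity_by_period'][periods.period(2020)],
# returning the list of column names found across the Stata files.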
| agpl-3.0 |
jereze/scikit-learn | sklearn/decomposition/tests/test_kernel_pca.py | 57 | 8062 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import (assert_array_almost_equal, assert_less,
assert_equal, assert_not_equal,
assert_raises)
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.metrics.pairwise import rbf_kernel
def test_kernel_pca():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
def histogram(x, y, **kwargs):
# Histogram kernel implemented as a callable.
assert_equal(kwargs, {}) # no kernel_params that we didn't ask for
return np.minimum(x, y).sum()
for eigen_solver in ("auto", "dense", "arpack"):
for kernel in ("linear", "rbf", "poly", histogram):
# histogram kernel produces singular matrix inside linalg.solve
# XXX use a least-squares approximation?
inv = not callable(kernel)
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=inv)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# non-regression test: previously, gamma would be 0 by default,
# forcing all eigenvalues to 0 under the poly kernel
assert_not_equal(X_fit_transformed.size, 0)
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
if inv:
X_pred2 = kpca.inverse_transform(X_pred_transformed)
assert_equal(X_pred2.shape, X_pred.shape)
def test_invalid_parameters():
assert_raises(ValueError, KernelPCA, 10, fit_inverse_transform=True,
kernel='precomputed')
def test_kernel_pca_sparse():
rng = np.random.RandomState(0)
X_fit = sp.csr_matrix(rng.random_sample((5, 4)))
X_pred = sp.csr_matrix(rng.random_sample((2, 4)))
for eigen_solver in ("auto", "arpack"):
for kernel in ("linear", "rbf", "poly"):
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=False)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
# X_pred2 = kpca.inverse_transform(X_pred_transformed)
# assert_equal(X_pred2.shape, X_pred.shape)
def test_kernel_pca_linear_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
# for a linear kernel, kernel PCA should find the same projection as PCA
# modulo the sign (direction)
# fit only the first four components: fifth is near zero eigenvalue, so
# can be trimmed due to roundoff error
assert_array_almost_equal(
np.abs(KernelPCA(4).fit(X_fit).transform(X_pred)),
np.abs(PCA(4).fit(X_fit).transform(X_pred)))
def test_kernel_pca_n_components():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
for c in [1, 2, 4]:
kpca = KernelPCA(n_components=c, eigen_solver=eigen_solver)
shape = kpca.fit(X_fit).transform(X_pred).shape
assert_equal(shape, (2, c))
def test_remove_zero_eig():
X = np.array([[1 - 1e-30, 1], [1, 1], [1, 1 - 1e-20]])
# n_components=None (default) => remove_zero_eig is True
kpca = KernelPCA()
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 0))
kpca = KernelPCA(n_components=2)
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 2))
kpca = KernelPCA(n_components=2, remove_zero_eig=True)
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 0))
def test_kernel_pca_precomputed():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
X_kpca = KernelPCA(4, eigen_solver=eigen_solver).\
fit(X_fit).transform(X_pred)
X_kpca2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_pred, X_fit.T))
X_kpca_train = KernelPCA(
4, eigen_solver=eigen_solver,
kernel='precomputed').fit_transform(np.dot(X_fit, X_fit.T))
X_kpca_train2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_fit, X_fit.T))
assert_array_almost_equal(np.abs(X_kpca),
np.abs(X_kpca2))
assert_array_almost_equal(np.abs(X_kpca_train),
np.abs(X_kpca_train2))
def test_kernel_pca_invalid_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((2, 4))
kpca = KernelPCA(kernel="tototiti")
assert_raises(ValueError, kpca.fit, X_fit)
def test_gridsearch_pipeline():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="rbf", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(kernel_pca__gamma=2. ** np.arange(-2, 2))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
grid_search.fit(X, y)
assert_equal(grid_search.best_score_, 1)
def test_gridsearch_pipeline_precomputed():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model using a precomputed kernel.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="precomputed", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(Perceptron__n_iter=np.arange(1, 5))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
X_kernel = rbf_kernel(X, gamma=2.)
grid_search.fit(X_kernel, y)
assert_equal(grid_search.best_score_, 1)
def test_nested_circles():
# Test the linear separability of the first 2D KPCA transform
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
# 2D nested circles are not linearly separable
train_score = Perceptron().fit(X, y).score(X, y)
assert_less(train_score, 0.8)
# Project the circles data into the first 2 components of a RBF Kernel
# PCA model.
# Note that the gamma value is data dependent. If this test breaks
# and the gamma value has to be updated, the Kernel PCA example will
# have to be updated too.
kpca = KernelPCA(kernel="rbf", n_components=2,
fit_inverse_transform=True, gamma=2.)
X_kpca = kpca.fit_transform(X)
# The data is perfectly linearly separable in that space
train_score = Perceptron().fit(X_kpca, y).score(X_kpca, y)
assert_equal(train_score, 1.0)
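# Minimal usage sketch (added for illustration; not one of the original tests):
# the fit_transform / inverse_transform round trip exercised by the tests above.
def _example_kernel_pca_usage():
    rng = np.random.RandomState(0)
    X = rng.random_sample((10, 4))
    kpca = KernelPCA(n_components=2, kernel="rbf", fit_inverse_transform=True)
    X_proj = kpca.fit_transform(X)           # projected scores, shape (10, 2)
    X_back = kpca.inverse_transform(X_proj)  # approximate reconstruction of X
    return X_proj, X_back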
| bsd-3-clause |
davebx/tools-iuc | tools/vsnp/vsnp_add_zero_coverage.py | 12 | 6321 | #!/usr/bin/env python
import argparse
import os
import re
import shutil
import pandas
import pysam
from Bio import SeqIO
def get_sample_name(file_path):
base_file_name = os.path.basename(file_path)
if base_file_name.find(".") > 0:
# Eliminate the extension.
return os.path.splitext(base_file_name)[0]
return base_file_name
def get_coverage_df(bam_file):
# Create a coverage dictionary.
coverage_dict = {}
coverage_list = pysam.depth(bam_file, split_lines=True)
for line in coverage_list:
chrom, position, depth = line.split('\t')
coverage_dict["%s-%s" % (chrom, position)] = depth
# Convert it to a data frame.
coverage_df = pandas.DataFrame.from_dict(coverage_dict, orient='index', columns=["depth"])
return coverage_df
def get_zero_df(reference):
# Create a zero coverage dictionary.
zero_dict = {}
for record in SeqIO.parse(reference, "fasta"):
chrom = record.id
total_len = len(record.seq)
for pos in list(range(1, total_len + 1)):
zero_dict["%s-%s" % (str(chrom), str(pos))] = 0
# Convert it to a data frame with depth_x
# and depth_y columns - index is NaN.
zero_df = pandas.DataFrame.from_dict(zero_dict, orient='index', columns=["depth"])
return zero_df
def output_zc_vcf_file(base_file_name, vcf_file, zero_df, total_zero_coverage, output_vcf):
column_names = ["CHROM", "POS", "ID", "REF", "ALT", "QUAL", "FILTER", "INFO", "FORMAT", "Sample"]
vcf_df = pandas.read_csv(vcf_file, sep='\t', header=None, names=column_names, comment='#')
good_snp_count = len(vcf_df[(vcf_df['ALT'].str.len() == 1) & (vcf_df['REF'].str.len() == 1) & (vcf_df['QUAL'] > 150)])
if total_zero_coverage > 0:
header_file = "%s_header.csv" % base_file_name
with open(header_file, 'w') as outfile:
with open(vcf_file) as infile:
for line in infile:
if re.search('^#', line):
outfile.write("%s" % line)
vcf_df_snp = vcf_df[vcf_df['REF'].str.len() == 1]
vcf_df_snp = vcf_df_snp[vcf_df_snp['ALT'].str.len() == 1]
vcf_df_snp['ABS_VALUE'] = vcf_df_snp['CHROM'].map(str) + "-" + vcf_df_snp['POS'].map(str)
vcf_df_snp = vcf_df_snp.set_index('ABS_VALUE')
cat_df = pandas.concat([vcf_df_snp, zero_df], axis=1, sort=False)
cat_df = cat_df.drop(columns=['CHROM', 'POS', 'depth'])
cat_df[['ID', 'ALT', 'QUAL', 'FILTER', 'INFO']] = cat_df[['ID', 'ALT', 'QUAL', 'FILTER', 'INFO']].fillna('.')
cat_df['REF'] = cat_df['REF'].fillna('N')
cat_df['FORMAT'] = cat_df['FORMAT'].fillna('GT')
cat_df['Sample'] = cat_df['Sample'].fillna('./.')
cat_df['temp'] = cat_df.index.str.rsplit('-', n=1)
cat_df[['CHROM', 'POS']] = pandas.DataFrame(cat_df.temp.values.tolist(), index=cat_df.index)
cat_df = cat_df[['CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO', 'FORMAT', 'Sample']]
cat_df['POS'] = cat_df['POS'].astype(int)
cat_df = cat_df.sort_values(['CHROM', 'POS'])
body_file = "%s_body.csv" % base_file_name
cat_df.to_csv(body_file, sep='\t', header=False, index=False)
with open(output_vcf, "w") as outfile:
for cf in [header_file, body_file]:
with open(cf, "r") as infile:
for line in infile:
outfile.write("%s" % line)
else:
shutil.move(vcf_file, output_vcf)
return good_snp_count
def output_metrics_file(base_file_name, average_coverage, genome_coverage, good_snp_count, output_metrics):
bam_metrics = [base_file_name, "", "%4f" % average_coverage, genome_coverage]
vcf_metrics = [base_file_name, str(good_snp_count), "", ""]
metrics_columns = ["File", "Number of Good SNPs", "Average Coverage", "Genome Coverage"]
with open(output_metrics, "w") as fh:
fh.write("# %s\n" % "\t".join(metrics_columns))
fh.write("%s\n" % "\t".join(bam_metrics))
fh.write("%s\n" % "\t".join(vcf_metrics))
def output_files(vcf_file, total_zero_coverage, zero_df, output_vcf, average_coverage, genome_coverage, output_metrics):
base_file_name = get_sample_name(vcf_file)
good_snp_count = output_zc_vcf_file(base_file_name, vcf_file, zero_df, total_zero_coverage, output_vcf)
output_metrics_file(base_file_name, average_coverage, genome_coverage, good_snp_count, output_metrics)
def get_coverage_and_snp_count(bam_file, vcf_file, reference, output_metrics, output_vcf):
coverage_df = get_coverage_df(bam_file)
zero_df = get_zero_df(reference)
coverage_df = zero_df.merge(coverage_df, left_index=True, right_index=True, how='outer')
# depth_x "0" column no longer needed.
coverage_df = coverage_df.drop(columns=['depth_x'])
coverage_df = coverage_df.rename(columns={'depth_y': 'depth'})
    # Convert the NaN to 0 coverage and get some metrics.
coverage_df = coverage_df.fillna(0)
coverage_df['depth'] = coverage_df['depth'].apply(int)
total_length = len(coverage_df)
average_coverage = coverage_df['depth'].mean()
zero_df = coverage_df[coverage_df['depth'] == 0]
total_zero_coverage = len(zero_df)
total_coverage = total_length - total_zero_coverage
genome_coverage = "{:.2%}".format(total_coverage / total_length)
    # Output a zero-coverage vcf file and the metrics file.
output_files(vcf_file, total_zero_coverage, zero_df, output_vcf, average_coverage, genome_coverage, output_metrics)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--bam_input', action='store', dest='bam_input', help='bam input file')
parser.add_argument('--output_metrics', action='store', dest='output_metrics', required=False, default=None, help='Output metrics text file')
parser.add_argument('--output_vcf', action='store', dest='output_vcf', required=False, default=None, help='Output VCF file')
parser.add_argument('--reference', action='store', dest='reference', help='Reference dataset')
parser.add_argument('--vcf_input', action='store', dest='vcf_input', help='vcf input file')
args = parser.parse_args()
get_coverage_and_snp_count(args.bam_input, args.vcf_input, args.reference, args.output_metrics, args.output_vcf)
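# Example invocation (illustrative only; the file names below are placeholders):
#   python vsnp_add_zero_coverage.py --bam_input sample.bam --vcf_input sample.vcf \
#       --reference reference.fasta --output_metrics metrics.tsv --output_vcf sample_zc.vcf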
| mit |
Barmaley-exe/scikit-learn | examples/cross_decomposition/plot_compare_cross_decomposition.py | 142 | 4761 | """
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X, and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
datasets that explain the most shared variance between both datasets.
This is apparent on the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both datasets;
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA
###############################################################################
# Dataset based latent variables model
n = 500
# 2 latent vars:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X_train = X[:n // 2]
Y_train = Y[:n // 2]
X_test = X[n // 2:]
Y_test = Y[n // 2:]
print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
###############################################################################
# Canonical (symmetric) PLS
# Transform data
# ~~~~~~~~~~~~~~
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On diagonal plot X vs Y scores on each components
plt.figure(figsize=(12, 8))
plt.subplot(221)
plt.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train")
plt.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 1: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
plt.subplot(224)
plt.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train")
plt.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 2: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
# 2) Off diagonal plot components 1 vs 2 for X and Y
plt.subplot(222)
plt.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train")
plt.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test")
plt.xlabel("X comp. 1")
plt.ylabel("X comp. 2")
plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)'
% np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.subplot(223)
plt.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train")
plt.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test")
plt.xlabel("Y comp. 1")
plt.ylabel("Y comp. 2")
plt.title('Y comp. 1 vs Y comp. 2 , (test corr = %.2f)'
% np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.show()
###############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noise
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coefs with B
print("Estimated B")
print(np.round(pls2.coefs, 1))
pls2.predict(X)
###############################################################################
# PLS regression, with univariate response, a.k.a. PLS1
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of components exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coefs, 1))
###############################################################################
# CCA (PLS mode B with symmetric deflation)
cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
X_train_r, Y_train_r = cca.transform(X_train, Y_train)
X_test_r, Y_test_r = cca.transform(X_test, Y_test)
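# Illustrative addition (not part of the original example): the per-component
# train-set correlation of the paired CCA scores can be printed directly.
for comp in range(2):
    corr = np.corrcoef(X_train_r[:, comp], Y_train_r[:, comp])[0, 1]
    print("CCA comp. %d train corr = %.2f" % (comp + 1, corr))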
| bsd-3-clause |
0x0all/scikit-learn | sklearn/ensemble/gradient_boosting.py | 4 | 60082 | """Gradient Boosted Regression Trees
This module contains methods for fitting gradient boosted regression trees for
both classification and regression.
The module structure is the following:
- The ``BaseGradientBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ in the concrete ``LossFunction`` used.
- ``GradientBoostingClassifier`` implements gradient boosting for
classification problems.
- ``GradientBoostingRegressor`` implements gradient boosting for
regression problems.
"""
# Authors: Peter Prettenhofer, Scott White, Gilles Louppe, Emanuele Olivetti,
# Arnaud Joly
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
from abc import ABCMeta, abstractmethod
from warnings import warn
from time import time
import numbers
import numpy as np
from scipy import stats
from .base import BaseEnsemble
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..utils import check_random_state, check_array, check_X_y, column_or_1d
from ..utils import check_consistent_length
from ..utils.extmath import logsumexp
from ..utils.stats import _weighted_percentile
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..tree.tree import DecisionTreeRegressor
from ..tree._tree import DTYPE, TREE_LEAF
from ..tree._tree import PresortBestSplitter
from ..tree._tree import FriedmanMSE
from ._gradient_boosting import predict_stages
from ._gradient_boosting import predict_stage
from ._gradient_boosting import _random_sample_mask
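# Usage sketch (added comment; not part of the original module): the public
# estimators defined below follow the standard scikit-learn fit/predict API,
# for example:
#
#     from sklearn.ensemble import GradientBoostingClassifier
#     clf = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1,
#                                      max_depth=3, random_state=0)
#     clf.fit(X_train, y_train)
#     proba = clf.predict_proba(X_test)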
class QuantileEstimator(BaseEstimator):
"""An estimator predicting the alpha-quantile of the training targets."""
def __init__(self, alpha=0.9):
if not 0 < alpha < 1.0:
raise ValueError("`alpha` must be in (0, 1.0) but was %r" % alpha)
self.alpha = alpha
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.quantile = stats.scoreatpercentile(y, self.alpha * 100.0)
else:
self.quantile = _weighted_percentile(y, sample_weight, self.alpha * 100.0)
def predict(self, X):
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.quantile)
return y
class MeanEstimator(BaseEstimator):
"""An estimator predicting the mean of the training targets."""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.mean = np.mean(y)
else:
self.mean = np.average(y, weights=sample_weight)
def predict(self, X):
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.mean)
return y
class LogOddsEstimator(BaseEstimator):
"""An estimator predicting the log odds ratio."""
scale = 1.0
def fit(self, X, y, sample_weight=None):
# pre-cond: pos, neg are encoded as 1, 0
if sample_weight is None:
pos = np.sum(y)
neg = y.shape[0] - pos
else:
pos = np.sum(sample_weight * y)
neg = np.sum(sample_weight * (1 - y))
if neg == 0 or pos == 0:
            raise ValueError('y contains non-binary labels.')
self.prior = self.scale * np.log(pos / neg)
def predict(self, X):
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.prior)
return y
class ScaledLogOddsEstimator(LogOddsEstimator):
"""Log odds ratio scaled by 0.5 -- for exponential loss. """
scale = 0.5
class PriorProbabilityEstimator(BaseEstimator):
"""An estimator predicting the probability of each
class in the training data.
"""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
sample_weight = np.ones_like(y, dtype=np.float)
class_counts = np.bincount(y, weights=sample_weight)
self.priors = class_counts / class_counts.sum()
def predict(self, X):
y = np.empty((X.shape[0], self.priors.shape[0]), dtype=np.float64)
y[:] = self.priors
return y
class ZeroEstimator(BaseEstimator):
"""An estimator that simply predicts zero. """
def fit(self, X, y, sample_weight=None):
if np.issubdtype(y.dtype, int):
# classification
self.n_classes = np.unique(y).shape[0]
if self.n_classes == 2:
self.n_classes = 1
else:
# regression
self.n_classes = 1
def predict(self, X):
y = np.empty((X.shape[0], self.n_classes), dtype=np.float64)
y.fill(0.0)
return y
class LossFunction(six.with_metaclass(ABCMeta, object)):
"""Abstract base class for various loss functions.
Attributes
----------
K : int
The number of regression trees to be induced;
1 for regression and binary classification;
``n_classes`` for multi-class classification.
"""
is_multi_class = False
def __init__(self, n_classes):
self.K = n_classes
def init_estimator(self):
"""Default ``init`` estimator for loss function. """
raise NotImplementedError()
@abstractmethod
def __call__(self, y, pred, sample_weight=None):
"""Compute the loss of prediction ``pred`` and ``y``. """
@abstractmethod
def negative_gradient(self, y, y_pred, **kargs):
"""Compute the negative gradient.
Parameters
        ----------
y : np.ndarray, shape=(n,)
The target labels.
y_pred : np.ndarray, shape=(n,):
The predictions.
"""
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Update the terminal regions (=leaves) of the given tree and
updates the current predictions of the model. Traverses tree
and invokes template method `_update_terminal_region`.
Parameters
----------
tree : tree.Tree
The tree object.
X : np.ndarray, shape=(n, m)
The data array.
y : np.ndarray, shape=(n,)
The target labels.
residual : np.ndarray, shape=(n,)
The residuals (usually the negative gradient).
y_pred : np.ndarray, shape=(n,):
The predictions.
sample_weight : np.ndarray, shape=(n,):
The weight of each sample.
"""
# compute leaf for each sample in ``X``.
terminal_regions = tree.apply(X)
# mask all which are not in sample mask.
masked_terminal_regions = terminal_regions.copy()
masked_terminal_regions[~sample_mask] = -1
# update each leaf (= perform line search)
for leaf in np.where(tree.children_left == TREE_LEAF)[0]:
self._update_terminal_region(tree, masked_terminal_regions,
leaf, X, y, residual,
y_pred[:, k], sample_weight)
# update predictions (both in-bag and out-of-bag)
y_pred[:, k] += (learning_rate
* tree.value[:, 0, 0].take(terminal_regions, axis=0))
@abstractmethod
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Template method for updating terminal regions (=leaves). """
class RegressionLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for regression loss functions. """
def __init__(self, n_classes):
if n_classes != 1:
raise ValueError("``n_classes`` must be 1 for regression but "
"was %r" % n_classes)
super(RegressionLossFunction, self).__init__(n_classes)
class LeastSquaresError(RegressionLossFunction):
"""Loss function for least squares (LS) estimation.
Terminal regions need not to be updated for least squares. """
def init_estimator(self):
return MeanEstimator()
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.mean((y - pred.ravel()) ** 2.0)
else:
return (1.0 / sample_weight.sum()) * \
np.sum(sample_weight * ((y - pred.ravel()) ** 2.0))
def negative_gradient(self, y, pred, **kargs):
return y - pred.ravel()
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Least squares does not need to update terminal regions.
But it has to update the predictions.
"""
# update predictions
y_pred[:, k] += learning_rate * tree.predict(X).ravel()
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
pass
class LeastAbsoluteError(RegressionLossFunction):
"""Loss function for least absolute deviation (LAD) regression. """
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.abs(y - pred.ravel()).mean()
else:
return (1.0 / sample_weight.sum()) * \
np.sum(sample_weight * np.abs(y - pred.ravel()))
def negative_gradient(self, y, pred, **kargs):
"""1.0 if y - pred > 0.0 else -1.0"""
pred = pred.ravel()
return 2.0 * (y - pred > 0.0) - 1.0
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""LAD updates terminal regions to median estimates. """
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
diff = y.take(terminal_region, axis=0) - pred.take(terminal_region, axis=0)
tree.value[leaf, 0, 0] = _weighted_percentile(diff, sample_weight, percentile=50)
class HuberLossFunction(RegressionLossFunction):
"""Huber loss function for robust regression.
M-Regression proposed in Friedman 2001.
See
---
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
"""
def __init__(self, n_classes, alpha=0.9):
super(HuberLossFunction, self).__init__(n_classes)
self.alpha = alpha
self.gamma = None
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
gamma = self.gamma
if gamma is None:
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
if sample_weight is None:
sq_loss = np.sum(0.5 * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * (np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / y.shape[0]
else:
sq_loss = np.sum(0.5 * sample_weight[gamma_mask] * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * sample_weight[~gamma_mask] *
(np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / sample_weight.sum()
return loss
def negative_gradient(self, y, pred, sample_weight=None, **kargs):
pred = pred.ravel()
diff = y - pred
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
residual = np.zeros((y.shape[0],), dtype=np.float64)
residual[gamma_mask] = diff[gamma_mask]
residual[~gamma_mask] = gamma * np.sign(diff[~gamma_mask])
self.gamma = gamma
return residual
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
gamma = self.gamma
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
median = _weighted_percentile(diff, sample_weight, percentile=50)
diff_minus_median = diff - median
tree.value[leaf, 0] = median + np.mean(
np.sign(diff_minus_median) *
np.minimum(np.abs(diff_minus_median), gamma))
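# Reference note (added comment; not in the original source): per sample, with
# residual r = y - pred and threshold gamma set to the alpha-quantile of |r|,
# HuberLossFunction.__call__ above computes
#     L(r) = 0.5 * r ** 2                  if |r| <= gamma
#     L(r) = gamma * (|r| - gamma / 2.0)   otherwise
# and negative_gradient() clips the residual to +/- gamma outside that range.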
class QuantileLossFunction(RegressionLossFunction):
"""Loss function for quantile regression.
    Quantile regression makes it possible to estimate the percentiles
    of the conditional distribution of the target.
"""
def __init__(self, n_classes, alpha=0.9):
super(QuantileLossFunction, self).__init__(n_classes)
assert 0 < alpha < 1.0
self.alpha = alpha
self.percentile = alpha * 100.0
def init_estimator(self):
return QuantileEstimator(self.alpha)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
alpha = self.alpha
mask = y > pred
if sample_weight is None:
loss = (alpha * diff[mask].sum() +
(1.0 - alpha) * diff[~mask].sum()) / y.shape[0]
else:
loss = ((alpha * np.sum(sample_weight[mask] * diff[mask]) +
(1.0 - alpha) * np.sum(sample_weight[~mask] * diff[~mask])) /
sample_weight.sum())
return loss
def negative_gradient(self, y, pred, **kargs):
alpha = self.alpha
pred = pred.ravel()
mask = y > pred
return (alpha * mask) - ((1.0 - alpha) * ~mask)
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
sample_weight = sample_weight.take(terminal_region, axis=0)
val = _weighted_percentile(diff, sample_weight, self.percentile)
tree.value[leaf, 0] = val
class ClassificationLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for classification loss functions. """
def _score_to_proba(self, score):
"""Template method to convert scores to probabilities.
        If the loss does not support probabilities, a TypeError is raised.
"""
raise TypeError('%s does not support predict_proba' % type(self).__name__)
@abstractmethod
def _score_to_decision(self, score):
"""Template method to convert scores to decisions.
Returns int arrays.
"""
class BinomialDeviance(ClassificationLossFunction):
"""Binomial deviance loss function for binary classification.
Binary classification is a special case; here, we only need to
fit one tree instead of ``n_classes`` trees.
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(BinomialDeviance, self).__init__(1)
def init_estimator(self):
return LogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
"""Compute the deviance (= 2 * negative log-likelihood). """
# logaddexp(0, v) == log(1.0 + exp(v))
pred = pred.ravel()
if sample_weight is None:
return -2.0 * np.mean((y * pred) - np.logaddexp(0.0, pred))
else:
return (-2.0 / sample_weight.sum() *
np.sum(sample_weight * ((y * pred) - np.logaddexp(0.0, pred))))
def negative_gradient(self, y, pred, **kargs):
"""Compute the residual (= negative gradient). """
return y - 1.0 / (1.0 + np.exp(-pred.ravel()))
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step.
our node estimate is given by:
sum(w * (y - prob)) / sum(w * prob * (1 - prob))
we take advantage that: y - prob = residual
"""
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
denominator = np.sum(sample_weight * (y - residual) * (1 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = 1.0 / (1.0 + np.exp(-score.ravel()))
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
class MultinomialDeviance(ClassificationLossFunction):
"""Multinomial deviance loss function for multi-class classification.
For multi-class classification we need to fit ``n_classes`` trees at
each stage.
"""
is_multi_class = True
def __init__(self, n_classes):
if n_classes < 3:
raise ValueError("{0:s} requires more than 2 classes.".format(
self.__class__.__name__))
super(MultinomialDeviance, self).__init__(n_classes)
def init_estimator(self):
return PriorProbabilityEstimator()
def __call__(self, y, pred, sample_weight=None):
# create one-hot label encoding
Y = np.zeros((y.shape[0], self.K), dtype=np.float64)
for k in range(self.K):
Y[:, k] = y == k
if sample_weight is None:
return np.sum(-1 * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1))
else:
return np.sum(-1 * sample_weight * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1))
def negative_gradient(self, y, pred, k=0, **kwargs):
"""Compute negative gradient for the ``k``-th class. """
return y - np.nan_to_num(np.exp(pred[:, k] -
logsumexp(pred, axis=1)))
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step. """
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
numerator *= (self.K - 1) / self.K
denominator = np.sum(sample_weight * (y - residual) *
(1.0 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
return np.nan_to_num(
np.exp(score - (logsumexp(score, axis=1)[:, np.newaxis])))
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
class ExponentialLoss(ClassificationLossFunction):
"""Exponential loss function for binary classification.
Same loss as AdaBoost.
See
---
Greg Ridgeway, Generalized Boosted Models: A guide to the gbm package, 2007
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(ExponentialLoss, self).__init__(1)
def init_estimator(self):
return ScaledLogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
if sample_weight is None:
return np.mean(np.exp(-(2. * y - 1.) * pred))
else:
return (1.0 / sample_weight.sum()) * \
np.sum(sample_weight * np.exp(-(2 * y - 1) * pred))
def negative_gradient(self, y, pred, **kargs):
y_ = -(2. * y - 1.)
return y_ * np.exp(y_ * pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
pred = pred.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
y_ = 2. * y - 1.
numerator = np.sum(y_ * sample_weight * np.exp(-y_ * pred))
denominator = np.sum(sample_weight * np.exp(-y_ * pred))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = 1.0 / (1.0 + np.exp(-2.0 * score.ravel()))
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
return (score.ravel() >= 0.0).astype(np.int)
LOSS_FUNCTIONS = {'ls': LeastSquaresError,
'lad': LeastAbsoluteError,
'huber': HuberLossFunction,
'quantile': QuantileLossFunction,
'deviance': None, # for both, multinomial and binomial
'exponential': ExponentialLoss,
}
INIT_ESTIMATORS = {'zero': ZeroEstimator}
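# --- Editor's note: the helper below is an added, hedged illustration and is
# not part of the original module.  It sketches how the two mappings above are
# consumed: 'deviance' maps to ``None`` because the concrete loss class is only
# known once the number of classes is known, mirroring the branch in
# ``BaseGradientBoosting._check_params``.  The function name is hypothetical.
def _example_resolve_loss(loss, n_classes, alpha=0.9):
    if loss == 'deviance':
        loss_class = MultinomialDeviance if n_classes > 2 else BinomialDeviance
    else:
        loss_class = LOSS_FUNCTIONS[loss]
    if loss in ('huber', 'quantile'):
        return loss_class(n_classes, alpha)
    return loss_class(n_classes)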
class VerboseReporter(object):
"""Reports verbose output to stdout.
If ``verbose==1`` output is printed once in a while (when iteration mod
    verbose_mod is zero); if larger than 1 then output is printed for
each update.
"""
def __init__(self, verbose):
self.verbose = verbose
def init(self, est, begin_at_stage=0):
# header fields and line format str
header_fields = ['Iter', 'Train Loss']
verbose_fmt = ['{iter:>10d}', '{train_score:>16.4f}']
# do oob?
if est.subsample < 1:
header_fields.append('OOB Improve')
verbose_fmt.append('{oob_impr:>16.4f}')
header_fields.append('Remaining Time')
verbose_fmt.append('{remaining_time:>16s}')
# print the header line
print(('%10s ' + '%16s ' *
(len(header_fields) - 1)) % tuple(header_fields))
self.verbose_fmt = ' '.join(verbose_fmt)
# plot verbose info each time i % verbose_mod == 0
self.verbose_mod = 1
self.start_time = time()
self.begin_at_stage = begin_at_stage
def update(self, j, est):
"""Update reporter with new iteration. """
do_oob = est.subsample < 1
# we need to take into account if we fit additional estimators.
i = j - self.begin_at_stage # iteration relative to the start iter
if (i + 1) % self.verbose_mod == 0:
oob_impr = est.oob_improvement_[j] if do_oob else 0
remaining_time = ((est.n_estimators - (j + 1)) *
(time() - self.start_time) / float(i + 1))
if remaining_time > 60:
remaining_time = '{0:.2f}m'.format(remaining_time / 60.0)
else:
remaining_time = '{0:.2f}s'.format(remaining_time)
print(self.verbose_fmt.format(iter=j + 1,
train_score=est.train_score_[j],
oob_impr=oob_impr,
remaining_time=remaining_time))
if self.verbose == 1 and ((i + 1) // (self.verbose_mod * 10) > 0):
# adjust verbose frequency (powers of 10)
self.verbose_mod *= 10
class BaseGradientBoosting(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Abstract base class for Gradient Boosting. """
@abstractmethod
def __init__(self, loss, learning_rate, n_estimators, min_samples_split,
min_samples_leaf, min_weight_fraction_leaf,
max_depth, init, subsample, max_features,
random_state, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False):
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.loss = loss
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.subsample = subsample
self.max_features = max_features
self.max_depth = max_depth
self.init = init
self.random_state = random_state
self.alpha = alpha
self.verbose = verbose
self.max_leaf_nodes = max_leaf_nodes
self.warm_start = warm_start
self.estimators_ = np.empty((0, 0), dtype=np.object)
def _fit_stage(self, i, X, y, y_pred, sample_weight, sample_mask,
criterion, splitter, random_state):
"""Fit another stage of ``n_classes_`` trees to the boosting model. """
assert sample_mask.dtype == np.bool
loss = self.loss_
original_y = y
for k in range(loss.K):
if loss.is_multi_class:
y = np.array(original_y == k, dtype=np.float64)
residual = loss.negative_gradient(y, y_pred, k=k,
sample_weight=sample_weight)
# induce regression tree on residuals
tree = DecisionTreeRegressor(
criterion=criterion,
splitter=splitter,
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
min_weight_fraction_leaf=self.min_weight_fraction_leaf,
max_features=self.max_features,
max_leaf_nodes=self.max_leaf_nodes,
random_state=random_state)
if self.subsample < 1.0:
# no inplace multiplication!
sample_weight = sample_weight * sample_mask.astype(np.float64)
tree.fit(X, residual, sample_weight=sample_weight,
check_input=False)
# update tree leaves
loss.update_terminal_regions(tree.tree_, X, y, residual, y_pred,
sample_weight, sample_mask,
self.learning_rate, k=k)
# add tree to ensemble
self.estimators_[i, k] = tree
return y_pred
def _check_params(self):
"""Check validity of parameters and raise ValueError if not valid. """
if self.n_estimators <= 0:
raise ValueError("n_estimators must be greater than 0 but "
"was %r" % self.n_estimators)
if self.learning_rate <= 0.0:
raise ValueError("learning_rate must be greater than 0 but "
"was %r" % self.learning_rate)
if (self.loss not in self._SUPPORTED_LOSS
or self.loss not in LOSS_FUNCTIONS):
raise ValueError("Loss '{0:s}' not supported. ".format(self.loss))
if self.loss == 'deviance':
loss_class = (MultinomialDeviance
if len(self.classes_) > 2
else BinomialDeviance)
else:
loss_class = LOSS_FUNCTIONS[self.loss]
if self.loss in ('huber', 'quantile'):
self.loss_ = loss_class(self.n_classes_, self.alpha)
else:
self.loss_ = loss_class(self.n_classes_)
if not (0.0 < self.subsample <= 1.0):
raise ValueError("subsample must be in (0,1] but "
"was %r" % self.subsample)
if self.init is not None:
if isinstance(self.init, six.string_types):
if self.init not in INIT_ESTIMATORS:
raise ValueError('init="%s" is not supported' % self.init)
else:
if (not hasattr(self.init, 'fit')
or not hasattr(self.init, 'predict')):
raise ValueError("init=%r must be valid BaseEstimator "
"and support both fit and "
"predict" % self.init)
if not (0.0 < self.alpha < 1.0):
raise ValueError("alpha must be in (0.0, 1.0) but "
"was %r" % self.alpha)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
# if is_classification
if self.n_classes_ > 1:
max_features = max(1, int(np.sqrt(self.n_features)))
else:
# is regression
max_features = self.n_features
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features)))
else:
raise ValueError("Invalid value for max_features: %r. "
"Allowed string values are 'auto', 'sqrt' "
"or 'log2'." % self.max_features)
elif self.max_features is None:
max_features = self.n_features
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
max_features = int(self.max_features * self.n_features)
self.max_features_ = max_features
def _init_state(self):
"""Initialize model state and allocate model state data structures. """
if self.init is None:
self.init_ = self.loss_.init_estimator()
elif isinstance(self.init, six.string_types):
self.init_ = INIT_ESTIMATORS[self.init]()
else:
self.init_ = self.init
self.estimators_ = np.empty((self.n_estimators, self.loss_.K),
dtype=np.object)
self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64)
# do oob?
if self.subsample < 1.0:
self.oob_improvement_ = np.zeros((self.n_estimators),
dtype=np.float64)
def _clear_state(self):
"""Clear the state of the gradient boosting model. """
if hasattr(self, 'estimators_'):
self.estimators_ = np.empty((0, 0), dtype=np.object)
if hasattr(self, 'train_score_'):
del self.train_score_
if hasattr(self, 'oob_improvement_'):
del self.oob_improvement_
if hasattr(self, 'init_'):
del self.init_
def _resize_state(self):
"""Add additional ``n_estimators`` entries to all attributes. """
# self.n_estimators is the number of additional est to fit
total_n_estimators = self.n_estimators
if total_n_estimators < self.estimators_.shape[0]:
raise ValueError('resize with smaller n_estimators %d < %d' %
                             (total_n_estimators, self.estimators_.shape[0]))
self.estimators_.resize((total_n_estimators, self.loss_.K))
self.train_score_.resize(total_n_estimators)
if (self.subsample < 1 or hasattr(self, 'oob_improvement_')):
# if do oob resize arrays or create new if not available
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_.resize(total_n_estimators)
else:
self.oob_improvement_ = np.zeros((total_n_estimators,),
dtype=np.float64)
def _is_initialized(self):
return len(getattr(self, 'estimators_', [])) > 0
def fit(self, X, y, sample_weight=None, monitor=None):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
monitor : callable, optional
The monitor is called after each iteration with the current
iteration, a reference to the estimator and the local variables of
``_fit_stages`` as keyword arguments ``callable(i, self,
locals())``. If the callable returns ``True`` the fitting procedure
is stopped. The monitor can be used for various things such as
computing held-out estimates, early stopping, model introspect, and
snapshoting.
Returns
-------
self : object
Returns self.
"""
# if not warmstart - clear the estimator state
if not self.warm_start:
self._clear_state()
# Check input
X, y = check_X_y(X, y, dtype=DTYPE)
n_samples, self.n_features = X.shape
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=np.float32)
else:
sample_weight = column_or_1d(sample_weight, warn=True)
check_consistent_length(X, y, sample_weight)
y = self._validate_y(y)
random_state = check_random_state(self.random_state)
self._check_params()
if not self._is_initialized():
# init state
self._init_state()
# fit initial model - FIXME make sample_weight optional
self.init_.fit(X, y, sample_weight)
# init predictions
y_pred = self.init_.predict(X)
begin_at_stage = 0
else:
# add more estimators to fitted model
# invariant: warm_start = True
if self.n_estimators < self.estimators_.shape[0]:
raise ValueError('n_estimators=%d must be larger or equal to '
'estimators_.shape[0]=%d when '
'warm_start==True'
% (self.n_estimators,
self.estimators_.shape[0]))
begin_at_stage = self.estimators_.shape[0]
y_pred = self._decision_function(X)
self._resize_state()
# fit the boosting stages
n_stages = self._fit_stages(X, y, y_pred, sample_weight, random_state,
begin_at_stage, monitor)
# change shape of arrays after fit (early-stopping or additional ests)
if n_stages != self.estimators_.shape[0]:
self.estimators_ = self.estimators_[:n_stages]
self.train_score_ = self.train_score_[:n_stages]
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_ = self.oob_improvement_[:n_stages]
return self
def _fit_stages(self, X, y, y_pred, sample_weight, random_state,
begin_at_stage=0, monitor=None):
"""Iteratively fits the stages.
For each stage it computes the progress (OOB, train score)
and delegates to ``_fit_stage``.
Returns the number of stages fit; might differ from ``n_estimators``
due to early stopping.
"""
n_samples = X.shape[0]
do_oob = self.subsample < 1.0
sample_mask = np.ones((n_samples, ), dtype=np.bool)
n_inbag = max(1, int(self.subsample * n_samples))
loss_ = self.loss_
# init criterion and splitter
criterion = FriedmanMSE(1)
splitter = PresortBestSplitter(criterion,
self.max_features_,
self.min_samples_leaf,
self.min_weight_fraction_leaf,
random_state)
if self.verbose:
verbose_reporter = VerboseReporter(self.verbose)
verbose_reporter.init(self, begin_at_stage)
# perform boosting iterations
i = begin_at_stage
for i in range(begin_at_stage, self.n_estimators):
# subsampling
if do_oob:
sample_mask = _random_sample_mask(n_samples, n_inbag,
random_state)
# OOB score before adding this stage
old_oob_score = loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask])
# fit next stage of trees
y_pred = self._fit_stage(i, X, y, y_pred, sample_weight,
sample_mask, criterion, splitter,
random_state)
# track deviance (= loss)
if do_oob:
self.train_score_[i] = loss_(y[sample_mask],
y_pred[sample_mask],
sample_weight[sample_mask])
self.oob_improvement_[i] = (old_oob_score -
loss_(y[~sample_mask], y_pred[~sample_mask],
sample_weight[~sample_mask]))
else:
# no need to fancy index w/ no subsampling
self.train_score_[i] = loss_(y, y_pred, sample_weight)
if self.verbose > 0:
verbose_reporter.update(i, self)
if monitor is not None:
early_stopping = monitor(i, self, locals())
if early_stopping:
break
return i + 1
def _make_estimator(self, append=True):
# we don't need _make_estimator
raise NotImplementedError()
def _init_decision_function(self, X):
"""Check input and compute prediction of ``init``. """
if self.estimators_ is None or len(self.estimators_) == 0:
raise ValueError("Estimator not fitted, call `fit` "
"before making predictions`.")
if X.shape[1] != self.n_features:
raise ValueError("X.shape[1] should be {0:d}, not {1:d}.".format(
self.n_features, X.shape[1]))
score = self.init_.predict(X).astype(np.float64)
return score
def _decision_function(self, X):
# for use in inner loop, not raveling the output in single-class case,
# not doing input validation.
score = self._init_decision_function(X)
predict_stages(self.estimators_, X, self.learning_rate, score)
return score
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._init_decision_function(X)
for i in range(self.estimators_.shape[0]):
predict_stage(self.estimators_, i, X, self.learning_rate, score)
yield score
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise ValueError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
total_sum = np.zeros((self.n_features, ), dtype=np.float64)
for stage in self.estimators_:
stage_sum = sum(tree.feature_importances_
for tree in stage) / len(stage)
total_sum += stage_sum
importances = total_sum / len(self.estimators_)
return importances
def _validate_y(self, y):
self.n_classes_ = 1
# Default implementation
return y
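# --- Editor's note: the callback below is an added, hedged example and is not
# part of the original module.  The ``monitor`` argument documented in
# ``BaseGradientBoosting.fit`` receives the stage index, the estimator and the
# local variables of ``_fit_stages``; returning True stops fitting.  The name
# and the (very naive) stopping rule are illustrative only.
def _example_monitor(i, est, locals_):
    """Stop fitting when the training loss has not improved for 10 stages."""
    if i < 10:
        return False
    window = est.train_score_[i - 9:i + 1]       # the last ten stages
    best_before = est.train_score_[:i - 9].min()
    return window.min() >= best_before           # no new minimum recently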
class GradientBoostingClassifier(BaseGradientBoosting, ClassifierMixin):
"""Gradient Boosting for classification.
GB builds an additive model in a
forward stage-wise fashion; it allows for the optimization of
arbitrary differentiable loss functions. In each stage ``n_classes_``
regression trees are fit on the negative gradient of the
binomial or multinomial deviance loss function. Binary classification
is a special case where only a single regression tree is induced.
Parameters
----------
loss : {'deviance', 'exponential'}, optional (default='deviance')
loss function to be optimized. 'deviance' refers to
deviance (= logistic regression) for classification
with probabilistic outputs. For loss 'exponential' gradient
        boosting recovers the AdaBoost algorithm.
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
        Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
`init` : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
See also
--------
sklearn.tree.DecisionTreeClassifier, RandomForestClassifier
AdaBoostClassifier
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
"""
_SUPPORTED_LOSS = ('deviance', 'exponential')
def __init__(self, loss='deviance', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, init=None, random_state=None,
max_features=None, verbose=0,
max_leaf_nodes=None, warm_start=False):
super(GradientBoostingClassifier, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
random_state=random_state, verbose=verbose,
max_leaf_nodes=max_leaf_nodes, warm_start=warm_start)
def _validate_y(self, y):
self.classes_, y = np.unique(y, return_inverse=True)
self.n_classes_ = len(self.classes_)
return y
def predict(self, X):
"""Predict class for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
        y : array of shape = [n_samples]
The predicted values.
"""
score = self.decision_function(X)
decisions = self.loss_._score_to_decision(score)
return self.classes_.take(decisions, axis=0)
def staged_predict(self, X):
"""Predict class at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples]
The predicted value of the input samples.
"""
for score in self.staged_decision_function(X):
decisions = self.loss_._score_to_decision(score)
yield self.classes_.take(decisions, axis=0)
def predict_proba(self, X):
"""Predict class probabilities for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
        p : array of shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
score = self.decision_function(X)
try:
return self.loss_._score_to_proba(score)
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
def staged_predict_proba(self, X):
"""Predict class probabilities at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
        p : generator of array, shape = [n_samples, n_classes]
            The predicted class probabilities of the input samples.
"""
try:
for score in self.staged_decision_function(X):
yield self.loss_._score_to_proba(score)
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
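# --- Editor's note: the function below is an added, hedged usage sketch and is
# not part of the original module.  It shows a typical fit/score round trip
# with ``GradientBoostingClassifier``; the dataset and parameter values are
# illustrative only, and the code is wrapped in a function so that importing
# the module stays side-effect free.
def _example_classifier_usage():
    from sklearn.datasets import make_hastie_10_2
    X, y = make_hastie_10_2(n_samples=1000, random_state=0)
    clf = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1,
                                     max_depth=1, random_state=0)
    clf.fit(X[:800], y[:800])
    return clf.score(X[800:], y[800:])  # accuracy on the held-out 200 samples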
class GradientBoostingRegressor(BaseGradientBoosting, RegressorMixin):
"""Gradient Boosting for regression.
GB builds an additive model in a forward stage-wise fashion;
it allows for the optimization of arbitrary differentiable loss functions.
In each stage a regression tree is fit on the negative gradient of the
given loss function.
Parameters
----------
loss : {'ls', 'lad', 'huber', 'quantile'}, optional (default='ls')
loss function to be optimized. 'ls' refers to least squares
regression. 'lad' (least absolute deviation) is a highly robust
loss function solely based on order information of the input
variables. 'huber' is a combination of the two. 'quantile'
allows quantile regression (use `alpha` to specify the quantile).
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
alpha : float (default=0.9)
The alpha-quantile of the huber loss function and the quantile
loss function. Only if ``loss='huber'`` or ``loss='quantile'``.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
`init` : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
See also
--------
DecisionTreeRegressor, RandomForestRegressor
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
"""
_SUPPORTED_LOSS = ('ls', 'lad', 'huber', 'quantile')
def __init__(self, loss='ls', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, init=None, random_state=None,
max_features=None, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False):
super(GradientBoostingRegressor, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
random_state=random_state, alpha=alpha, verbose=verbose,
max_leaf_nodes=max_leaf_nodes, warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples]
The predicted values.
"""
return self.decision_function(X).ravel()
def staged_predict(self, X):
"""Predict regression target at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples]
The predicted value of the input samples.
"""
for y in self.staged_decision_function(X):
yield y.ravel()
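# --- Editor's note: the function below is an added, hedged usage sketch and is
# not part of the original module.  It shows a typical fit/predict round trip
# with ``GradientBoostingRegressor`` on synthetic data; names and parameter
# values are illustrative only, and the code is wrapped in a function so that
# importing the module stays side-effect free.
def _example_regressor_usage():
    import numpy as np
    from sklearn.metrics import mean_squared_error
    rng = np.random.RandomState(0)
    X = rng.rand(200, 4)
    y = X[:, 0] + np.sin(6 * X[:, 1]) + 0.1 * rng.randn(200)
    est = GradientBoostingRegressor(n_estimators=100, max_depth=3, loss='ls',
                                    learning_rate=0.1, random_state=0)
    est.fit(X[:150], y[:150])
    return mean_squared_error(y[150:], est.predict(X[150:]))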
| bsd-3-clause |
fengzhyuan/scikit-learn | examples/exercises/plot_cv_diabetes.py | 231 | 2527 | """
===============================================
Cross-validation on diabetes Dataset Exercise
===============================================
A tutorial exercise which uses cross-validation with linear models.
This exercise is used in the :ref:`cv_estimators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
from __future__ import print_function
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cross_validation, datasets, linear_model
diabetes = datasets.load_diabetes()
X = diabetes.data[:150]
y = diabetes.target[:150]
lasso = linear_model.Lasso()
alphas = np.logspace(-4, -.5, 30)
scores = list()
scores_std = list()
for alpha in alphas:
lasso.alpha = alpha
this_scores = cross_validation.cross_val_score(lasso, X, y, n_jobs=1)
scores.append(np.mean(this_scores))
scores_std.append(np.std(this_scores))
plt.figure(figsize=(4, 3))
plt.semilogx(alphas, scores)
# plot error lines showing +/- std. errors of the scores
plt.semilogx(alphas, np.array(scores) + np.array(scores_std) / np.sqrt(len(X)),
'b--')
plt.semilogx(alphas, np.array(scores) - np.array(scores_std) / np.sqrt(len(X)),
'b--')
plt.ylabel('CV score')
plt.xlabel('alpha')
plt.axhline(np.max(scores), linestyle='--', color='.5')
##############################################################################
# Bonus: how much can you trust the selection of alpha?
# To answer this question we use the LassoCV object that sets its alpha
# parameter automatically from the data by internal cross-validation (i.e. it
# performs cross-validation on the training data it receives).
# We use external cross-validation to see how much the automatically obtained
# alphas differ across different cross-validation folds.
lasso_cv = linear_model.LassoCV(alphas=alphas)
k_fold = cross_validation.KFold(len(X), 3)
print("Answer to the bonus question:",
"how much can you trust the selection of alpha?")
print()
print("Alpha parameters maximising the generalization score on different")
print("subsets of the data:")
for k, (train, test) in enumerate(k_fold):
lasso_cv.fit(X[train], y[train])
print("[fold {0}] alpha: {1:.5f}, score: {2:.5f}".
format(k, lasso_cv.alpha_, lasso_cv.score(X[test], y[test])))
print()
print("Answer: Not very much since we obtained different alphas for different")
print("subsets of the data and moreover, the scores for these alphas differ")
print("quite substantially.")
plt.show()
| bsd-3-clause |
sudikrt/costproML | new Data/staticData/newmod.py | 3 | 1525 | from pandas import read_csv
from pandas import datetime
from pandas.tools.plotting import autocorrelation_plot
from pandas import DataFrame
from statsmodels.tsa.arima_model import ARIMA
from matplotlib import pyplot
import pandas as pd
import numpy as np
import statsmodels.api as sm
from pandas.tseries.offsets import *
def parser(x):
return datetime.strptime(x, '%Y-%m')
series = read_csv('outData.csv', header=0, parse_dates=[1], index_col=1, squeeze=True, date_parser=parser)
print(series.head())
series.plot()
pyplot.show()
model = ARIMA(series, order=(1,1,0))
model_fit = model.fit(disp=0)
print(model_fit.summary())
# plot residual errors
residuals = DataFrame(model_fit.resid)
residuals.plot()
pyplot.show()
residuals.plot(kind='kde')
pyplot.show()
print(residuals.describe())
'''
grouped = series.groupby ('job')
group = list(grouped)[0][1]
ts_data = pd.TimeSeries(group.maxsal.values, index=pd.to_datetime(group.index))
ts_log_data = np.log (ts_data)
#ts_log_data.plot (ax=axes[1], style='b-', label='actual')
model = sm.tsa.ARMA (ts_log_data, order=(1,1)).fit()
#print (model.params)
y_pred = model.predict (ts_log_data.index[0].isoformat(), ts_log_data.index[-1].isoformat())
start_date = ts_log_data.index[-1]+Day (1)
#start_date = pd.to_datetime("2012-07")
end_date = ts_log_data.index[-1] + Day()
#end_date = pd.to_datetime("2016-01 00:00:00")
y_forecast = model.predict(start_date.isoformat(), end_date.isoformat())
#print(y_forecast)
print(np.exp(y_forecast))
#series.plot()
pyplot.show()
'''
| apache-2.0 |
tosolveit/scikit-learn | examples/linear_model/plot_ransac.py | 250 | 1673 | """
===========================================
Robust linear model estimation using RANSAC
===========================================
In this example we see how to robustly fit a linear model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model, datasets
n_samples = 1000
n_outliers = 50
X, y, coef = datasets.make_regression(n_samples=n_samples, n_features=1,
n_informative=1, noise=10,
coef=True, random_state=0)
# Add outlier data
np.random.seed(0)
X[:n_outliers] = 3 + 0.5 * np.random.normal(size=(n_outliers, 1))
y[:n_outliers] = -3 + 10 * np.random.normal(size=n_outliers)
# Fit line using all data
model = linear_model.LinearRegression()
model.fit(X, y)
# Robustly fit linear model with RANSAC algorithm
model_ransac = linear_model.RANSACRegressor(linear_model.LinearRegression())
model_ransac.fit(X, y)
inlier_mask = model_ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
# Predict data of estimated models
line_X = np.arange(-5, 5)
line_y = model.predict(line_X[:, np.newaxis])
line_y_ransac = model_ransac.predict(line_X[:, np.newaxis])
# Compare estimated coefficients
print("Estimated coefficients (true, normal, RANSAC):")
print(coef, model.coef_, model_ransac.estimator_.coef_)
plt.plot(X[inlier_mask], y[inlier_mask], '.g', label='Inliers')
plt.plot(X[outlier_mask], y[outlier_mask], '.r', label='Outliers')
plt.plot(line_X, line_y, '-k', label='Linear regressor')
plt.plot(line_X, line_y_ransac, '-b', label='RANSAC regressor')
plt.legend(loc='lower right')
plt.show()
| bsd-3-clause |
andyh616/mne-python | examples/preprocessing/plot_define_target_events.py | 19 | 3350 | """
============================================================
Define target events based on time lag, plot evoked response
============================================================
This script shows how to define higher order events based on
time lag between reference and target events. For
illustration, we will put face stimuli presented into two
classes, that is 1) followed by an early button press
(within 590 milliseconds) and followed by a late button
press (later than 590 milliseconds). Finally, we will
visualize the evoked responses to both 'quickly-processed'
and 'slowly-processed' face stimuli.
"""
# Authors: Denis Engemann <[email protected]>
#
# License: BSD (3-clause)
import mne
from mne import io
from mne.event import define_target_events
from mne.datasets import sample
import matplotlib.pyplot as plt
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
# Setup for reading the raw data
raw = io.Raw(raw_fname)
events = mne.read_events(event_fname)
# Set up pick list: EEG + STI 014 - bad channels (modify to your needs)
include = [] # or stim channels ['STI 014']
raw.info['bads'] += ['EEG 053'] # bads
# pick MEG channels
picks = mne.pick_types(raw.info, meg='mag', eeg=False, stim=False, eog=True,
include=include, exclude='bads')
###############################################################################
# Find stimulus event followed by quick button presses
reference_id = 5 # presentation of a smiley face
target_id = 32 # button press
sfreq = raw.info['sfreq'] # sampling rate
tmin = 0.1 # trials leading to very early responses will be rejected
tmax = 0.59 # ignore face stimuli followed by button press later than 590 ms
new_id = 42 # the new event id for a hit. If None, reference_id is used.
fill_na = 99 # the fill value for misses
events_, lag = define_target_events(events, reference_id, target_id,
sfreq, tmin, tmax, new_id, fill_na)
print(events_) # The 99 indicates missing or too late button presses
# besides the events also the lag between target and reference is returned
# this could e.g. be used as parametric regressor in subsequent analyses.
print(lag[lag != fill_na]) # lag in milliseconds
# #############################################################################
# Construct epochs
tmin_ = -0.2
tmax_ = 0.4
event_id = dict(early=new_id, late=fill_na)
epochs = mne.Epochs(raw, events_, event_id, tmin_,
tmax_, picks=picks, baseline=(None, 0),
reject=dict(mag=4e-12))
# average epochs and get an Evoked dataset.
early, late = [epochs[k].average() for k in event_id]
###############################################################################
# View evoked response
times = 1e3 * epochs.times # time in milliseconds
title = 'Evoked response followed by %s button press'
plt.clf()
ax = plt.subplot(2, 1, 1)
early.plot(axes=ax)
plt.title(title % 'early')
plt.ylabel('Evoked field (fT)')
ax = plt.subplot(2, 1, 2)
late.plot(axes=ax)
plt.title(title % 'late')
plt.ylabel('Evoked field (fT)')
plt.show()
| bsd-3-clause |
q1ang/tushare | tushare/datayes/subject.py | 10 | 32740 | # -*- coding:utf-8 -*-
"""
DataYes (通联数据) data API
Created on 2015/08/24
@author: Jimmy Liu
@group : waditu
@contact: [email protected]
"""
from pandas.compat import StringIO
import pandas as pd
from tushare.util import vars as vs
from tushare.util.common import Client
from tushare.util import upass as up
class Subject():
def __init__(self, client=None):
if client is None:
self.client = Client(up.get_token())
else:
self.client = client
def SocialDataXQ(self, beginDate='', endDate='', ticker='', field=''):
"""
        Xueqiu social statistics. Given one or more security tickers and a start/end date,
        returns each security's daily number of Xueqiu posts and post share (%) over that
        period. (Note: data start 2014/1/1, updated daily.)
"""
code, result = self.client.getData(vs.SOCIALDATAXQ%(beginDate, endDate, ticker, field))
return _ret_data(code, result)
def SocialDataXQByTicker(self, ticker='', field=''):
"""
        Xueqiu social data for a single security. Given one ticker, returns that security's
        daily Xueqiu post count and post share (%). (Note: data start 2014/1/1, updated daily.)
"""
code, result = self.client.getData(vs.SOCIALDATAXQBYTICKER%(ticker, field))
return _ret_data(code, result)
def SocialDataXQByDate(self, statisticsDate='', field=''):
"""
        Xueqiu social data for a single statistics date. Given one date, returns every security
        mentioned in that day's Xueqiu posts, with each security's post count and post share (%).
        (Note: data start 2014/1/1, updated daily.)
"""
code, result = self.client.getData(vs.SOCIALDATAXQBYDATE%(statisticsDate, field))
return _ret_data(code, result)
def NewsInfo(self, newsID='', field=''):
"""
        Basic news information. Given a news ID, returns the news ID, title, summary, original
        source, author, publishing source, publish time and insert time. (Note: from 2014/1/1
        sources are numerous with roughly 40k items per day, 2013 and earlier are sparser;
        updated in real time.)
"""
code, result = self.client.getData(vs.NEWSINFO%(newsID, field))
return _ret_data(code, result)
def NewsInfoByTime(self, newsPublishDate='', beginTime='', endTime='', field=''):
"""
        Basic information on news published within a time window on a given day. Input the
        publish date and a start/end time to get the news ID, title, summary, original source,
        author, publishing source, publish time and insert time. (Note: coverage is much richer
        from 2014/1/1; updated in real time.)
"""
code, result = self.client.getData(vs.NEWSINFOBYTIME%(newsPublishDate, beginTime, endTime, field))
return _ret_data(code, result)
def NewsContent(self, newsID='', field=''):
"""
        Full news text. Given a news ID, returns the news ID, title, summary, body, source URL,
        original source, author, publishing source, publish time and insert time. (Note:
        coverage is much richer from 2014/1/1; updated in real time.)
"""
code, result = self.client.getData(vs.NEWSCONTENT%(newsID, field))
return _ret_data(code, result)
def NewsContentByTime(self, newsPublishDate='', beginTime='', endTime='', field=''):
"""
        Full text of news published within a time window on a given day. Input the publish date
        and a start/end time to get the news ID, title, summary, body, source URL, original
        source, author, publishing source, publish time and insert time. (Note: coverage is much
        richer from 2014/1/1; updated in real time.)
"""
code, result = self.client.getData(vs.NEWSCONTENTBYTIME%(newsPublishDate, beginTime, endTime, field))
return _ret_data(code, result)
def CompanyByNews(self, newsID='', field=''):
"""
        Companies associated with a news item, with per-company sentiment available. Given a
        news ID, returns the company code and full name plus the news title, publish time and
        insert time; the company code can be used with getSecID to look up the company's
        securities. (Note: coverage is much richer from 2014/1/1; updated in real time.)
"""
code, result = self.client.getData(vs.COMPANYBYNEWS%(newsID, field))
return _ret_data(code, result)
def NewsByCompany(self, partyID='', beginDate='', endDate='', field=''):
"""
        News associated with a company, with per-company sentiment available. Given a company
        code and a publish start/end date, returns the news ID, title, publishing source,
        publish time and insert time. (Note: coverage is much richer from 2014/1/1; updated in
        real time.)
"""
code, result = self.client.getData(vs.NEWSBYCOMPANY%(partyID, beginDate, endDate, field))
return _ret_data(code, result)
def TickersByNews(self, newsID='', field=''):
"""
        Securities associated with a news item, with per-security sentiment available. Given a
        news ID, returns the security code, short name and exchange, plus the news title,
        publishing source, publish time and insert time. (Note: coverage is much richer from
        2014/1/1; updated in real time.)
"""
code, result = self.client.getData(vs.TICKERSBYNEWS%(newsID, field))
return _ret_data(code, result)
def NewsByTickers(self, secID='', secShortName='', ticker='', beginDate='', endDate='', exchangeCD='', field=''):
"""
        News associated with securities, with per-security sentiment available. Input a security
        code or short name, the publish start/end dates and optionally an exchange code, to get
        the news ID, title, publishing source, publish time and insert time. (Note: coverage is
        much richer from 2014/1/1; updated in real time.)
"""
code, result = self.client.getData(vs.NEWSBYTICKERS%(secID, secShortName, ticker, beginDate, endDate, exchangeCD, field))
return _ret_data(code, result)
def ThemesContent(self, isMain='', themeID='', themeName='', themeSource='', field=''):
"""
        Basic information on all themes. Input a theme ID or name and a theme source to get the
        theme ID, name, description, source, whether it is active today, and insert/update
        times. (Note: theme base period starts 2011/4/16; the active flag is refreshed daily.)
"""
code, result = self.client.getData(vs.THEMESCONTENT%(isMain, themeID, themeName, themeSource, field))
return _ret_data(code, result)
def TickersByThemes(self, themeID='', themeName='', beginDate='', endDate='', isNew='', field=''):
"""
        Securities associated with a theme. Input a theme ID or name, and optionally a start/end
        date, to get the linked securities (code, short name, exchange) together with three
        relevance scores, the association start/end time, a description and insert/update times.
        (Note: theme-security links start 2013/12/28 and are complete from December 2014;
        refreshed daily.)
"""
code, result = self.client.getData(vs.TICKERSBYTHEMES%(themeID, themeName, beginDate, endDate, isNew, field))
return _ret_data(code, result)
def ThemesTickersInsert(self, themeID='', themeName='', beginDate='', endDate='', field=''):
"""
        Securities newly linked to a theme within a period. Input a theme ID or name and a
        start/end time to get the newly added securities with code, short name, exchange, three
        relevance scores, association start/end time, description and insert/update times.
        (Note: links start 2013/12/28, complete from December 2014; updated daily.)
"""
code, result = self.client.getData(vs.THEMESTICKERSINSERT%(themeID, themeName, beginDate, endDate, field))
return _ret_data(code, result)
def ThemesTickersDelete(self, themeID='', themeName='', beginDate='', endDate='', field=''):
"""
        Securities unlinked from a theme within a period. Input a theme ID or name and a
        start/end time to get the removed securities with code, short name, exchange,
        association start/end time, description and insert/update times. (Note: links start
        2013/12/28, complete from December 2014; updated daily.)
"""
code, result = self.client.getData(vs.THEMESTICKERSDELETE%(themeID, themeName, beginDate, endDate, field))
return _ret_data(code, result)
def ThemesByTickers(self, secID='', secShortName='', ticker='', beginDate='', endDate='', exchangeCD='', field=''):
"""
        Themes associated with a security. Input the exchange code and a ticker or short name,
        and optionally a start/end date, to get the linked themes with three relevance scores,
        association start/end time, description and insert/update times. (Note: links start
        2013/12/28, complete from December 2014; updated daily.)
"""
code, result = self.client.getData(vs.THEMESBYTICKERS%(secID, secShortName, ticker, beginDate, endDate, exchangeCD, field))
return _ret_data(code, result)
def ThemesPeriod(self, isLatest='', themeID='', themeName='', field=''):
"""
        Theme active periods. Input a theme ID or name to get its active time ranges; set
        isLatest to return only the most recent active period. (Note: active-period data start
        2013/1/1; a theme becomes active when its news volume passes a threshold; updated daily.)
"""
code, result = self.client.getData(vs.THEMESPERIOD%(isLatest, themeID, themeName, field))
return _ret_data(code, result)
def ActiveThemes(self, date='', field=''):
"""
        Themes active on a given day. Input a date to get the themes that were active on that
        day. (Note: active-period data start 2013/1/1; a theme becomes active when its news
        volume passes a threshold; updated daily.)
"""
code, result = self.client.getData(vs.ACTIVETHEMES%(date, field))
return _ret_data(code, result)
def ThemesSimilarity(self, themeID='', themeName='', field=''):
"""
        Themes similar to a given theme. Input a theme ID or name to get the similar themes'
        IDs and names together with the text similarity and the associated-security similarity.
        Updated daily.
"""
code, result = self.client.getData(vs.THEMESSIMILARITY%(themeID, themeName, field))
return _ret_data(code, result)
def ThemesHeat(self, themeID='', themeName='', beginDate='', endDate='', field=''):
"""
        Theme heat data. Input a theme ID or name, and optionally a start/end date, to get the
        theme's daily news count and heat, i.e. its share (%) of all theme news that day.
        (Note: data start 2014/1/1, updated daily.)
"""
code, result = self.client.getData(vs.THEMESHEAT%(themeID, themeName, beginDate, endDate, field))
return _ret_data(code, result)
def SectorThemesByTickers(self, secID='', secShortName='', ticker='', beginDate='', endDate='', exchangeCD='', field=''):
"""
        Themes associated with a security, where the themes are derived from Shenwan (SWS)
        industries. Inputs and outputs match getThemesByTickers. (Note: industry-derived links
        start 2014/12/26; refreshed daily.)
"""
code, result = self.client.getData(vs.SECTORTHEMESBYTICKERS%(secID, secShortName, ticker, beginDate, endDate, exchangeCD, field))
return _ret_data(code, result)
def WebThemesByTickers(self, secID='', secShortName='', ticker='', beginDate='', endDate='', exchangeCD='', field=''):
"""
        Themes associated with a security, where the themes are derived from the web. Inputs and
        outputs match getThemesByTickers. (Note: web-derived links start 2013/12/28, complete
        from December 2014; updated daily.)
"""
code, result = self.client.getData(vs.WEBTHEMESBYTICKERS%(secID, secShortName, ticker, beginDate, endDate, exchangeCD, field))
return _ret_data(code, result)
def NewsHeatIndex(self, beginDate='', endDate='', exchangeCD='', secID='', secShortName='', ticker='', field=''):
"""
        News heat index per security, i.e. the share (%) of that day's news that is linked to
        the security. Input one or more tickers and a start/end date. (Note: statistics are
        reliable from 2014/1/1, data reach back to 2004/10/28 but earlier sources are
        incomplete; counted over all regular news linked to the security; updated daily.)
"""
code, result = self.client.getData(vs.NEWSHEATINDEX%(beginDate, endDate, exchangeCD, secID, secShortName, ticker, field))
return _ret_data(code, result)
def NewsSentimentIndex(self, beginDate='', endDate='', exchangeCD='', secID='', secShortName='', ticker='', field=''):
"""
        News sentiment index per security, i.e. the mean sentiment of the security's related
        news on each day. Input one or more tickers and a start/end date. (Note: reliable from
        2014/1/1, data reach back to 2004/10/28; updated daily.)
"""
code, result = self.client.getData(vs.NEWSSENTIMENTINDEX%(beginDate, endDate, exchangeCD, secID, secShortName, ticker, field))
return _ret_data(code, result)
def ReportByTicker(self, ticker='', beginDate='', endDate='', field=''):
"""
        Announcement classification results by ticker. Input one or more tickers and a date
        range to get the announcement ID, title, exchange, the exchange's original category,
        the assigned category and insert/update times. (Note: classification data start
        2009/1/5, updated daily.)
"""
code, result = self.client.getData(vs.REPORTBYTICKER%(ticker, beginDate, endDate, field))
return _ret_data(code, result)
def ReportByCategory(self, beginDate='', Category='', endDate='', field=''):
"""
        Announcements by category. Input one or more categories and a date range to get the
        announcement ID, title, exchange, original exchange category, publish time, assigned
        category and insert/update times. (Note: data start 2009/1/5, updated daily.)
"""
code, result = self.client.getData(vs.REPORTBYCATEGORY%(beginDate, Category, endDate, field))
return _ret_data(code, result)
def ReportContent(self, ticker='', beginDate='', endDate='', field=''):
"""
        Announcement content by ticker. Input one or more tickers and a date range to get the
        announcement ID, title, exchange, original category, publish time, full text, URL and
        insert time. (Note: data start 2000/1/8, updated daily.)
"""
code, result = self.client.getData(vs.REPORTCONTENT%(ticker, beginDate, endDate, field))
return _ret_data(code, result)
def ActiveThemesInsert(self, beginDate='', endDate='', isLatest='', themeSource='', field=''):
"""
        Themes that became active within a period (the time filter applies to the active
        period's start). Input a start/end time, the latest-period flag and the theme source to
        get the theme ID, name, active start/end time, latest-period flag and insert/update
        times. (Note: data start 2013/1/1; updated daily.)
"""
code, result = self.client.getData(vs.ACTIVETHEMESINSERT%(beginDate, endDate, isLatest, themeSource, field))
return _ret_data(code, result)
def ActiveThemesDelete(self, beginDate='', endDate='', isLatest='', themeSource='', field=''):
"""
        Themes that stopped being active within a period (the time filter applies to the active
        period's end). Input a start/end time, the latest-period flag and the theme source to
        get the theme ID, name, active start/end time, latest-period flag and insert/update
        times. (Note: data start 2013/1/1; updated daily; themes that stopped being active today
        only become available after 9:00 the next day.)
"""
code, result = self.client.getData(vs.ACTIVETHEMESDELETE%(beginDate, endDate, isLatest, themeSource, field))
return _ret_data(code, result)
def ThemesCluster(self, isMain='', themeID='', themeName='', field=''):
"""
        Cluster mapping of today's active themes. Input the main theme ID or name after
        clustering to get the other themes in the same cluster, with IDs, names and
        insert/update times. (Note: fetch today's clustered themes from getThemesContent first;
        pass isMain=0 to drop each theme's self-mapping; refreshed daily, only today's data are
        available.)
"""
code, result = self.client.getData(vs.THEMESCLUSTER%(isMain, themeID, themeName, field))
return _ret_data(code, result)
def ThemesByNews(self, insertDate='', newsID='', beginTime='', endTime='', field=''):
"""
        Themes associated with news. Input a news ID, or the insert start/end time of the
        news-theme link, to get the theme ID and name plus the news title, publish time and the
        link's insert/update times. (Note: coverage is much richer from 2014/1/1; updated in
        real time; link data start 2015/04/07.)
"""
code, result = self.client.getData(vs.THEMESBYNEWS%(insertDate, newsID, beginTime, endTime, field))
return _ret_data(code, result)
def ThemesByNewsCompanyRel(self, insertDate='', newsID='', beginTime='', endTime='', field=''):
"""
        Themes associated with news, restricted to company-related news. Inputs and outputs
        match getThemesByNews. (Note: coverage is much richer from 2014/1/1; updated in real
        time; link data start 2015/04/07.)
"""
code, result = self.client.getData(vs.THEMESBYNEWSCOMPANYREL%(insertDate, newsID, beginTime, endTime, field))
return _ret_data(code, result)
def ThemesInsertDB(self, beginDate='', endDate='', themeSource='', field=''):
"""
        Themes newly inserted into the database within a period. Input a start/end date and a
        theme source to get the theme ID, name, description, source, whether it is active today
        and insert/update times. (Note: theme base period starts 2011/4/16; the active flag is
        refreshed daily.)
"""
code, result = self.client.getData(vs.THEMESINSERTDB%(beginDate, endDate, themeSource, field))
return _ret_data(code, result)
def ThemesByNewsLF(self, insertDate='', newsID='', beginTime='', endTime='', field=''):
"""
        Themes associated with news, a filtered refinement of getThemesByNews. Inputs and
        outputs are the same. (Note: coverage is much richer from 2014/1/1; updated in real
        time; link data start 2015/04/07.)
"""
code, result = self.client.getData(vs.THEMESBYNEWSLF%(insertDate, newsID, beginTime, endTime, field))
return _ret_data(code, result)
def ThemesByNewsMF(self, insertDate='', newsID='', beginTime='', endTime='', field=''):
"""
        Themes associated with news, a further refinement of getThemesByNewsLF and the most
        strictly filtered (smallest) of the news-theme APIs. Inputs and outputs are the same.
        (Note: coverage is much richer from 2014/1/1; updated in real time; link data start
        2015/04/07.)
"""
code, result = self.client.getData(vs.THEMESBYNEWSMF%(insertDate, newsID, beginTime, endTime, field))
return _ret_data(code, result)
def NewsInfoByInsertTime(self, newsInsertDate='', beginTime='', endTime='', field=''):
"""
        Basic information on news inserted within a time window on a given day. Input the insert
        date and a start/end time to get the news ID, title, summary, original source, author,
        publishing source, publish time and insert time. (Note: coverage is much richer from
        2014/1/1; updated in real time.)
"""
code, result = self.client.getData(vs.NEWSINFOBYINSERTTIME%(newsInsertDate, beginTime, endTime, field))
return _ret_data(code, result)
def NewsContentByInsertTime(self, newsInsertDate='', beginTime='', endTime='', field=''):
"""
        Full text of news inserted within a time window on a given day. Input the insert date
        and a start/end time to get the news ID, title, summary, body, source URL, original
        source, author, publishing source, publish time and insert time. (Note: coverage is much
        richer from 2014/1/1; updated in real time.)
"""
code, result = self.client.getData(vs.NEWSCONTENTBYINSERTTIME%(newsInsertDate, beginTime, endTime, field))
return _ret_data(code, result)
def SocialDataGuba(self, beginDate='', endDate='', ticker='', field=''):
"""
        Guba (stock forum) heat statistics per security. Input one or more tickers and a
        start/end date to get the security's daily number of related Guba posts and post share
        (%). (Note: data start 2014/1/1, updated daily.)
"""
code, result = self.client.getData(vs.SOCIALDATAGUBA%(beginDate, endDate, ticker, field))
return _ret_data(code, result)
def SocialThemeDataGuba(self, beginDate='', endDate='', themeID='', field=''):
"""
        Guba (stock forum) heat statistics per theme. Input one or more theme IDs and a
        start/end date to get the theme's daily Guba post count and post share (%). (Note: data
        start 2014/1/1, updated daily.)
"""
code, result = self.client.getData(vs.SOCIALTHEMEDATAGUBA%(beginDate, endDate, themeID, field))
return _ret_data(code, result)
def ThemesByNewsTime(self, publishBeginTime='', publishEndTime='', field=''):
"""
        Themes associated with news, queried by publish time. Input a publish start/end time to
        get the theme ID and name plus the news title, publish time and the link's insert/update
        times. (Note: coverage is much richer from 2014/1/1; updated in real time; link data
        start 2015/04/07.)
"""
code, result = self.client.getData(vs.THEMESBYNEWSTIME%(publishBeginTime, publishEndTime, field))
return _ret_data(code, result)
def ThemesByNewsTimeCompanyRel(self, publishBeginTime='', publishEndTime='', field=''):
"""
        Themes associated with news queried by publish time, restricted to company-related
        news. Inputs and outputs match getThemesByNewsTime. (Note: coverage is much richer from
        2014/1/1; updated in real time; link data start 2015/04/07.)
"""
code, result = self.client.getData(vs.THEMESBYNEWSTIMECOMPANYREL%(publishBeginTime, publishEndTime, field))
return _ret_data(code, result)
def ThemesByNewsTimeLF(self, publishBeginTime='', publishEndTime='', field=''):
"""
        Themes associated with news queried by publish time, a filtered refinement of
        getThemesByNewsTime. Inputs and outputs are the same. (Note: coverage is much richer
        from 2014/1/1; updated in real time; link data start 2015/04/07.)
"""
code, result = self.client.getData(vs.THEMESBYNEWSTIMELF%(publishBeginTime, publishEndTime, field))
return _ret_data(code, result)
def ThemesByNewsTimeMF(self, publishBeginTime='', publishEndTime='', field=''):
"""
        Themes associated with news queried by publish time, a further refinement of
        getThemesByNewsTimeLF and the most strictly filtered (smallest) result. Inputs and
        outputs are the same. (Note: coverage is much richer from 2014/1/1; updated in real
        time; link data start 2015/04/07.)
"""
code, result = self.client.getData(vs.THEMESBYNEWSTIMEMF%(publishBeginTime, publishEndTime, field))
return _ret_data(code, result)
def ReportContentByID(self, reportID='', field=''):
"""
        Original announcement content by announcement ID. Input an announcement ID to get the
        announcement ID, title, exchange, original exchange category, publish time, full text,
        URL and insert time. (Note: data start 2000/1/8, updated daily.)
"""
code, result = self.client.getData(vs.REPORTCONTENTBYID%(reportID, field))
return _ret_data(code, result)
def ThemesByNews2(self, insertBeginTime='', insertEndTime='', newsID='', field=''):
"""
获取新闻关联的主题数据,原API(获取新闻关联的主题数据-getThemesByNews)的升级版。输入新闻ID或新闻与主题的关联数据入库起止时间,可以获取相关的主题信息,如:主题ID、主题名称,同时返回新闻标题、新闻发布时间、关联数据入库时间、更新时间等。(注:1、自2014/1/1起新闻来源众多、新闻量日均4万左右,2013年及之前的网站来源少、新闻数据量少;2、数据实时更新;3、关联数据入库起始时间为2015/06/17)
"""
code, result = self.client.getData(vs.THEMESBYNEWS2%(insertBeginTime, insertEndTime, newsID, field))
return _ret_data(code, result)
def ThemesByNewsTime2(self, publishBeginTime='', publishEndTime='', field=''):
"""
根据发布时间获取新闻关联的主题数据,原API(根据发布时间获取新闻关联的主题数据-getThemesByNewsTime)的升级版。输入新闻发布的起止时间,可以获取相关的主题信息,如:主题ID、主题名称,同时返回新闻标题、新闻发布时间、关联数据入库时间、更新时间等。(注:1、自2014/1/1起新闻来源众多、新闻量日均4万左右,2013年及之前的网站来源少、新闻数据量少;2、数据实时更新;3、关联数据入库起始时间为2015/06/17。)
"""
code, result = self.client.getData(vs.THEMESBYNEWSTIME2%(publishBeginTime, publishEndTime, field))
return _ret_data(code, result)
def _ret_data(code, result):
if code==200:
result = result.decode('utf-8') if vs.PY3 else result
df = pd.read_csv(StringIO(result))
return df
else:
print(result)
return None
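# A minimal usage sketch (hypothetical: the names of the wrapping class and the
# client object are assumptions, since their definitions live elsewhere in this
# module/package):
#
#   st = Subject(client)                          # assumed wrapper class
#   df = st.SocialDataGuba(beginDate='20150101',
#                          endDate='20150131',
#                          ticker='000001')
#   if df is not None:
#       print(df.head())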
| bsd-3-clause |
WiproOpenSourcePractice/bdreappstore | enu/real_time_event_detection/hadoopstream/reducer.py | 1 | 3287 | #!/usr/bin/env python
from operator import itemgetter
import sys
import os
os.environ['MPLCONFIGDIR'] = "/tmp/"
import pandas as pd
current_key = None
current_count = 0
key = None
variable_list = []
header = None
peClass = None
def qt_rmvd( string ):
string = string.strip()
if string.startswith("'") and string.endswith("'"):
string = string[1:-1]
return string
def print_row(key,pec, *dframes):
day = key.split('/')[-1].split(".")[0]
well = key.split('/')[-2]
key = [well,day]
if not day.startswith("Event"):
text = []
for var in ["Variable3","Variable7","Variable11","Variable14","Variable15"]:
for df in dframes:
text.append(df[var])
#if pec != None:
text.append(pec)
print ",".join(key)+","+",".join([str(t) for t in text])
#print '%s\t%s' % (key,text)
#print '%s\t%s' % (key,["%0.2f" % i for i in values])
# input comes from STDIN
for line in sys.stdin:
# remove leading and trailing whitespace
line = line.strip()
# parse the input we got from mapper.py
key, values = line.split('\t', 1)
values = values[1:-1]
fields = values.split(',')
if current_key is None:
current_key = key
if current_key == key:
try:
x = float(qt_rmvd(fields[0]))
var = [float(qt_rmvd(x)) for x in fields[1:]]
variable_list.append(var)
except ValueError:
if not qt_rmvd(fields[0]).startswith("Day") :
header = [qt_rmvd(x) for x in fields[1:]]
#print key
else:
peClass = qt_rmvd(fields[1])
#print key, fields, peClass
elif len(variable_list) > 1:
#print current_key
df = pd.DataFrame(variable_list,columns=header)
print_row(current_key,peClass,df.mean(),df.std(),df.min(),df.max(),df.quantile(.95),df.quantile(.05),df.quantile(),df.skew(),df.kurt())
variable_list = []
header = []
peClass = None
current_key = key
try:
x = float(qt_rmvd(fields[0]))
var = [float(qt_rmvd(x)) for x in fields[1:]]
variable_list.append(var)
except ValueError:
if not qt_rmvd(fields[0]).startswith("Day") :
header = [qt_rmvd(x) for x in fields[1:]]
#print key
else:
peClass = qt_rmvd(fields[1])
#print key, fields, peClass
else:
variable_list = []
header = []
peClass = None
current_key = key
try:
x = float(qt_rmvd(fields[0]))
var = [float(qt_rmvd(x)) for x in fields[1:]]
variable_list.append(var)
except ValueError:
if not qt_rmvd(fields[0]).startswith("Day") :
header = [qt_rmvd(x) for x in fields[1:]]
#print key
else:
peClass = qt_rmvd(fields[1])
#print key, fields, peClass
#print current_key
if len(variable_list) > 1:
df = pd.DataFrame(variable_list,columns=header)
print_row(current_key,peClass,df.mean(),df.std(),df.min(),df.max(),df.quantile(.95),df.quantile(.05),df.quantile(),df.skew(),df.kurt())
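# A quick local sanity check of this streaming job (illustrative only; assumes a
# companion mapper.py that emits "key\t[field0, field1, ...]" lines and a sample
# input file named as below):
#
#   cat sample_well_data.csv | python mapper.py | sort -k1,1 | python reducer.py
#
# On a cluster, the same mapper/reducer pair would normally be handed to the
# Hadoop streaming jar through its -mapper and -reducer options.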
| apache-2.0 |
mjgrav2001/scikit-learn | sklearn/kernel_ridge.py | 155 | 6545 | """Module :mod:`sklearn.kernel_ridge` implements kernel ridge regression."""
# Authors: Mathieu Blondel <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
import numpy as np
from .base import BaseEstimator, RegressorMixin
from .metrics.pairwise import pairwise_kernels
from .linear_model.ridge import _solve_cholesky_kernel
from .utils import check_X_y
from .utils.validation import check_is_fitted
class KernelRidge(BaseEstimator, RegressorMixin):
"""Kernel ridge regression.
Kernel ridge regression (KRR) combines ridge regression (linear least
squares with l2-norm regularization) with the kernel trick. It thus
learns a linear function in the space induced by the respective kernel and
the data. For non-linear kernels, this corresponds to a non-linear
function in the original space.
The form of the model learned by KRR is identical to support vector
regression (SVR). However, different loss functions are used: KRR uses
squared error loss while support vector regression uses epsilon-insensitive
loss, both combined with l2 regularization. In contrast to SVR, fitting a
KRR model can be done in closed-form and is typically faster for
medium-sized datasets. On the other hand, the learned model is non-sparse
and thus slower than SVR, which learns a sparse model for epsilon > 0, at
prediction-time.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <kernel_ridge>`.
Parameters
----------
alpha : {float, array-like}, shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
kernel : string or callable, default="linear"
Kernel mapping used internally. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
gamma : float, default=None
Gamma parameter for the RBF, polynomial, exponential chi2 and
sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
Attributes
----------
dual_coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s) in kernel space
X_fit_ : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data, which is also required for prediction
References
----------
* Kevin P. Murphy
"Machine Learning: A Probabilistic Perspective", The MIT Press
chapter 14.4.3, pp. 492-493
See also
--------
Ridge
Linear ridge regression.
SVR
Support Vector Regression implemented using libsvm.
Examples
--------
>>> from sklearn.kernel_ridge import KernelRidge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> clf = KernelRidge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
KernelRidge(alpha=1.0, coef0=1, degree=3, gamma=None, kernel='linear',
kernel_params=None)
"""
def __init__(self, alpha=1, kernel="linear", gamma=None, degree=3, coef0=1,
kernel_params=None):
self.alpha = alpha
self.kernel = kernel
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def _get_kernel(self, X, Y=None):
if callable(self.kernel):
params = self.kernel_params or {}
else:
params = {"gamma": self.gamma,
"degree": self.degree,
"coef0": self.coef0}
return pairwise_kernels(X, Y, metric=self.kernel,
filter_params=True, **params)
@property
def _pairwise(self):
return self.kernel == "precomputed"
def fit(self, X, y=None, sample_weight=None):
"""Fit Kernel Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample, ignored if None is passed.
Returns
-------
self : returns an instance of self.
"""
# Convert data
X, y = check_X_y(X, y, accept_sparse=("csr", "csc"), multi_output=True,
y_numeric=True)
K = self._get_kernel(X)
alpha = np.atleast_1d(self.alpha)
ravel = False
if len(y.shape) == 1:
y = y.reshape(-1, 1)
ravel = True
copy = self.kernel == "precomputed"
self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha,
sample_weight,
copy)
if ravel:
self.dual_coef_ = self.dual_coef_.ravel()
self.X_fit_ = X
return self
def predict(self, X):
"""Predict using the the kernel ridge model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
C : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, ["X_fit_", "dual_coef_"])
K = self._get_kernel(X, self.X_fit_)
return np.dot(K, self.dual_coef_)
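# For intuition only: with a single alpha and no sample weights, fit() above
# amounts to the closed-form dual solution sketched below in plain NumPy. This
# is an illustration, not the library code path (which goes through
# _solve_cholesky_kernel); K and K_test are assumed precomputed kernel matrices.
#
#   import numpy as np
#   alpha = 1.0
#   dual_coef = np.linalg.solve(K + alpha * np.eye(K.shape[0]), y)
#   y_pred = K_test.dot(dual_coef)        # K_test = kernel(X_new, X_train)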
| bsd-3-clause |
stetelepta/data-science-experiments | experiments/nn-triangulation/utils/plot_utils.py | 1 | 3150 | from matplotlib import gridspec
from pylab import cm
from scipy import interpolate
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import seaborn as sns
def plot_predictions(probabilities, y_test, points_test, **params):
# reshape each column to its 2D version
probs_matrix = probabilities.reshape(params['grid_width'], params['grid_height'])
# new figure
plt.figure(figsize=(20, 10))
# set fig size
gs = gridspec.GridSpec(1, 3, width_ratios=[1, 1, 1])
# plot prediction
plt.subplot(gs[0])
plt.title("prediction 2D histogram", y=1.08)
plt.axis('off')
plt.imshow(probs_matrix)
# plot true values
plt.subplot(gs[1])
plt.title("true value: (%.2f, %.2f)" % (points_test[0], points_test[1]), y=1.08)
plt.axis('off')
plt.imshow(y_test.reshape(params['grid_width'], params['grid_height']))
with plt.style.context(('seaborn-darkgrid')):
# plot sensor distances
plt.subplot(gs[2])
plot_sensors_distances(points_test, params['sensors'], params['grid_width'], params['grid_height'])
def plot_sensors_distances(point, sensors, grid_width, grid_height):
# point: (2, 1) array with 2D true coordinates of point
# sensors: (2, nr_sensors) array with sensor locations
# grid_width, grid_width: for drawing the grid
# reshape raveled sensors to (2, nr_sensors) matrix
sensors = sensors.reshape(-1, 2).T
# get current axis
ax = plt.gca()
# set margin for plotting grid
margin = 0
# setup axis
ax.set_xticks(np.arange(0-margin, grid_width+margin, 1), minor=False)
ax.set_yticks(np.arange(0-margin, grid_height+margin, 1), minor=False)
ax.axis([0-margin, grid_width+margin, grid_height+margin, -margin])
ax.xaxis.grid(True, which='major')
ax.yaxis.grid(True, which='major')
ax.xaxis.tick_top()
ax.set_aspect('equal')
# plot grid
ax.add_patch(
patches.Rectangle(
(0, 0),
grid_width,
grid_height,
fill=False, # remove background
color='darkgrey'
)
)
p1_y = plt.Circle((point[0], point[1]), 0.2, color='darkviolet', zorder=2)
ax.add_artist(p1_y)
# plot title
plt.title("(%.2f, %.2f)" % (point[0], point[1]), y=1.08)
# plot sensors
for i in range(0, sensors.shape[1]):
s = plt.Circle((sensors[0, i], sensors[1, i]), 0.5, color='k', zorder=1)
ax.add_artist(s)
# plot distances
for i in range(0, sensors.shape[1]):
# calculate distance between sensor and points
dx = point[0] - sensors[0, i]
dy = point[1] - sensors[1, i]
distance = np.sqrt(np.square(dx)+np.square(dy))
# plot range around sensor
r = plt.Circle((sensors[0, i], sensors[1, i]), distance, color='k', zorder=1, fill=False)
ax.add_artist(r)
# draw arrow from sensor to point
ax.arrow(sensors[0, i], sensors[1, i], dx, dy, head_width=0, head_length=0, fc='k', ec='k', lw=0.1, zorder=1)
ax.text(sensors[0, i] + dx/2, sensors[1, i] + dy/2, u'%.2f' % distance, fontsize=8)
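# A minimal usage sketch with made-up values (all names and shapes below are
# assumptions for illustration):
#
#   point = np.array([3.5, 4.2])                              # true 2D location
#   sensors = np.array([[0.0, 0.0], [9.0, 0.0], [0.0, 9.0]])  # (n_sensors, 2)
#   plt.figure()
#   plot_sensors_distances(point, sensors, grid_width=10, grid_height=10)
#   plt.show()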
| mit |
jlegendary/scikit-learn | sklearn/ensemble/voting_classifier.py | 178 | 8006 | """
Soft Voting/Majority Rule classifier.
This module contains a Soft Voting/Majority Rule classifier for
classification estimators.
"""
# Authors: Sebastian Raschka <[email protected]>,
# Gilles Louppe <[email protected]>
#
# Licence: BSD 3 clause
import numpy as np
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import TransformerMixin
from ..base import clone
from ..preprocessing import LabelEncoder
from ..externals import six
class VotingClassifier(BaseEstimator, ClassifierMixin, TransformerMixin):
"""Soft Voting/Majority Rule classifier for unfitted estimators.
Read more in the :ref:`User Guide <voting_classifier>`.
Parameters
----------
estimators : list of (string, estimator) tuples
Invoking the `fit` method on the `VotingClassifier` will fit clones
of those original estimators that will be stored in the class attribute
`self.estimators_`.
voting : str, {'hard', 'soft'} (default='hard')
If 'hard', uses predicted class labels for majority rule voting.
Else if 'soft', predicts the class label based on the argmax of
        the sums of the predicted probabilities, which is recommended for
an ensemble of well-calibrated classifiers.
weights : array-like, shape = [n_classifiers], optional (default=`None`)
        Sequence of weights (`float` or `int`) to weight the occurrences of
predicted class labels (`hard` voting) or class probabilities
before averaging (`soft` voting). Uses uniform weights if `None`.
Attributes
----------
classes_ : array-like, shape = [n_predictions]
Examples
--------
>>> import numpy as np
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.ensemble import RandomForestClassifier
>>> clf1 = LogisticRegression(random_state=1)
>>> clf2 = RandomForestClassifier(random_state=1)
>>> clf3 = GaussianNB()
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> eclf1 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='hard')
>>> eclf1 = eclf1.fit(X, y)
>>> print(eclf1.predict(X))
[1 1 1 2 2 2]
>>> eclf2 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft')
>>> eclf2 = eclf2.fit(X, y)
>>> print(eclf2.predict(X))
[1 1 1 2 2 2]
>>> eclf3 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft', weights=[2,1,1])
>>> eclf3 = eclf3.fit(X, y)
>>> print(eclf3.predict(X))
[1 1 1 2 2 2]
>>>
"""
def __init__(self, estimators, voting='hard', weights=None):
self.estimators = estimators
self.named_estimators = dict(estimators)
self.voting = voting
self.weights = weights
def fit(self, X, y):
""" Fit the estimators.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
"""
if isinstance(y, np.ndarray) and len(y.shape) > 1 and y.shape[1] > 1:
raise NotImplementedError('Multilabel and multi-output'
' classification is not supported.')
if self.voting not in ('soft', 'hard'):
raise ValueError("Voting must be 'soft' or 'hard'; got (voting=%r)"
% self.voting)
if self.weights and len(self.weights) != len(self.estimators):
raise ValueError('Number of classifiers and weights must be equal'
'; got %d weights, %d estimators'
% (len(self.weights), len(self.estimators)))
self.le_ = LabelEncoder()
self.le_.fit(y)
self.classes_ = self.le_.classes_
self.estimators_ = []
for name, clf in self.estimators:
fitted_clf = clone(clf).fit(X, self.le_.transform(y))
self.estimators_.append(fitted_clf)
return self
def predict(self, X):
""" Predict class labels for X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
maj : array-like, shape = [n_samples]
Predicted class labels.
"""
if self.voting == 'soft':
maj = np.argmax(self.predict_proba(X), axis=1)
else: # 'hard' voting
predictions = self._predict(X)
maj = np.apply_along_axis(lambda x:
np.argmax(np.bincount(x,
weights=self.weights)),
axis=1,
arr=predictions)
maj = self.le_.inverse_transform(maj)
return maj
def _collect_probas(self, X):
"""Collect results from clf.predict calls. """
return np.asarray([clf.predict_proba(X) for clf in self.estimators_])
def _predict_proba(self, X):
"""Predict class probabilities for X in 'soft' voting """
avg = np.average(self._collect_probas(X), axis=0, weights=self.weights)
return avg
@property
def predict_proba(self):
"""Compute probabilities of possible outcomes for samples in X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
avg : array-like, shape = [n_samples, n_classes]
Weighted average probability for each class per sample.
"""
if self.voting == 'hard':
raise AttributeError("predict_proba is not available when"
" voting=%r" % self.voting)
return self._predict_proba
def transform(self, X):
"""Return class labels or probabilities for X for each estimator.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
If `voting='soft'`:
array-like = [n_classifiers, n_samples, n_classes]
Class probabilties calculated by each classifier.
If `voting='hard'`:
array-like = [n_classifiers, n_samples]
Class labels predicted by each classifier.
"""
if self.voting == 'soft':
return self._collect_probas(X)
else:
return self._predict(X)
def get_params(self, deep=True):
"""Return estimator parameter names for GridSearch support"""
if not deep:
return super(VotingClassifier, self).get_params(deep=False)
else:
out = super(VotingClassifier, self).get_params(deep=False)
out.update(self.named_estimators.copy())
for name, step in six.iteritems(self.named_estimators):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
return out
def _predict(self, X):
"""Collect results from clf.predict calls. """
return np.asarray([clf.predict(X) for clf in self.estimators_]).T
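# Illustration of the 'soft' voting rule implemented above (a standalone sketch,
# not part of scikit-learn): per-classifier class probabilities are averaged
# with the given weights and the argmax is taken.
#
#   import numpy as np
#   probas = np.array([[[0.2, 0.8]],    # clf1, one sample
#                      [[0.6, 0.4]],    # clf2
#                      [[0.4, 0.6]]])   # clf3
#   avg = np.average(probas, axis=0, weights=[2, 1, 1])   # -> [[0.35, 0.65]]
#   pred = avg.argmax(axis=1)                             # -> [1]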
| bsd-3-clause |
ANNarchy/ANNarchy | examples/izhikevich/Izhikevich.py | 2 | 1901 | #
# ANNarchy - Pulse-coupled network
#
# Implementation of the pulse-coupled network proposed in:
#
# Izhikevich, E.M. (2003). Simple Model of Spiking Neurons, IEEE Transaction on Neural Networks, 14:6.
#
# authors: Helge Uelo Dinkelbach, Julien Vitay
#
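# For reference, the built-in Izhikevich neuron follows the model from that
# paper (see the ANNarchy documentation for the exact implementation and the
# role of the 'noise' parameter):
#
#     dv/dt = 0.04*v^2 + 5*v + 140 - u + I
#     du/dt = a*(b*v - u)
#     if v >= 30 mV: v <- c ; u <- u + d
#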
from ANNarchy import *
# Create the excitatory and inhibitory population
pop = Population(geometry=1000, neuron=Izhikevich)
Exc = pop[:800] ; Inh = pop[800:]
# Set the population parameters
re = np.random.random(800) ; ri = np.random.random(200)
Exc.noise = 5.0 ; Inh.noise = 2.0
Exc.a = 0.02 ; Inh.a = 0.02 + 0.08 * ri
Exc.b = 0.2 ; Inh.b = 0.25 - 0.05 * ri
Exc.c = -65.0 + 15.0 * re**2 ; Inh.c = -65.0
Exc.d = 8.0 - 6.0 * re**2 ; Inh.d = 2.0
Exc.v = -65.0 ; Inh.v = -65.0
Exc.u = Exc.v * Exc.b ; Inh.u = Inh.v * Inh.b
# Create the projections
exc_proj = Projection(pre=Exc, post=pop, target='exc')
exc_proj.connect_all_to_all(weights=Uniform(0.0, 0.5))
inh_proj = Projection(pre=Inh, post=pop, target='inh')
inh_proj.connect_all_to_all(weights=Uniform(0.0, 1.0))
# Compile
compile()
# Start recording the spikes in the network to produce the plots
M = Monitor(pop, ['spike', 'v'])
# Simulate 1 second
simulate(1000.0, measure_time=True)
# Retrieve the spike recordings and the membrane potential
spikes = M.get('spike')
v = M.get('v')
# Compute the raster plot
t, n = M.raster_plot(spikes)
# Compute the population firing rate
fr = M.histogram(spikes)
# Plot the results
import matplotlib.pyplot as plt
# First plot: raster plot
ax = plt.subplot(3,1,1)
ax.plot(t, n, 'b.', markersize=1.0)
# Second plot: membrane potential of a single excitatory cell
ax = plt.subplot(3,1,2)
ax.plot(v[:, 15]) # for example
# Third plot: number of spikes per step in the population.
ax = plt.subplot(3,1,3)
ax.plot(fr)
plt.show()
| gpl-2.0 |
caisq/tensorflow | tensorflow/contrib/losses/python/metric_learning/metric_loss_ops.py | 30 | 40476 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements various metric learning losses."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.summary import summary
try:
# pylint: disable=g-import-not-at-top
from sklearn import metrics
HAS_SKLEARN = True
except ImportError:
HAS_SKLEARN = False
def pairwise_distance(feature, squared=False):
"""Computes the pairwise distance matrix with numerical stability.
output[i, j] = || feature[i, :] - feature[j, :] ||_2
Args:
feature: 2-D Tensor of size [number of data, feature dimension].
squared: Boolean, whether or not to square the pairwise distances.
Returns:
pairwise_distances: 2-D Tensor of size [number of data, number of data].
"""
pairwise_distances_squared = math_ops.add(
math_ops.reduce_sum(math_ops.square(feature), axis=[1], keepdims=True),
math_ops.reduce_sum(
math_ops.square(array_ops.transpose(feature)),
axis=[0],
keepdims=True)) - 2.0 * math_ops.matmul(feature,
array_ops.transpose(feature))
# Deal with numerical inaccuracies. Set small negatives to zero.
pairwise_distances_squared = math_ops.maximum(pairwise_distances_squared, 0.0)
# Get the mask where the zero distances are at.
error_mask = math_ops.less_equal(pairwise_distances_squared, 0.0)
# Optionally take the sqrt.
if squared:
pairwise_distances = pairwise_distances_squared
else:
pairwise_distances = math_ops.sqrt(
pairwise_distances_squared + math_ops.to_float(error_mask) * 1e-16)
# Undo conditionally adding 1e-16.
pairwise_distances = math_ops.multiply(
pairwise_distances, math_ops.to_float(math_ops.logical_not(error_mask)))
num_data = array_ops.shape(feature)[0]
# Explicitly set diagonals to zero.
mask_offdiagonals = array_ops.ones_like(pairwise_distances) - array_ops.diag(
array_ops.ones([num_data]))
pairwise_distances = math_ops.multiply(pairwise_distances, mask_offdiagonals)
return pairwise_distances
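# For reference, the code above relies on the expansion
# ||a - b||^2 = ||a||^2 - 2*a.b + ||b||^2; an equivalent NumPy check
# (illustrative only, not part of the library) would be:
#
#   import numpy as np
#   f = np.random.randn(4, 3)
#   d2 = (f ** 2).sum(1, keepdims=True) + (f ** 2).sum(1) - 2.0 * f.dot(f.T)
#   d2 = np.maximum(d2, 0.0)   # clip tiny negatives, as done above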
def contrastive_loss(labels, embeddings_anchor, embeddings_positive,
margin=1.0):
"""Computes the contrastive loss.
This loss encourages the embedding to be close to each other for
the samples of the same label and the embedding to be far apart at least
by the margin constant for the samples of different labels.
See: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
Args:
labels: 1-D tf.int32 `Tensor` with shape [batch_size] of
binary labels indicating positive vs negative pair.
embeddings_anchor: 2-D float `Tensor` of embedding vectors for the anchor
images. Embeddings should be l2 normalized.
embeddings_positive: 2-D float `Tensor` of embedding vectors for the
positive images. Embeddings should be l2 normalized.
margin: margin term in the loss definition.
Returns:
contrastive_loss: tf.float32 scalar.
"""
# Get per pair distances
distances = math_ops.sqrt(
math_ops.reduce_sum(
math_ops.square(embeddings_anchor - embeddings_positive), 1))
# Add contrastive loss for the siamese network.
# label here is {0,1} for neg, pos.
return math_ops.reduce_mean(
math_ops.to_float(labels) * math_ops.square(distances) +
(1. - math_ops.to_float(labels)) *
math_ops.square(math_ops.maximum(margin - distances, 0.)),
name='contrastive_loss')
def masked_maximum(data, mask, dim=1):
"""Computes the axis wise maximum over chosen elements.
Args:
data: 2-D float `Tensor` of size [n, m].
mask: 2-D Boolean `Tensor` of size [n, m].
dim: The dimension over which to compute the maximum.
Returns:
masked_maximums: N-D `Tensor`.
The maximized dimension is of size 1 after the operation.
"""
axis_minimums = math_ops.reduce_min(data, dim, keepdims=True)
masked_maximums = math_ops.reduce_max(
math_ops.multiply(data - axis_minimums, mask), dim,
keepdims=True) + axis_minimums
return masked_maximums
def masked_minimum(data, mask, dim=1):
"""Computes the axis wise minimum over chosen elements.
Args:
data: 2-D float `Tensor` of size [n, m].
mask: 2-D Boolean `Tensor` of size [n, m].
dim: The dimension over which to compute the minimum.
Returns:
masked_minimums: N-D `Tensor`.
The minimized dimension is of size 1 after the operation.
"""
axis_maximums = math_ops.reduce_max(data, dim, keepdims=True)
masked_minimums = math_ops.reduce_min(
math_ops.multiply(data - axis_maximums, mask), dim,
keepdims=True) + axis_maximums
return masked_minimums
def triplet_semihard_loss(labels, embeddings, margin=1.0):
"""Computes the triplet loss with semi-hard negative mining.
  The loss encourages the positive distances (between a pair of embeddings with
  the same labels) to be smaller than the minimum negative distance among the
  negatives that are at least greater than the positive distance plus the
  margin constant (the so-called semi-hard negatives) in the mini-batch. If no
  such negative exists, it uses the largest negative distance instead.
See: https://arxiv.org/abs/1503.03832.
Args:
labels: 1-D tf.int32 `Tensor` with shape [batch_size] of
multiclass integer labels.
embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should
be l2 normalized.
margin: Float, margin term in the loss definition.
Returns:
triplet_loss: tf.float32 scalar.
"""
# Reshape [batch_size] label tensor to a [batch_size, 1] label tensor.
lshape = array_ops.shape(labels)
assert lshape.shape == 1
labels = array_ops.reshape(labels, [lshape[0], 1])
# Build pairwise squared distance matrix.
pdist_matrix = pairwise_distance(embeddings, squared=True)
# Build pairwise binary adjacency matrix.
adjacency = math_ops.equal(labels, array_ops.transpose(labels))
# Invert so we can select negatives only.
adjacency_not = math_ops.logical_not(adjacency)
batch_size = array_ops.size(labels)
# Compute the mask.
pdist_matrix_tile = array_ops.tile(pdist_matrix, [batch_size, 1])
mask = math_ops.logical_and(
array_ops.tile(adjacency_not, [batch_size, 1]),
math_ops.greater(
pdist_matrix_tile, array_ops.reshape(
array_ops.transpose(pdist_matrix), [-1, 1])))
mask_final = array_ops.reshape(
math_ops.greater(
math_ops.reduce_sum(
math_ops.cast(mask, dtype=dtypes.float32), 1, keepdims=True),
0.0), [batch_size, batch_size])
mask_final = array_ops.transpose(mask_final)
adjacency_not = math_ops.cast(adjacency_not, dtype=dtypes.float32)
mask = math_ops.cast(mask, dtype=dtypes.float32)
# negatives_outside: smallest D_an where D_an > D_ap.
negatives_outside = array_ops.reshape(
masked_minimum(pdist_matrix_tile, mask), [batch_size, batch_size])
negatives_outside = array_ops.transpose(negatives_outside)
# negatives_inside: largest D_an.
negatives_inside = array_ops.tile(
masked_maximum(pdist_matrix, adjacency_not), [1, batch_size])
semi_hard_negatives = array_ops.where(
mask_final, negatives_outside, negatives_inside)
loss_mat = math_ops.add(margin, pdist_matrix - semi_hard_negatives)
mask_positives = math_ops.cast(
adjacency, dtype=dtypes.float32) - array_ops.diag(
array_ops.ones([batch_size]))
# In lifted-struct, the authors multiply 0.5 for upper triangular
# in semihard, they take all positive pairs except the diagonal.
num_positives = math_ops.reduce_sum(mask_positives)
triplet_loss = math_ops.truediv(
math_ops.reduce_sum(
math_ops.maximum(
math_ops.multiply(loss_mat, mask_positives), 0.0)),
num_positives,
name='triplet_semihard_loss')
return triplet_loss
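# A minimal usage sketch (shapes and tensor names are made up; assumes a
# TF1-style graph in which this loss is added to the training objective):
#
#   labels = ops.convert_to_tensor([0, 0, 1, 1])      # [batch_size] int labels
#   embeddings = nn.l2_normalize(net_output, 1)       # [batch_size, dim]
#   loss = triplet_semihard_loss(labels, embeddings, margin=1.0)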
# pylint: disable=line-too-long
def npairs_loss(labels, embeddings_anchor, embeddings_positive,
reg_lambda=0.002, print_losses=False):
"""Computes the npairs loss.
Npairs loss expects paired data where a pair is composed of samples from the
same labels and each pairs in the minibatch have different labels. The loss
has two components. The first component is the L2 regularizer on the
embedding vectors. The second component is the sum of cross entropy loss
which takes each row of the pair-wise similarity matrix as logits and
the remapped one-hot labels as labels.
See: http://www.nec-labs.com/uploads/images/Department-Images/MediaAnalytics/papers/nips16_npairmetriclearning.pdf
Args:
labels: 1-D tf.int32 `Tensor` of shape [batch_size/2].
embeddings_anchor: 2-D Tensor of shape [batch_size/2, embedding_dim] for the
embedding vectors for the anchor images. Embeddings should not be
l2 normalized.
embeddings_positive: 2-D Tensor of shape [batch_size/2, embedding_dim] for the
embedding vectors for the positive images. Embeddings should not be
l2 normalized.
reg_lambda: Float. L2 regularization term on the embedding vectors.
print_losses: Boolean. Option to print the xent and l2loss.
Returns:
npairs_loss: tf.float32 scalar.
"""
# pylint: enable=line-too-long
# Add the regularizer on the embedding.
reg_anchor = math_ops.reduce_mean(
math_ops.reduce_sum(math_ops.square(embeddings_anchor), 1))
reg_positive = math_ops.reduce_mean(
math_ops.reduce_sum(math_ops.square(embeddings_positive), 1))
l2loss = math_ops.multiply(
0.25 * reg_lambda, reg_anchor + reg_positive, name='l2loss')
# Get per pair similarities.
similarity_matrix = math_ops.matmul(
embeddings_anchor, embeddings_positive, transpose_a=False,
transpose_b=True)
# Reshape [batch_size] label tensor to a [batch_size, 1] label tensor.
lshape = array_ops.shape(labels)
assert lshape.shape == 1
labels = array_ops.reshape(labels, [lshape[0], 1])
labels_remapped = math_ops.to_float(
math_ops.equal(labels, array_ops.transpose(labels)))
labels_remapped /= math_ops.reduce_sum(labels_remapped, 1, keepdims=True)
# Add the softmax loss.
xent_loss = nn.softmax_cross_entropy_with_logits(
logits=similarity_matrix, labels=labels_remapped)
xent_loss = math_ops.reduce_mean(xent_loss, name='xentropy')
if print_losses:
xent_loss = logging_ops.Print(
xent_loss, ['cross entropy:', xent_loss, 'l2loss:', l2loss])
return l2loss + xent_loss
def _build_multilabel_adjacency(sparse_labels):
"""Builds multilabel adjacency matrix.
As of March 14th, 2017, there's no op for the dot product between
two sparse tensors in TF. However, there is `sparse_minimum` op which is
equivalent to an AND op between two sparse boolean tensors.
This computes the dot product between two sparse boolean inputs.
Args:
sparse_labels: List of 1-D boolean sparse tensors.
Returns:
adjacency_matrix: 2-D dense `Tensor`.
"""
num_pairs = len(sparse_labels)
adjacency_matrix = array_ops.zeros([num_pairs, num_pairs])
for i in range(num_pairs):
for j in range(num_pairs):
sparse_dot_product = math_ops.to_float(
sparse_ops.sparse_reduce_sum(sparse_ops.sparse_minimum(
sparse_labels[i], sparse_labels[j])))
sparse_dot_product = array_ops.expand_dims(sparse_dot_product, 0)
sparse_dot_product = array_ops.expand_dims(sparse_dot_product, 1)
one_hot_matrix = array_ops.pad(sparse_dot_product,
[[i, num_pairs-i-1],
[j, num_pairs-j-1]], 'CONSTANT')
adjacency_matrix += one_hot_matrix
return adjacency_matrix
def npairs_loss_multilabel(sparse_labels, embeddings_anchor,
embeddings_positive, reg_lambda=0.002,
print_losses=False):
r"""Computes the npairs loss with multilabel data.
Npairs loss expects paired data where a pair is composed of samples from the
same labels and each pairs in the minibatch have different labels. The loss
has two components. The first component is the L2 regularizer on the
embedding vectors. The second component is the sum of cross entropy loss
which takes each row of the pair-wise similarity matrix as logits and
the remapped one-hot labels as labels. Here, the similarity is defined by the
dot product between two embedding vectors. S_{i,j} = f(x_i)^T f(x_j)
To deal with multilabel inputs, we use the count of label intersection
i.e. L_{i,j} = | set_of_labels_for(i) \cap set_of_labels_for(j) |
Then we normalize each rows of the count based label matrix so that each row
sums to one.
Args:
sparse_labels: List of 1-D Boolean `SparseTensor` of dense_shape
[batch_size/2, num_classes] labels for the anchor-pos pairs.
embeddings_anchor: 2-D `Tensor` of shape [batch_size/2, embedding_dim] for
the embedding vectors for the anchor images. Embeddings should not be
l2 normalized.
embeddings_positive: 2-D `Tensor` of shape [batch_size/2, embedding_dim] for
the embedding vectors for the positive images. Embeddings should not be
l2 normalized.
reg_lambda: Float. L2 regularization term on the embedding vectors.
print_losses: Boolean. Option to print the xent and l2loss.
Returns:
npairs_loss: tf.float32 scalar.
Raises:
TypeError: When the specified sparse_labels is not a `SparseTensor`.
"""
if False in [isinstance(
l, sparse_tensor.SparseTensor) for l in sparse_labels]:
raise TypeError(
'sparse_labels must be a list of SparseTensors, but got %s' % str(
sparse_labels))
with ops.name_scope('NpairsLossMultiLabel'):
# Add the regularizer on the embedding.
reg_anchor = math_ops.reduce_mean(
math_ops.reduce_sum(math_ops.square(embeddings_anchor), 1))
reg_positive = math_ops.reduce_mean(
math_ops.reduce_sum(math_ops.square(embeddings_positive), 1))
l2loss = math_ops.multiply(0.25 * reg_lambda,
reg_anchor + reg_positive, name='l2loss')
# Get per pair similarities.
similarity_matrix = math_ops.matmul(
embeddings_anchor, embeddings_positive, transpose_a=False,
transpose_b=True)
# TODO(coreylynch): need to check the sparse values
# TODO(coreylynch): are composed only of 0's and 1's.
multilabel_adjacency_matrix = _build_multilabel_adjacency(sparse_labels)
labels_remapped = math_ops.to_float(multilabel_adjacency_matrix)
labels_remapped /= math_ops.reduce_sum(labels_remapped, 1, keepdims=True)
# Add the softmax loss.
xent_loss = nn.softmax_cross_entropy_with_logits(
logits=similarity_matrix, labels=labels_remapped)
xent_loss = math_ops.reduce_mean(xent_loss, name='xentropy')
if print_losses:
xent_loss = logging_ops.Print(
xent_loss, ['cross entropy:', xent_loss, 'l2loss:', l2loss])
return l2loss + xent_loss
def lifted_struct_loss(labels, embeddings, margin=1.0):
"""Computes the lifted structured loss.
The loss encourages the positive distances (between a pair of embeddings
with the same labels) to be smaller than any negative distances (between a
pair of embeddings with different labels) in the mini-batch in a way
that is differentiable with respect to the embedding vectors.
See: https://arxiv.org/abs/1511.06452.
Args:
labels: 1-D tf.int32 `Tensor` with shape [batch_size] of
multiclass integer labels.
embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should not
be l2 normalized.
margin: Float, margin term in the loss definition.
Returns:
lifted_loss: tf.float32 scalar.
"""
# Reshape [batch_size] label tensor to a [batch_size, 1] label tensor.
lshape = array_ops.shape(labels)
assert lshape.shape == 1
labels = array_ops.reshape(labels, [lshape[0], 1])
# Build pairwise squared distance matrix.
pairwise_distances = pairwise_distance(embeddings)
# Build pairwise binary adjacency matrix.
adjacency = math_ops.equal(labels, array_ops.transpose(labels))
# Invert so we can select negatives only.
adjacency_not = math_ops.logical_not(adjacency)
batch_size = array_ops.size(labels)
diff = margin - pairwise_distances
mask = math_ops.cast(adjacency_not, dtype=dtypes.float32)
# Safe maximum: Temporarily shift negative distances
# above zero before taking max.
# this is to take the max only among negatives.
row_minimums = math_ops.reduce_min(diff, 1, keepdims=True)
row_negative_maximums = math_ops.reduce_max(
math_ops.multiply(diff - row_minimums, mask), 1,
keepdims=True) + row_minimums
# Compute the loss.
# Keep track of matrix of maximums where M_ij = max(m_i, m_j)
# where m_i is the max of alpha - negative D_i's.
# This matches the Caffe loss layer implementation at:
# https://github.com/rksltnl/Caffe-Deep-Metric-Learning-CVPR16/blob/0efd7544a9846f58df923c8b992198ba5c355454/src/caffe/layers/lifted_struct_similarity_softmax_layer.cpp # pylint: disable=line-too-long
max_elements = math_ops.maximum(
row_negative_maximums, array_ops.transpose(row_negative_maximums))
diff_tiled = array_ops.tile(diff, [batch_size, 1])
mask_tiled = array_ops.tile(mask, [batch_size, 1])
max_elements_vect = array_ops.reshape(
array_ops.transpose(max_elements), [-1, 1])
loss_exp_left = array_ops.reshape(
math_ops.reduce_sum(
math_ops.multiply(
math_ops.exp(diff_tiled - max_elements_vect), mask_tiled),
1,
keepdims=True), [batch_size, batch_size])
loss_mat = max_elements + math_ops.log(
loss_exp_left + array_ops.transpose(loss_exp_left))
# Add the positive distance.
loss_mat += pairwise_distances
mask_positives = math_ops.cast(
adjacency, dtype=dtypes.float32) - array_ops.diag(
array_ops.ones([batch_size]))
# *0.5 for upper triangular, and another *0.5 for 1/2 factor for loss^2.
num_positives = math_ops.reduce_sum(mask_positives) / 2.0
lifted_loss = math_ops.truediv(
0.25 * math_ops.reduce_sum(
math_ops.square(
math_ops.maximum(
math_ops.multiply(loss_mat, mask_positives), 0.0))),
num_positives,
name='liftedstruct_loss')
return lifted_loss
def update_1d_tensor(y, index, value):
"""Updates 1d tensor y so that y[index] = value.
Args:
y: 1-D Tensor.
index: index of y to modify.
value: new value to write at y[index].
Returns:
y_mod: 1-D Tensor. Tensor y after the update.
"""
value = array_ops.squeeze(value)
# modify the 1D tensor x at index with value.
# ex) chosen_ids = update_1D_tensor(chosen_ids, cluster_idx, best_medoid)
y_before = array_ops.slice(y, [0], [index])
y_after = array_ops.slice(y, [index + 1], [-1])
y_mod = array_ops.concat([y_before, [value], y_after], 0)
return y_mod
def get_cluster_assignment(pairwise_distances, centroid_ids):
"""Assign data points to the neareset centroids.
Tensorflow has numerical instability and doesn't always choose
the data point with theoretically zero distance as it's nearest neighbor.
Thus, for each centroid in centroid_ids, explicitly assign
the centroid itself as the nearest centroid.
This is done through the mask tensor and the constraint_vect tensor.
Args:
pairwise_distances: 2-D Tensor of pairwise distances.
centroid_ids: 1-D Tensor of centroid indices.
Returns:
y_fixed: 1-D tensor of cluster assignment.
"""
predictions = math_ops.argmin(
array_ops.gather(pairwise_distances, centroid_ids), dimension=0)
batch_size = array_ops.shape(pairwise_distances)[0]
# Deal with numerical instability
mask = math_ops.reduce_any(array_ops.one_hot(
centroid_ids, batch_size, True, False, axis=-1, dtype=dtypes.bool),
axis=0)
constraint_one_hot = math_ops.multiply(
array_ops.one_hot(centroid_ids,
batch_size,
array_ops.constant(1, dtype=dtypes.int64),
array_ops.constant(0, dtype=dtypes.int64),
axis=0,
dtype=dtypes.int64),
math_ops.to_int64(math_ops.range(array_ops.shape(centroid_ids)[0])))
constraint_vect = math_ops.reduce_sum(
array_ops.transpose(constraint_one_hot), axis=0)
y_fixed = array_ops.where(mask, constraint_vect, predictions)
return y_fixed
def compute_facility_energy(pairwise_distances, centroid_ids):
"""Compute the average travel distance to the assigned centroid.
Args:
pairwise_distances: 2-D Tensor of pairwise distances.
centroid_ids: 1-D Tensor of indices.
Returns:
facility_energy: dtypes.float32 scalar.
"""
return -1.0 * math_ops.reduce_sum(
math_ops.reduce_min(
array_ops.gather(pairwise_distances, centroid_ids), axis=0))
def compute_clustering_score(labels, predictions, margin_type):
"""Computes the clustering score via sklearn.metrics functions.
There are various ways to compute the clustering score. Intuitively,
we want to measure the agreement of two clustering assignments (labels vs
predictions) ignoring the permutations and output a score from zero to one.
(where the values close to one indicate significant agreement).
  This code supports the following scoring functions:
    nmi: normalized mutual information
    ami: adjusted mutual information
    ari: adjusted Rand index
vmeasure: v-measure
const: indicator checking whether the two clusterings are the same.
See http://scikit-learn.org/stable/modules/classes.html#clustering-metrics
for the detailed descriptions.
Args:
labels: 1-D Tensor. ground truth cluster assignment.
predictions: 1-D Tensor. predicted cluster assignment.
margin_type: Type of structured margin to use. Default is nmi.
Returns:
clustering_score: dtypes.float32 scalar.
The possible valid values are from zero to one.
Zero means the worst clustering and one means the perfect clustering.
Raises:
ValueError: margin_type is not recognized.
"""
margin_type_to_func = {
'nmi': _compute_nmi_score,
'ami': _compute_ami_score,
'ari': _compute_ari_score,
'vmeasure': _compute_vmeasure_score,
'const': _compute_zeroone_score
}
if margin_type not in margin_type_to_func:
raise ValueError('Unrecognized margin_type: %s' % margin_type)
clustering_score_fn = margin_type_to_func[margin_type]
return array_ops.squeeze(clustering_score_fn(labels, predictions))
def _compute_nmi_score(labels, predictions):
return math_ops.to_float(
script_ops.py_func(
metrics.normalized_mutual_info_score, [labels, predictions],
[dtypes.float64],
name='nmi'))
def _compute_ami_score(labels, predictions):
ami_score = math_ops.to_float(
script_ops.py_func(
metrics.adjusted_mutual_info_score, [labels, predictions],
[dtypes.float64],
name='ami'))
return math_ops.maximum(0.0, ami_score)
def _compute_ari_score(labels, predictions):
ari_score = math_ops.to_float(
script_ops.py_func(
metrics.adjusted_rand_score, [labels, predictions], [dtypes.float64],
name='ari'))
# ari score can go below 0
# http://scikit-learn.org/stable/modules/clustering.html#adjusted-rand-score
return math_ops.maximum(0.0, ari_score)
def _compute_vmeasure_score(labels, predictions):
vmeasure_score = math_ops.to_float(
script_ops.py_func(
metrics.v_measure_score, [labels, predictions], [dtypes.float64],
name='vmeasure'))
return math_ops.maximum(0.0, vmeasure_score)
def _compute_zeroone_score(labels, predictions):
zeroone_score = math_ops.to_float(
math_ops.equal(
math_ops.reduce_sum(
math_ops.to_int32(math_ops.equal(labels, predictions))),
array_ops.shape(labels)[0]))
return zeroone_score
def _find_loss_augmented_facility_idx(pairwise_distances, labels, chosen_ids,
candidate_ids, margin_multiplier,
margin_type):
"""Find the next centroid that maximizes the loss augmented inference.
This function is a subroutine called from compute_augmented_facility_locations
Args:
pairwise_distances: 2-D Tensor of pairwise distances.
labels: 1-D Tensor of ground truth cluster assignment.
chosen_ids: 1-D Tensor of current centroid indices.
candidate_ids: 1-D Tensor of candidate indices.
margin_multiplier: multiplication constant.
margin_type: Type of structured margin to use. Default is nmi.
Returns:
integer index.
"""
num_candidates = array_ops.shape(candidate_ids)[0]
pairwise_distances_chosen = array_ops.gather(pairwise_distances, chosen_ids)
pairwise_distances_candidate = array_ops.gather(
pairwise_distances, candidate_ids)
pairwise_distances_chosen_tile = array_ops.tile(
pairwise_distances_chosen, [1, num_candidates])
candidate_scores = -1.0 * math_ops.reduce_sum(
array_ops.reshape(
math_ops.reduce_min(
array_ops.concat([
pairwise_distances_chosen_tile,
array_ops.reshape(pairwise_distances_candidate, [1, -1])
], 0),
axis=0,
keepdims=True), [num_candidates, -1]),
axis=1)
nmi_scores = array_ops.zeros([num_candidates])
iteration = array_ops.constant(0)
def func_cond(iteration, nmi_scores):
del nmi_scores # Unused in func_cond()
return iteration < num_candidates
def func_body(iteration, nmi_scores):
predictions = get_cluster_assignment(
pairwise_distances,
array_ops.concat([chosen_ids, [candidate_ids[iteration]]], 0))
nmi_score_i = compute_clustering_score(labels, predictions, margin_type)
pad_before = array_ops.zeros([iteration])
pad_after = array_ops.zeros([num_candidates - 1 - iteration])
# return 1 - NMI score as the structured loss.
# because NMI is higher the better [0,1].
return iteration + 1, nmi_scores + array_ops.concat(
[pad_before, [1.0 - nmi_score_i], pad_after], 0)
_, nmi_scores = control_flow_ops.while_loop(
func_cond, func_body, [iteration, nmi_scores])
candidate_scores = math_ops.add(
candidate_scores, margin_multiplier * nmi_scores)
argmax_index = math_ops.to_int32(
math_ops.argmax(candidate_scores, axis=0))
return candidate_ids[argmax_index]
def compute_augmented_facility_locations(pairwise_distances, labels, all_ids,
margin_multiplier, margin_type):
"""Computes the centroid locations.
Args:
pairwise_distances: 2-D Tensor of pairwise distances.
labels: 1-D Tensor of ground truth cluster assignment.
all_ids: 1-D Tensor of all data indices.
margin_multiplier: multiplication constant.
margin_type: Type of structured margin to use. Default is nmi.
Returns:
chosen_ids: 1-D Tensor of chosen centroid indices.
"""
def func_cond_augmented(iteration, chosen_ids):
del chosen_ids # Unused argument in func_cond_augmented.
return iteration < num_classes
def func_body_augmented(iteration, chosen_ids):
# find a new facility location to add
# based on the clustering score and the NMI score
candidate_ids = array_ops.setdiff1d(all_ids, chosen_ids)[0]
new_chosen_idx = _find_loss_augmented_facility_idx(pairwise_distances,
labels, chosen_ids,
candidate_ids,
margin_multiplier,
margin_type)
chosen_ids = array_ops.concat([chosen_ids, [new_chosen_idx]], 0)
return iteration + 1, chosen_ids
num_classes = array_ops.size(array_ops.unique(labels)[0])
chosen_ids = array_ops.constant(0, dtype=dtypes.int32, shape=[0])
# num_classes get determined at run time based on the sampled batch.
iteration = array_ops.constant(0)
_, chosen_ids = control_flow_ops.while_loop(
func_cond_augmented,
func_body_augmented, [iteration, chosen_ids],
shape_invariants=[iteration.get_shape(), tensor_shape.TensorShape(
[None])])
return chosen_ids
def update_medoid_per_cluster(pairwise_distances, pairwise_distances_subset,
labels, chosen_ids, cluster_member_ids,
cluster_idx, margin_multiplier, margin_type):
"""Updates the cluster medoid per cluster.
Args:
pairwise_distances: 2-D Tensor of pairwise distances.
pairwise_distances_subset: 2-D Tensor of pairwise distances for one cluster.
labels: 1-D Tensor of ground truth cluster assignment.
chosen_ids: 1-D Tensor of cluster centroid indices.
cluster_member_ids: 1-D Tensor of cluster member indices for one cluster.
cluster_idx: Index of this one cluster.
margin_multiplier: multiplication constant.
margin_type: Type of structured margin to use. Default is nmi.
Returns:
chosen_ids: Updated 1-D Tensor of cluster centroid indices.
"""
def func_cond(iteration, scores_margin):
del scores_margin # Unused variable scores_margin.
return iteration < num_candidates
def func_body(iteration, scores_margin):
# swap the current medoid with the candidate cluster member
candidate_medoid = math_ops.to_int32(cluster_member_ids[iteration])
tmp_chosen_ids = update_1d_tensor(chosen_ids, cluster_idx, candidate_medoid)
predictions = get_cluster_assignment(pairwise_distances, tmp_chosen_ids)
metric_score = compute_clustering_score(labels, predictions, margin_type)
pad_before = array_ops.zeros([iteration])
pad_after = array_ops.zeros([num_candidates - 1 - iteration])
return iteration + 1, scores_margin + array_ops.concat(
[pad_before, [1.0 - metric_score], pad_after], 0)
# pairwise_distances_subset is of size [p, 1, 1, p],
# the intermediate dummy dimensions at
# [1, 2] makes this code work in the edge case where p=1.
# this happens if the cluster size is one.
scores_fac = -1.0 * math_ops.reduce_sum(
array_ops.squeeze(pairwise_distances_subset, [1, 2]), axis=0)
iteration = array_ops.constant(0)
num_candidates = array_ops.size(cluster_member_ids)
scores_margin = array_ops.zeros([num_candidates])
_, scores_margin = control_flow_ops.while_loop(func_cond, func_body,
[iteration, scores_margin])
candidate_scores = math_ops.add(scores_fac, margin_multiplier * scores_margin)
argmax_index = math_ops.to_int32(
math_ops.argmax(candidate_scores, axis=0))
best_medoid = math_ops.to_int32(cluster_member_ids[argmax_index])
chosen_ids = update_1d_tensor(chosen_ids, cluster_idx, best_medoid)
return chosen_ids
def update_all_medoids(pairwise_distances, predictions, labels, chosen_ids,
margin_multiplier, margin_type):
"""Updates all cluster medoids a cluster at a time.
Args:
pairwise_distances: 2-D Tensor of pairwise distances.
predictions: 1-D Tensor of predicted cluster assignment.
labels: 1-D Tensor of ground truth cluster assignment.
chosen_ids: 1-D Tensor of cluster centroid indices.
margin_multiplier: multiplication constant.
margin_type: Type of structured margin to use. Default is nmi.
Returns:
chosen_ids: Updated 1-D Tensor of cluster centroid indices.
"""
def func_cond_augmented_pam(iteration, chosen_ids):
del chosen_ids # Unused argument.
return iteration < num_classes
def func_body_augmented_pam(iteration, chosen_ids):
"""Call the update_medoid_per_cluster subroutine."""
mask = math_ops.equal(
math_ops.to_int64(predictions), math_ops.to_int64(iteration))
this_cluster_ids = array_ops.where(mask)
pairwise_distances_subset = array_ops.transpose(
array_ops.gather(
array_ops.transpose(
array_ops.gather(pairwise_distances, this_cluster_ids)),
this_cluster_ids))
chosen_ids = update_medoid_per_cluster(pairwise_distances,
pairwise_distances_subset, labels,
chosen_ids, this_cluster_ids,
iteration, margin_multiplier,
margin_type)
return iteration + 1, chosen_ids
unique_class_ids = array_ops.unique(labels)[0]
num_classes = array_ops.size(unique_class_ids)
iteration = array_ops.constant(0)
_, chosen_ids = control_flow_ops.while_loop(
func_cond_augmented_pam, func_body_augmented_pam, [iteration, chosen_ids])
return chosen_ids
def compute_augmented_facility_locations_pam(pairwise_distances,
labels,
margin_multiplier,
margin_type,
chosen_ids,
pam_max_iter=5):
"""Refine the cluster centroids with PAM local search.
For fixed iterations, alternate between updating the cluster assignment
and updating cluster medoids.
Args:
pairwise_distances: 2-D Tensor of pairwise distances.
labels: 1-D Tensor of ground truth cluster assignment.
margin_multiplier: multiplication constant.
margin_type: Type of structured margin to use. Default is nmi.
chosen_ids: 1-D Tensor of initial estimate of cluster centroids.
pam_max_iter: Number of refinement iterations.
Returns:
chosen_ids: Updated 1-D Tensor of cluster centroid indices.
"""
for _ in range(pam_max_iter):
# update the cluster assignment given the chosen_ids (S_pred)
predictions = get_cluster_assignment(pairwise_distances, chosen_ids)
# update the medoids per each cluster
chosen_ids = update_all_medoids(pairwise_distances, predictions, labels,
chosen_ids, margin_multiplier, margin_type)
return chosen_ids
def compute_gt_cluster_score(pairwise_distances, labels):
"""Compute ground truth facility location score.
Loop over each unique classes and compute average travel distances.
Args:
pairwise_distances: 2-D Tensor of pairwise distances.
labels: 1-D Tensor of ground truth cluster assignment.
Returns:
gt_cluster_score: dtypes.float32 score.
"""
unique_class_ids = array_ops.unique(labels)[0]
num_classes = array_ops.size(unique_class_ids)
iteration = array_ops.constant(0)
gt_cluster_score = array_ops.constant(0.0, dtype=dtypes.float32)
def func_cond(iteration, gt_cluster_score):
del gt_cluster_score # Unused argument.
return iteration < num_classes
def func_body(iteration, gt_cluster_score):
"""Per each cluster, compute the average travel distance."""
mask = math_ops.equal(labels, unique_class_ids[iteration])
this_cluster_ids = array_ops.where(mask)
pairwise_distances_subset = array_ops.transpose(
array_ops.gather(
array_ops.transpose(
array_ops.gather(pairwise_distances, this_cluster_ids)),
this_cluster_ids))
this_cluster_score = -1.0 * math_ops.reduce_min(
math_ops.reduce_sum(
pairwise_distances_subset, axis=0))
return iteration + 1, gt_cluster_score + this_cluster_score
_, gt_cluster_score = control_flow_ops.while_loop(
func_cond, func_body, [iteration, gt_cluster_score])
return gt_cluster_score
def cluster_loss(labels,
embeddings,
margin_multiplier,
enable_pam_finetuning=True,
margin_type='nmi',
print_losses=False):
"""Computes the clustering loss.
The following structured margins are supported:
nmi: normalized mutual information
ami: adjusted mutual information
    ari: adjusted Rand index
vmeasure: v-measure
const: indicator checking whether the two clusterings are the same.
Args:
labels: 2-D Tensor of labels of shape [batch size, 1]
embeddings: 2-D Tensor of embeddings of shape
[batch size, embedding dimension]. Embeddings should be l2 normalized.
margin_multiplier: float32 scalar. multiplier on the structured margin term
See section 3.2 of paper for discussion.
enable_pam_finetuning: Boolean, Whether to run local pam refinement.
See section 3.4 of paper for discussion.
margin_type: Type of structured margin to use. See section 3.2 of
paper for discussion. Can be 'nmi', 'ami', 'ari', 'vmeasure', 'const'.
print_losses: Boolean. Option to print the loss.
Paper: https://arxiv.org/abs/1612.01213.
Returns:
clustering_loss: A float32 scalar `Tensor`.
Raises:
ImportError: If sklearn dependency is not installed.
"""
if not HAS_SKLEARN:
raise ImportError('Cluster loss depends on sklearn.')
pairwise_distances = pairwise_distance(embeddings)
labels = array_ops.squeeze(labels)
all_ids = math_ops.range(array_ops.shape(embeddings)[0])
# Compute the loss augmented inference and get the cluster centroids.
chosen_ids = compute_augmented_facility_locations(pairwise_distances, labels,
all_ids, margin_multiplier,
margin_type)
# Given the predicted centroids, compute the clustering score.
score_pred = compute_facility_energy(pairwise_distances, chosen_ids)
# Branch whether to use PAM finetuning.
if enable_pam_finetuning:
# Initialize with augmented facility solution.
chosen_ids = compute_augmented_facility_locations_pam(pairwise_distances,
labels,
margin_multiplier,
margin_type,
chosen_ids)
score_pred = compute_facility_energy(pairwise_distances, chosen_ids)
# Given the predicted centroids, compute the cluster assignments.
predictions = get_cluster_assignment(pairwise_distances, chosen_ids)
# Compute the clustering (i.e. NMI) score between the two assignments.
clustering_score_pred = compute_clustering_score(labels, predictions,
margin_type)
# Compute the clustering score from labels.
score_gt = compute_gt_cluster_score(pairwise_distances, labels)
# Compute the hinge loss.
clustering_loss = math_ops.maximum(
score_pred + margin_multiplier * (1.0 - clustering_score_pred) - score_gt,
0.0,
name='clustering_loss')
clustering_loss.set_shape([])
if print_losses:
clustering_loss = logging_ops.Print(
clustering_loss,
['clustering_loss: ', clustering_loss, array_ops.shape(
clustering_loss)])
# Clustering specific summary.
summary.scalar('losses/score_pred', score_pred)
summary.scalar('losses/' + margin_type, clustering_score_pred)
summary.scalar('losses/score_gt', score_gt)
return clustering_loss
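# A minimal usage sketch (illustrative; assumes sklearn is installed and a
# TF1-style graph/session, with `labels` of shape [batch_size, 1] and
# l2-normalized `embeddings` of shape [batch_size, dim]):
#
#   loss = cluster_loss(labels, embeddings, margin_multiplier=1.0,
#                       enable_pam_finetuning=True, margin_type='nmi')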
| apache-2.0 |
didattica/his | src/df_generator.py | 1 | 1204 | import pandas as pd
import numpy as np
import random
num = 10000
df = pd.DataFrame()
df['age'] = np.random.choice(70, num) + 18
df['sex'] = [random.choice(['male','female']) for i in df['age']]
def get_sport_h(i):
if df['age'].iloc[i] < 30:
return np.random.choice(15)
if df['age'].iloc[i] < 40:
return np.random.choice(12)
if df['age'].iloc[i] < 50:
return np.random.choice(8)
if df['age'].iloc[i] < 60:
return np.random.choice(6)
return np.random.choice(4)
def get_height(i):
mu = 160
if df['sex'].iloc[i] == 'male':
mu = 175
mu = mu - df['age'].iloc[i] * .1 - df['sport_h'].iloc[i] * 0.01
return round(np.random.normal(mu, 10), 2)
def get_weight(i):
mu = 61
if df['sex'].iloc[i] == 'male':
mu = 72
mu = mu + df['age'].iloc[i] * .3
return round(np.random.normal(mu, 5), 2)
def get_sex_n(i):
if df['sex'].iloc[i] == 'male':
return 1
return 0
df['sport_h'] = [get_sport_h(i) for i in df.index]
df['height'] = [get_height(i) for i in df.index]
df['weight'] = [get_weight(i) for i in df.index]
df['sex_n'] = [get_sex_n(i) for i in df.index]
sns.heatmap(df.corr(), annot=True, fmt='.2f')
plt.show()
df.to_csv('/tmp/open_health_data.csv') | mit |
changhoonhahn/centralMS | centralms/tests/test_sfh.py | 1 | 3256 | '''
Test methods in sfh.py
'''
import numpy as np
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
import sfh as SFH
import util as UT
def test_IntegratedSFH(test, solver='euler'):
''' Come up with some tests
'''
if solver == 'euler':
ODEsolver = SFH.ODE_Euler
elif solver == 'rk4':
ODEsolver = SFH.ODE_RK4
if test == '1': # simplest test (PASSED)
z_of_t = lambda t: 1.
logSFR_M_z = lambda mm, zz: -9.
dlogmdz_kwargs = {
'logsfr_M_z': logSFR_M_z,
'f_retain': 1.,
'zoft': z_of_t
}
tt, output = ODEsolver(SFH.dlogMdt, np.array([0.]), np.arange(0., 10., 1.), 0.01, **dlogmdz_kwargs)
fig = plt.figure()
sub = fig.add_subplot(111)
sub.plot(tt, [10**out[0] for out in output], c='b', lw=2)
sub.plot(np.arange(0., 10., 1.), 1.+ np.arange(0., 10., 1), c='k', ls='--', lw=3)
plt.show()
elif test == '2': # time dependent dlogm/dt (PASSED)
z_of_t = lambda t: t
logSFR_M_z = lambda mm, zz: np.log10(zz**2)
dlogmdz_kwargs = {
'logsfr_M_z': logSFR_M_z,
'f_retain': 1.e-9,
'zoft': z_of_t
}
tt, output = ODEsolver(SFH.dlogMdt, np.array([0.]), np.arange(0., 10., 1.), 0.01, **dlogmdz_kwargs)
fig = plt.figure()
sub = fig.add_subplot(111)
sub.plot(tt, [10**out[0] for out in output], c='b', lw=2)
sub.plot(np.arange(0., 10., 1.), 1.+ (np.arange(0., 10., 1)**3)/3., c='k', ls='--', lw=3)
plt.show()
elif test == '3':
z_of_t = lambda t: 1
logSFR_M_z = lambda mm, zz: mm
dlogmdz_kwargs = {
'logsfr_M_z': logSFR_M_z,
'f_retain': 1.e-9,
'zoft': z_of_t
}
tt, output = ODEsolver(SFH.dlogMdt, np.array([0.]), np.arange(0., 10., 1.), 0.01, **dlogmdz_kwargs)
fig = plt.figure()
sub = fig.add_subplot(111)
sub.plot(tt, [10**out[0] for out in output], c='b', lw=2)
sub.plot(np.arange(0., 10., 1.), np.exp(np.arange(0., 10., 1)), c='k', ls='--', lw=3)
plt.show()
def test_zt_interpolate():
z_table, t_table = UT.zt_table()
z_of_t1 = interp1d(list(reversed(t_table)), list(reversed(z_table)), kind='cubic')
z_of_t2 = interp1d(t_table, z_table, kind='cubic')
z_of_t3 = interp1d(list(reversed(t_table[:25])), list(reversed(z_table[:25])), kind='cubic')
z_of_t4 = interp1d(t_table[:25], z_table[:25], kind='cubic')
fig = plt.figure()
sub = fig.add_subplot(111)
t_arr = np.arange(t_table[24], t_table[0], 0.1)
sub.plot(t_table[:20], (z_table[:20] - z_of_t1(t_table[:20]))/z_table[:20])
sub.plot(t_table[:20], (z_table[:20] - z_of_t2(t_table[:20]))/z_table[:20])
sub.plot(t_table[:20], (z_table[:20] - z_of_t3(t_table[:20]))/z_table[:20])
sub.plot(t_table[:20], (z_table[:20] - z_of_t4(t_table[:20]))/z_table[:20])
#sub.scatter(t_table[:20], z_table[:20], color='k', s=10, lw=0)
plt.show()
if __name__=='__main__':
test_zt_interpolate()
#test_IntegratedSFH('2', solver='rk4')
| mit |
rhiever/bokeh | bokeh/charts/builder/tests/test_dot_builder.py | 33 | 3939 | """ This is the Bokeh charts testing interface.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from collections import OrderedDict
import unittest
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
from bokeh.charts import Dot
from bokeh.charts.builder.tests._utils import create_chart
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class TestDot(unittest.TestCase):
def test_supported_input(self):
xyvalues = OrderedDict()
xyvalues['python']=[2, 5]
xyvalues['pypy']=[12, 40]
xyvalues['jython']=[22, 30]
xyvaluesdf = pd.DataFrame(xyvalues, index=['lists', 'loops'])
cat = ['lists', 'loops']
catjython = ['lists:0.75', 'loops:0.75']
catpypy = ['lists:0.5', 'loops:0.5']
catpython = ['lists:0.25', 'loops:0.25']
python = seg_top_python = [2, 5]
pypy = seg_top_pypy = [12, 40]
jython = seg_top_jython = [22, 30]
zero = [0, 0]
for i, _xy in enumerate([xyvalues, xyvaluesdf]):
hm = create_chart(Dot, _xy, cat=cat)
builder = hm._builders[0]
self.assertEqual(sorted(builder._groups), sorted(list(xyvalues.keys())))
assert_array_equal(builder._data['cat'], cat)
assert_array_equal(builder._data['catjython'], catjython)
assert_array_equal(builder._data['catpython'], catpython)
assert_array_equal(builder._data['catpypy'], catpypy)
assert_array_equal(builder._data['python'], python)
assert_array_equal(builder._data['jython'], jython)
assert_array_equal(builder._data['pypy'], pypy)
assert_array_equal(builder._data['seg_top_python'], seg_top_python)
assert_array_equal(builder._data['seg_top_jython'], seg_top_jython)
assert_array_equal(builder._data['seg_top_pypy'], seg_top_pypy)
assert_array_equal(builder._data['z_python'], zero)
assert_array_equal(builder._data['z_pypy'], zero)
assert_array_equal(builder._data['z_jython'], zero)
assert_array_equal(builder._data['zero'], zero)
lvalues = [[2, 5], [12, 40], [22, 30]]
for _xy in [lvalues, np.array(lvalues)]:
hm = create_chart(Dot, _xy, cat=cat)
builder = hm._builders[0]
self.assertEqual(builder._groups, ['0', '1', '2'])
assert_array_equal(builder._data['cat'], cat)
assert_array_equal(builder._data['cat0'], catpython)
assert_array_equal(builder._data['cat1'], catpypy)
assert_array_equal(builder._data['cat2'], catjython)
assert_array_equal(builder._data['0'], python)
assert_array_equal(builder._data['1'], pypy)
assert_array_equal(builder._data['2'], jython)
assert_array_equal(builder._data['seg_top_0'], seg_top_python)
assert_array_equal(builder._data['seg_top_1'], seg_top_pypy)
assert_array_equal(builder._data['seg_top_2'], seg_top_jython)
assert_array_equal(builder._data['z_0'], zero)
assert_array_equal(builder._data['z_1'], zero)
assert_array_equal(builder._data['z_2'], zero)
assert_array_equal(builder._data['zero'], zero)
| bsd-3-clause |
hlin117/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_01_language_train_model.py | 103 | 2017 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
# TASK: Fit the pipeline on the training set
# TASK: Predict the outcome on the testing set in a variable named y_predicted
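# One possible completion of the TASKs above (an illustrative sketch, not the
# reference solution), using only the classes already imported at the top:
vectorizer = TfidfVectorizer(analyzer='char', ngram_range=(1, 3), use_idf=False)
clf = Pipeline([
    ('vec', vectorizer),
    ('clf', Perceptron()),
])
clf.fit(docs_train, y_train)
y_predicted = clf.predict(docs_test)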
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import matplotlib.pyplot as plt
#plt.matshow(cm, cmap=plt.cm.jet)
#plt.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
yehudagale/fuzzyJoiner | old/TripletLossFacenetLSTM_hpo.py | 1 | 19474 | import numpy as np
import pandas
import tensorflow as tf
import random as random
import json
from keras import backend as K
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, Flatten, Dropout, Lambda, GRU, Activation
from keras.layers.wrappers import Bidirectional
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.models import Model, model_from_json, Sequential
from embeddings import KazumaCharEmbedding
from annoy import AnnoyIndex
from keras.callbacks import ModelCheckpoint, EarlyStopping
from names_cleanser import NameDataCleanser, CompanyDataCleanser
import sys
import statistics
import argparse
#must fix
MAX_NB_WORDS = 140000
EMBEDDING_DIM = 100
MAX_SEQUENCE_LENGTH = 10
MARGIN=10
ALPHA=45
DEBUG = False
DEBUG_DATA_LENGTH = 100
DEBUG_ANN = False
USE_ANGULAR_LOSS=False
LOSS_FUNCTION=None
TRAIN_NEIGHBOR_LEN=20
TEST_NEIGHBOR_LEN=20
EMBEDDING_TYPE = 'Kazuma'
NUM_LAYERS = 3
USE_L2_NORM = False
output_file_name_for_hpo = "val_dict_list.json"
def f1score(positive, negative):
#labels[predictions.ravel() < 0.5].sum()
    fscore = 0.0
true_positive = 0.0
false_positive = 0
false_negitive = 0
for i in range(len(positive)):
if positive[i] <= negative[i]:
true_positive += 1
else:
false_negitive += 1
false_positive += 1
print('tp' + str(true_positive))
print('fp' + str(false_positive))
print('fn' + str(false_negitive))
fscore = (2 * true_positive) / ((2 * true_positive) + false_negitive + false_positive)
return fscore
def get_embedding_layer(tokenizer):
word_index = tokenizer.word_index
num_words = len(word_index) + 1
embedding_matrix = np.zeros((num_words, EMBEDDING_DIM))
print('about to get kz')
kz = KazumaCharEmbedding()
print('got kz')
for word, i in word_index.items():
if i >= MAX_NB_WORDS:
continue
embedding_vector = kz.emb(word)
if embedding_vector is not None:
if sum(embedding_vector) == 0:
print("failed to find embedding for:" + word)
# words not found in embedding index will be all-zeros.
embedding_matrix[i] = embedding_vector
print("Number of words:" + str(num_words))
embedding_layer = Embedding(num_words,
EMBEDDING_DIM,
weights=[embedding_matrix],
input_length=MAX_SEQUENCE_LENGTH,
trainable=False)
return embedding_layer
def get_sequences(texts, tokenizer):
sequences = {}
sequences['anchor'] = tokenizer.texts_to_sequences(texts['anchor'])
sequences['anchor'] = pad_sequences(sequences['anchor'], maxlen=MAX_SEQUENCE_LENGTH)
sequences['negative'] = tokenizer.texts_to_sequences(texts['negative'])
sequences['negative'] = pad_sequences(sequences['negative'], maxlen=MAX_SEQUENCE_LENGTH)
sequences['positive'] = tokenizer.texts_to_sequences(texts['positive'])
sequences['positive'] = pad_sequences(sequences['positive'], maxlen=MAX_SEQUENCE_LENGTH)
return sequences
def read_entities(filepath):
entities = []
with open(filepath) as fl:
for line in fl:
entities.append(line)
return entities
def read_file(file_path):
texts = {'anchor':[], 'negative':[], 'positive':[]}
fl = open(file_path, 'r')
i = 0
for line in fl:
line_array = line.split("|")
texts['anchor'].append(line_array[0])
texts['positive'].append(line_array[1])
texts['negative'].append(line_array[2])
i += 1
if i > DEBUG_DATA_LENGTH and DEBUG:
break
return texts
def split(entities, test_split = 0.2):
if DEBUG:
ents = entities[0:DEBUG_DATA_LENGTH]
else:
random.shuffle(entities)
ents = entities
num_validation_samples = int(test_split * len(ents))
return ents[:-num_validation_samples], ents[-num_validation_samples:]
"""
define a single objective function based on angular loss instead of triplet loss
"""
def angular_loss(y_true, y_pred):
alpha = K.constant(ALPHA)
a_p = y_pred[:,0,0]
n_c = y_pred[:,1,0]
return K.mean(K.maximum(K.constant(0), K.square(a_p) - K.constant(4) * K.square(tf.tan(alpha)) * K.square(n_c)))
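# In the notation used here, a_p = ||x_a - x_p|| and n_c = ||x_n - (x_a + x_p) / 2||
# (see a_p_angular_distance / n_c_angular_distance below), so this computes
# mean(max(0, a_p^2 - 4 * tan^2(ALPHA) * n_c^2)).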
"""
Facenet triplet loss function: https://arxiv.org/pdf/1503.03832.pdf
"""
def schroff_triplet_loss(y_true, y_pred):
margin = K.constant(0.2)
return K.mean(K.maximum(K.constant(0), K.square(y_pred[:,0,0]) - K.square(y_pred[:,1,0]) + margin))
def triplet_loss(y_true, y_pred):
margin = K.constant(MARGIN)
return K.mean(K.square(y_pred[:,0,0]) + K.square(margin - y_pred[:,1,0]))
def triplet_tanh_loss(y_true, y_pred):
return K.mean(K.tanh(y_pred[:,0,0]) + (K.constant(1) - K.tanh(y_pred[:,1,0])))
def triplet_tanh_pn_loss(y_true, y_pred):
return K.mean(K.tanh(y_pred[:,0,0]) +
((K.constant(1) - K.tanh(y_pred[:,1,0])) +
(K.constant(1) - K.tanh(y_pred[:,2,0]))) / K.constant(2));
# the following triplet loss function is from: Deep Metric Learning with Improved Triplet Loss for
# Face clustering in Videos
def improved_loss(y_true, y_pred):
margin = K.constant(1)
lambda_p = K.constant(0.02)
threshold = K.constant(0.1)
a_p_distance = y_pred[:,0,0]
a_n_distance = y_pred[:,1,0]
p_n_distance = y_pred[:,2,0]
phi = a_p_distance - ((a_n_distance + p_n_distance) / K.constant(2)) + margin
psi = a_p_distance - threshold
return K.maximum(K.constant(0), phi) + lambda_p * K.maximum(K.constant(0), psi)
def accuracy(y_true, y_pred):
return K.mean(y_pred[:,0,0] < y_pred[:,1,0])
def l2Norm(x):
return K.l2_normalize(x, axis=-1)
def euclidean_distance(vects):
x, y = vects
return K.sqrt(K.maximum(K.sum(K.square(x - y), axis=1, keepdims=True), K.epsilon()))
def n_c_angular_distance(vects):
x_a, x_p, x_n = vects
return K.sqrt(K.maximum(K.sum(K.square(x_n - ((x_a + x_p) / K.constant(2))), axis=1, keepdims=True), K.epsilon()))
def a_p_angular_distance(vects):
x_a, x_p, x_n = vects
return K.sqrt(K.maximum(K.sum(K.square(x_a - x_p), axis=1, keepdims=True), K.epsilon()))
def build_unique_entities(entity2same):
unique_text = []
entity2index = {}
for key in entity2same:
entity2index[key] = len(unique_text)
unique_text.append(key)
vals = entity2same[key]
for v in vals:
entity2index[v] = len(unique_text)
unique_text.append(v)
return unique_text, entity2index
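# Illustrative example (hypothetical names): entity2same = {'IBM': ['I.B.M.', 'Intl Business Machines']}
# gives unique_text = ['IBM', 'I.B.M.', 'Intl Business Machines'] and
# entity2index = {'IBM': 0, 'I.B.M.': 1, 'Intl Business Machines': 2}.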
def generate_triplets_from_ANN(model, sequences, entity2unique, entity2same, unique_text, test):
predictions = model.predict(sequences)
t = AnnoyIndex(len(predictions[0]), metric='euclidean') # Length of item vector that will be indexed
t.set_seed(123)
for i in range(len(predictions)):
# print(predictions[i])
v = predictions[i]
t.add_item(i, v)
t.build(100) # 100 trees
match = 0
no_match = 0
accuracy = 0
total = 0
triplets = {}
pos_distances = []
neg_distances = []
triplets['anchor'] = []
triplets['positive'] = []
triplets['negative'] = []
if test:
NNlen = TEST_NEIGHBOR_LEN
else:
NNlen = TRAIN_NEIGHBOR_LEN
for key in entity2same:
index = entity2unique[key]
nearest = t.get_nns_by_vector(predictions[index], NNlen)
nearest_text = set([unique_text[i] for i in nearest])
expected_text = set(entity2same[key])
# annoy has this annoying habit of returning the queried item back as a nearest neighbor. Remove it.
if key in nearest_text:
nearest_text.remove(key)
# print("query={} names = {} true_match = {}".format(unique_text[index], nearest_text, expected_text))
overlap = expected_text.intersection(nearest_text)
# collect up some statistics on how well we did on the match
m = len(overlap)
match += m
# since we asked for only x nearest neighbors, and we get at most x-1 neighbors that are not the same as key (!)
# make sure we adjust our estimate of no match appropriately
no_match += min(len(expected_text), NNlen - 1) - m
# sample only the negatives that are true negatives
# that is, they are not in the expected set - sampling only 'semi-hard negatives is not defined here'
# positives = expected_text - nearest_text
positives = expected_text
negatives = nearest_text - expected_text
# print(key + str(expected_text) + str(nearest_text))
for i in negatives:
for j in positives:
dist_pos = t.get_distance(index, entity2unique[j])
pos_distances.append(dist_pos)
dist_neg = t.get_distance(index, entity2unique[i])
neg_distances.append(dist_neg)
if dist_pos < dist_neg:
accuracy += 1
total += 1
# print(key + "|" + j + "|" + i)
# print(dist_pos)
# print(dist_neg)
triplets['anchor'].append(key)
triplets['positive'].append(j)
triplets['negative'].append(i)
print("mean positive distance:" + str(statistics.mean(pos_distances)))
print("stdev positive distance:" + str(statistics.stdev(pos_distances)))
print("max positive distance:" + str(max(pos_distances)))
print("mean neg distance:" + str(statistics.mean(neg_distances)))
print("stdev neg distance:" + str(statistics.stdev(neg_distances)))
print("max neg distance:" + str(max(neg_distances)))
print("Accuracy in the ANN for triplets that obey the distance func:" + str(accuracy / total))
obj = {}
obj['accuracy'] = accuracy / total
obj['steps'] = 1
with open(output_file_name_for_hpo, 'w') as out:
json.dump(obj, out)
if test:
return match/(match + no_match)
else:
return triplets, match/(match + no_match)
def generate_names(entities, people, limit_pairs=False):
if people:
num_names = 4
generator = NameDataCleanser(0, num_names, limit_pairs=limit_pairs)
else:
generator = CompanyDataCleanser(limit_pairs)
num_names = 2
entity2same = {}
for entity in entities:
ret = generator.cleanse_data(entity)
if ret and len(ret) >= num_names:
entity2same[ret[0]] = ret[1:]
return entity2same
def embedded_representation_model(embedding_layer):
seq = Sequential()
seq.add(embedding_layer)
seq.add(Flatten())
return seq
def build_model(embedder):
main_input = Input(shape=(MAX_SEQUENCE_LENGTH,))
net = embedder(main_input)
for i in range(0, NUM_LAYERS):
net = GRU(128, return_sequences=True, activation='relu', name='embed' + str(i))(net)
net = GRU(128, activation='relu', name='embed' + str(i+1))(net)
if USE_L2_NORM:
net = Lambda(l2Norm, output_shape=[128])(net)
base_model = Model(embedder.input, net, name='triplet_model')
base_model.summary()
input_shape=(MAX_SEQUENCE_LENGTH,)
input_anchor = Input(shape=input_shape, name='input_anchor')
input_positive = Input(shape=input_shape, name='input_pos')
input_negative = Input(shape=input_shape, name='input_neg')
net_anchor = base_model(input_anchor)
net_positive = base_model(input_positive)
net_negative = base_model(input_negative)
positive_dist = Lambda(euclidean_distance, name='pos_dist', output_shape=(1,))([net_anchor, net_positive])
negative_dist = Lambda(euclidean_distance, name='neg_dist', output_shape=(1,))([net_anchor, net_negative])
if USE_ANGULAR_LOSS:
n_c = Lambda(n_c_angular_distance, name='nc_angular_dist')([net_anchor, net_positive, net_negative])
a_p = Lambda(a_p_angular_distance, name='ap_angular_dist')([net_anchor, net_positive, net_negative])
stacked_dists = Lambda(
lambda vects: K.stack(vects, axis=1),
name='stacked_dists', output_shape=(3, 1)
)([a_p, n_c])
model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
model.compile(optimizer="rmsprop", loss=angular_loss, metrics=[accuracy])
else:
exemplar_negative_dist = Lambda(euclidean_distance, name='exemplar_neg_dist', output_shape=(1,))([net_positive, net_negative])
stacked_dists = Lambda(
# lambda vects: C.splice(*vects, axis=C.Axis.new_leading_axis()).eval(vects),
lambda vects: K.stack(vects, axis=1),
name='stacked_dists', output_shape=(3, 1)
)([positive_dist, negative_dist, exemplar_negative_dist])
model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
model.compile(optimizer="rmsprop", loss=LOSS_FUNCTION, metrics=[accuracy])
test_positive_model = Model([input_anchor, input_positive, input_negative], positive_dist)
test_negative_model = Model([input_anchor, input_positive, input_negative], negative_dist)
inter_model = Model(input_anchor, net_anchor)
print("output_shapes")
model.summary()
# print(positive_dist.output_shape)
# print(negative_dist.output_shape)
# print(exemplar_negative_dist)
# print(neg_dist.output_shape)
return model, test_positive_model, test_negative_model, inter_model
parser = argparse.ArgumentParser(description='Run fuzzy join algorithm')
parser.add_argument('--debug_sample_size', type=int,
help='sample size for debug run')
parser.add_argument('--margin', type=int,
help='margin')
parser.add_argument('--loss_function', type=str,
help='triplet loss function type: schroff-loss, improved-loss, angular-loss, tanh-loss, improved-tanh-loss')
parser.add_argument('--use_l2_norm', type=str,
help='whether to add a l2 norm')
parser.add_argument('--num_layers', type=int,
help='num_layers to use. Minimum is 2')
parser.add_argument('--input', type=str, help='Input file')
parser.add_argument('--entity_type', type=str, help='people or companies')
args = parser.parse_args()
LOSS_FUNCTION = None
if args.loss_function == 'schroff-loss':
LOSS_FUNCTION=schroff_triplet_loss
elif args.loss_function == 'improved-loss':
LOSS_FUNCTION=improved_loss
elif args.loss_function == 'our-loss':
LOSS_FUNCTION=triplet_loss
elif args.loss_function == 'tanh-loss':
LOSS_FUNCTION=triplet_tanh_loss
elif args.loss_function == 'improved-tanh-loss':
LOSS_FUNCTION=triplet_tanh_pn_loss
elif args.loss_function == 'angular-loss':
USE_ANGULAR_LOSS = True
LOSS_FUNCTION = angular_loss
print('Loss function: ' + args.loss_function)
if args.debug_sample_size:
DEBUG=True
DEBUG_DATA_LENGTH=args.debug_sample_size
print('Debug data length:' + str(DEBUG_DATA_LENGTH))
print('Margin:' + str(MARGIN))
USE_L2_NORM = args.use_l2_norm.lower() in ("yes", "true", "t", "1")
print('Use L2Norm: ' + str(USE_L2_NORM))
print('Use L2Norm: ' + str(args.use_l2_norm))
NUM_LAYERS = args.num_layers - 1
print('Num layers: ' + str(NUM_LAYERS))
people = 'people' in args.entity_type
# read all entities and create positive parts of a triplet
entities = read_entities(args.input)
train, test = split(entities, test_split = .20)
print("TRAIN")
print(train)
print("TEST")
print(test)
entity2same_train = generate_names(train, people)
entity2same_test = generate_names(test, people, limit_pairs=True)
print(entity2same_train)
print(entity2same_test)
# change the default behavior of the tokenizer to ignore all punctuation except , - and . which are important
# clues for entity names
tokenizer = Tokenizer(num_words=MAX_NB_WORDS, lower=True, filters='!"#$%&()*+/:;<=>?@[\]^_`{|}~', split=" ")
# build a set of data structures useful for annoy, the set of unique entities (unique_text),
# a mapping of entities in texts to an index in unique_text, a mapping of entities to other same entities, and the actual
# vectorized representation of the text. These structures will be used iteratively as we build up the model
# so we need to create them once for re-use
unique_text, entity2unique = build_unique_entities(entity2same_train)
unique_text_test, entity2unique_test = build_unique_entities(entity2same_test)
print("train text len:" + str(len(unique_text)))
print("test text len:" + str(len(unique_text_test)))
tokenizer.fit_on_texts(unique_text + unique_text_test)
sequences = tokenizer.texts_to_sequences(unique_text)
sequences = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
sequences_test = tokenizer.texts_to_sequences(unique_text_test)
sequences_test = pad_sequences(sequences_test, maxlen=MAX_SEQUENCE_LENGTH)
# build models
embedder = get_embedding_layer(tokenizer)
model, test_positive_model, test_negative_model, inter_model = build_model(embedder)
embedder_model = embedded_representation_model(embedder)
if DEBUG_ANN:
generate_triplets_from_ANN(embedder_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, True)
sys.exit()
test_data, test_match_stats = generate_triplets_from_ANN(embedder_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, False)
test_seq = get_sequences(test_data, tokenizer)
print("Test stats:" + str(test_match_stats))
counter = 0
current_model = embedder_model
prev_match_stats = 0
train_data, match_stats = generate_triplets_from_ANN(current_model, sequences, entity2unique, entity2same_train, unique_text, False)
print("Match stats:" + str(match_stats))
number_of_names = len(train_data['anchor'])
# print(train_data['anchor'])
print("number of names" + str(number_of_names))
Y_train = np.random.randint(2, size=(1,2,number_of_names)).T
filepath="weights.best.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
early_stop = EarlyStopping(monitor='val_accuracy', patience=1, mode='max')
callbacks_list = [checkpoint, early_stop]
train_seq = get_sequences(train_data, tokenizer)
# rely on early stopping (patience=1) to keep each fit short, because this gets called many times
model.fit([train_seq['anchor'], train_seq['positive'], train_seq['negative']], Y_train, epochs=100, batch_size=40, callbacks=callbacks_list, validation_split=0.2)
current_model = inter_model
# print some statistics on this epoch
print("training data predictions")
positives = test_positive_model.predict([train_seq['anchor'], train_seq['positive'], train_seq['negative']])
negatives = test_negative_model.predict([train_seq['anchor'], train_seq['positive'], train_seq['negative']])
print("f1score for train is: {}".format(f1score(positives, negatives)))
print("test data predictions")
positives = test_positive_model.predict([test_seq['anchor'], test_seq['positive'], test_seq['negative']])
negatives = test_negative_model.predict([test_seq['anchor'], test_seq['positive'], test_seq['negative']])
print("f1score for test is: {}".format(f1score(positives, negatives)))
test_match_stats = generate_triplets_from_ANN(current_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, True)
print("Test stats:" + str(test_match_stats))
| epl-1.0 |
0asa/scikit-learn | examples/ensemble/plot_gradient_boosting_regularization.py | 355 | 2843 | """
================================
Gradient Boosting regularization
================================
Illustration of the effect of different regularization strategies
for Gradient Boosting. The example is taken from Hastie et al 2009.
The loss function used is binomial deviance. Regularization via
shrinkage (``learning_rate < 1.0``) improves performance considerably.
In combination with shrinkage, stochastic gradient boosting
(``subsample < 1.0``) can produce more accurate models by reducing the
variance via bagging.
Subsampling without shrinkage usually does poorly.
Another strategy to reduce the variance is by subsampling the features
analogous to the random splits in Random Forests
(via the ``max_features`` parameter).
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X = X.astype(np.float32)
# map labels from {-1, 1} to {0, 1}
labels, y = np.unique(y, return_inverse=True)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
original_params = {'n_estimators': 1000, 'max_leaf_nodes': 4, 'max_depth': None, 'random_state': 2,
'min_samples_split': 5}
plt.figure()
for label, color, setting in [('No shrinkage', 'orange',
{'learning_rate': 1.0, 'subsample': 1.0}),
('learning_rate=0.1', 'turquoise',
{'learning_rate': 0.1, 'subsample': 1.0}),
('subsample=0.5', 'blue',
{'learning_rate': 1.0, 'subsample': 0.5}),
('learning_rate=0.1, subsample=0.5', 'gray',
{'learning_rate': 0.1, 'subsample': 0.5}),
('learning_rate=0.1, max_features=2', 'magenta',
{'learning_rate': 0.1, 'max_features': 2})]:
params = dict(original_params)
params.update(setting)
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
# compute test set deviance
test_deviance = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
# clf.loss_ assumes that y_test[i] in {0, 1}
test_deviance[i] = clf.loss_(y_test, y_pred)
plt.plot((np.arange(test_deviance.shape[0]) + 1)[::5], test_deviance[::5],
'-', color=color, label=label)
plt.legend(loc='upper left')
plt.xlabel('Boosting Iterations')
plt.ylabel('Test Set Deviance')
plt.show()
| bsd-3-clause |
shyamalschandra/scikit-learn | examples/linear_model/plot_ridge_path.py | 14 | 1599 | """
===========================================================
Plot Ridge coefficients as a function of the regularization
===========================================================
Shows the effect of collinearity in the coefficients of an estimator.
.. currentmodule:: sklearn.linear_model
:class:`Ridge` Regression is the estimator used in this example.
Each color represents a different feature of the
coefficient vector, and this is displayed as a function of the
regularization parameter.
At the end of the path, as alpha tends toward zero
and the solution tends towards the ordinary least squares, coefficients
exhibit big oscillations.
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# X is the 10x10 Hilbert matrix
X = 1. / (np.arange(1, 11) + np.arange(0, 10)[:, np.newaxis])
y = np.ones(10)
###############################################################################
# Compute paths
n_alphas = 200
alphas = np.logspace(-10, -2, n_alphas)
clf = linear_model.Ridge(fit_intercept=False)
coefs = []
for a in alphas:
clf.set_params(alpha=a)
clf.fit(X, y)
coefs.append(clf.coef_)
###############################################################################
# Display results
ax = plt.gca()
ax.plot(alphas, coefs)
ax.set_xscale('log')
ax.set_xlim(ax.get_xlim()[::-1]) # reverse axis
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('Ridge coefficients as a function of the regularization')
plt.axis('tight')
plt.show()
| bsd-3-clause |
glouppe/scikit-learn | sklearn/decomposition/tests/test_pca.py | 21 | 11810 | import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_no_warnings
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.decomposition import RandomizedPCA
from sklearn.decomposition.pca import _assess_dimension_
from sklearn.decomposition.pca import _infer_dimension_
iris = datasets.load_iris()
def test_pca():
# PCA on dense arrays
pca = PCA(n_components=2)
X = iris.data
X_r = pca.fit(X).transform(X)
np.testing.assert_equal(X_r.shape[1], 2)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
pca = PCA()
pca.fit(X)
assert_almost_equal(pca.explained_variance_ratio_.sum(), 1.0, 3)
X_r = pca.transform(X)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
# Test get_covariance and get_precision with n_components == n_features
# with n_components < n_features and with n_components == 0
for n_components in [0, 2, X.shape[1]]:
pca.n_components = n_components
pca.fit(X)
cov = pca.get_covariance()
precision = pca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
def test_no_empty_slice_warning():
# test if we avoid numpy warnings for computing over empty arrays
n_components = 10
    n_features = n_components + 2  # anything > n_comps triggered it in 0.16
X = np.random.uniform(-1, 1, size=(n_components, n_features))
pca = PCA(n_components=n_components)
assert_no_warnings(pca.fit, X)
def test_whitening():
# Check that PCA output has unit-variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
n_components = 30
rank = 50
# some low rank data with correlated features
X = np.dot(rng.randn(n_samples, rank),
np.dot(np.diag(np.linspace(10.0, 1.0, rank)),
rng.randn(rank, n_features)))
# the component-wise variance of the first 50 features is 3 times the
    # mean component-wise variance of the remaining 30 features
X[:, :50] *= 3
assert_equal(X.shape, (n_samples, n_features))
# the component-wise variance is thus highly varying:
assert_almost_equal(X.std(axis=0).std(), 43.9, 1)
for this_PCA, copy in [(x, y) for x in (PCA, RandomizedPCA)
for y in (True, False)]:
# whiten the data while projecting to the lower dim subspace
X_ = X.copy() # make sure we keep an original across iterations.
pca = this_PCA(n_components=n_components, whiten=True, copy=copy)
if hasattr(pca, 'random_state'):
pca.random_state = rng
# test fit_transform
X_whitened = pca.fit_transform(X_.copy())
assert_equal(X_whitened.shape, (n_samples, n_components))
X_whitened2 = pca.transform(X_)
assert_array_almost_equal(X_whitened, X_whitened2)
assert_almost_equal(X_whitened.std(axis=0), np.ones(n_components),
decimal=4)
assert_almost_equal(X_whitened.mean(axis=0), np.zeros(n_components))
X_ = X.copy()
pca = this_PCA(n_components=n_components, whiten=False,
copy=copy).fit(X_)
X_unwhitened = pca.transform(X_)
assert_equal(X_unwhitened.shape, (n_samples, n_components))
# in that case the output components still have varying variances
assert_almost_equal(X_unwhitened.std(axis=0).std(), 74.1, 1)
# we always center, so no test for non-centering.
def test_explained_variance():
# Check that PCA output has unit-variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
X = rng.randn(n_samples, n_features)
pca = PCA(n_components=2).fit(X)
rpca = RandomizedPCA(n_components=2, random_state=rng).fit(X)
assert_array_almost_equal(pca.explained_variance_ratio_,
rpca.explained_variance_ratio_, 1)
# compare to empirical variances
X_pca = pca.transform(X)
assert_array_almost_equal(pca.explained_variance_,
np.var(X_pca, axis=0))
X_rpca = rpca.transform(X)
assert_array_almost_equal(rpca.explained_variance_, np.var(X_rpca, axis=0),
decimal=1)
# Same with correlated data
X = datasets.make_classification(n_samples, n_features,
n_informative=n_features-2,
random_state=rng)[0]
pca = PCA(n_components=2).fit(X)
rpca = RandomizedPCA(n_components=2, random_state=rng).fit(X)
assert_array_almost_equal(pca.explained_variance_ratio_,
rpca.explained_variance_ratio_, 5)
def test_pca_check_projection():
# Test that the projection of data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = PCA(n_components=2).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_pca_inverse():
# Test that the projection of data can be inverted
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
pca = PCA(n_components=2).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
# same as above with whitening (approximate reconstruction)
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_pca_validation():
X = [[0, 1], [1, 0]]
for n_components in [-1, 3]:
assert_raises(ValueError, PCA(n_components).fit, X)
def test_randomized_pca_check_projection():
# Test that the projection by RandomizedPCA on dense data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = RandomizedPCA(n_components=2, random_state=0).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_randomized_pca_check_list():
# Test that the projection by RandomizedPCA on list data is correct
X = [[1.0, 0.0], [0.0, 1.0]]
X_transformed = RandomizedPCA(n_components=1,
random_state=0).fit(X).transform(X)
assert_equal(X_transformed.shape, (2, 1))
assert_almost_equal(X_transformed.mean(), 0.00, 2)
assert_almost_equal(X_transformed.std(), 0.71, 2)
def test_randomized_pca_inverse():
# Test that RandomizedPCA is inversible on dense data
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed signal
# (since the data is almost of rank n_components)
pca = RandomizedPCA(n_components=2, random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=2)
# same as above with whitening (approximate reconstruction)
pca = RandomizedPCA(n_components=2, whiten=True,
random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
relative_max_delta = (np.abs(X - Y_inverse) / np.abs(X).mean()).max()
assert_almost_equal(relative_max_delta, 0.11, decimal=2)
def test_pca_dim():
# Check automated dimensionality setting
rng = np.random.RandomState(0)
n, p = 100, 5
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
pca = PCA(n_components='mle').fit(X)
assert_equal(pca.n_components, 'mle')
assert_equal(pca.n_components_, 1)
def test_infer_dim_1():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = (rng.randn(n, p) * .1 + rng.randn(n, 1) * np.array([3, 4, 5, 1, 2])
+ np.array([1, 0, 7, 4, 6]))
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
ll = []
for k in range(p):
ll.append(_assess_dimension_(spect, k, n, p))
ll = np.array(ll)
assert_greater(ll[1], ll.max() - .01 * n)
def test_infer_dim_2():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 1)
def test_infer_dim_3():
n, p = 100, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
X[30:40] += 2 * np.array([-1, 1, -1, 1, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 2)
def test_infer_dim_by_explained_variance():
X = iris.data
pca = PCA(n_components=0.95)
pca.fit(X)
assert_equal(pca.n_components, 0.95)
assert_equal(pca.n_components_, 2)
pca = PCA(n_components=0.01)
pca.fit(X)
assert_equal(pca.n_components, 0.01)
assert_equal(pca.n_components_, 1)
rng = np.random.RandomState(0)
# more features than samples
X = rng.rand(5, 20)
pca = PCA(n_components=.5).fit(X)
assert_equal(pca.n_components, 0.5)
assert_equal(pca.n_components_, 2)
def test_pca_score():
# Test that probabilistic PCA scoring yields a reasonable score
n, p = 1000, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1 ** 2) * p
np.testing.assert_almost_equal(ll1 / h, 1, 0)
def test_pca_score2():
# Test that probabilistic PCA correctly separated different datasets
n, p = 100, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
ll2 = pca.score(rng.randn(n, p) * .2 + np.array([3, 4, 5]))
assert_greater(ll1, ll2)
# Test that it gives the same scores if whiten=True
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
ll2 = pca.score(X)
assert_almost_equal(ll1, ll2)
def test_pca_score3():
# Check that probabilistic PCA selects the right model
n, p = 200, 3
rng = np.random.RandomState(0)
Xl = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
Xt = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
ll = np.zeros(p)
for k in range(p):
pca = PCA(n_components=k)
pca.fit(Xl)
ll[k] = pca.score(Xt)
assert_true(ll.argmax() == 1)
| bsd-3-clause |
antiface/mne-python | examples/visualization/plot_clickable_image.py | 7 | 2317 | """
================================================================
Demonstration of how to use ClickableImage / generate_2d_layout.
================================================================
In this example, we open an image file, then use ClickableImage to
return 2D locations of mouse clicks (or load a file already created).
Then, we use generate_2d_layout to turn those xy positions into a layout
for use with plotting topo maps. In this way, you can take arbitrary xy
positions and turn them into a plottable layout.
"""
# Authors: Christopher Holdgraf <[email protected]>
#
# License: BSD (3-clause)
from scipy.ndimage import imread
import numpy as np
from matplotlib import pyplot as plt
from os import path as op
import mne
from mne.viz import ClickableImage, add_background_image # noqa
from mne.channels import generate_2d_layout # noqa
print(__doc__)
# Set parameters and paths
plt.rcParams['image.cmap'] = 'gray'
im_path = op.join(op.dirname(mne.__file__), 'data', 'image', 'mni_brain.gif')
# We've already clicked and exported
layout_path = op.join(op.dirname(mne.__file__), 'data', 'image')
layout_name = 'custom_layout.lout'
###############################################################################
# Load data and click
im = imread(im_path)
plt.imshow(im)
"""
This code opens the image so you can click on it. Commented out
because we've stored the clicks as a layout file already.
# The click coordinates are stored as a list of tuples
click = ClickableImage(im)
click.plot_clicks()
coords = click.coords
# Generate a layout from our clicks and normalize by the image
lt = generate_2d_layout(np.vstack(coords), bg_image=im)
lt.save(layout_path + layout_name) # To save if we want
"""
# We've already got the layout, load it
lt = mne.channels.read_layout(layout_name, path=layout_path, scale=False)
# Create some fake data
nchans = len(lt.pos)
nepochs = 50
sr = 1000
nsec = 5
events = np.arange(nepochs).reshape([-1, 1])
events = np.hstack([events, np.zeros([nepochs, 2])])
data = np.random.randn(nepochs, nchans, sr * nsec)
info = mne.create_info(nchans, sr, ch_types='eeg')
epochs = mne.EpochsArray(data, info, events)
# Using the native plot_topo function with the image plotted in the background
f = mne.viz.plot_topo(epochs.average(), layout=lt, fig_background=im)
| bsd-3-clause |
fabianp/scikit-learn | benchmarks/bench_mnist.py | 154 | 6006 | """
=======================
MNIST dataset benchmark
=======================
Benchmark on the MNIST dataset. The dataset comprises 70,000 samples
and 784 features. Here, we consider the task of predicting
10 classes - digits from 0 to 9 from their raw images. By contrast to the
covertype dataset, the feature space is homogeneous.
Example of output :
[..]
Classification performance:
===========================
    Classifier               train-time   test-time  error-rate
------------------------------------------------------------
Nystroem-SVM 105.07s 0.91s 0.0227
ExtraTrees 48.20s 1.22s 0.0288
RandomForest 47.17s 1.21s 0.0304
SampledRBF-SVM 140.45s 0.84s 0.0486
CART 22.84s 0.16s 0.1214
dummy 0.01s 0.02s 0.8973
"""
from __future__ import division, print_function
# Author: Issam H. Laradji
# Arnaud Joly <[email protected]>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_mldata
from sklearn.datasets import get_data_home
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.dummy import DummyClassifier
from sklearn.externals.joblib import Memory
from sklearn.kernel_approximation import Nystroem
from sklearn.kernel_approximation import RBFSampler
from sklearn.metrics import zero_one_loss
from sklearn.pipeline import make_pipeline
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import check_array
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'mnist_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='F'):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
## Load dataset
print("Loading dataset...")
data = fetch_mldata('MNIST original')
X = check_array(data['data'], dtype=dtype, order=order)
y = data["target"]
# Normalize features
X = X / 255
## Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 60000
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
return X_train, X_test, y_train, y_test
ESTIMATORS = {
"dummy": DummyClassifier(),
'CART': DecisionTreeClassifier(),
'ExtraTrees': ExtraTreesClassifier(n_estimators=100),
'RandomForest': RandomForestClassifier(n_estimators=100),
'Nystroem-SVM':
make_pipeline(Nystroem(gamma=0.015, n_components=1000), LinearSVC(C=100)),
'SampledRBF-SVM':
make_pipeline(RBFSampler(gamma=0.015, n_components=1000), LinearSVC(C=100))
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['ExtraTrees', 'Nystroem-SVM'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=0, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(order=args["order"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (size=%dMB)" % ("number of train samples:".ljust(25),
X_train.shape[0], int(X_train.nbytes / 1e6)))
print("%s %d (size=%dMB)" % ("number of test samples:".ljust(25),
X_test.shape[0], int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("{0: <24} {1: >10} {2: >11} {3: >12}"
"".format("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 60)
for name in sorted(args["classifiers"], key=error.get):
print("{0: <23} {1: >10.2f}s {2: >10.2f}s {3: >12.4f}"
"".format(name, train_time[name], test_time[name], error[name]))
print()
| bsd-3-clause |
percyfal/bokeh | examples/models/file/trail.py | 6 | 4756 | # -*- coding: utf-8 -*-
from __future__ import print_function
from math import sin, cos, atan2, sqrt, radians
import numpy as np
import scipy.ndimage as im
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.resources import INLINE
from bokeh.util.browser import view
from bokeh.models.glyphs import Line, Patches
from bokeh.models.layouts import Column
from bokeh.models import (
Plot, GMapPlot, GMapOptions,
DataRange1d, ColumnDataSource,
LinearAxis, Grid, Label,
PanTool, WheelZoomTool, ResetTool)
from bokeh.sampledata.mtb import obiszow_mtb_xcm
def haversin(theta):
return sin(0.5 * theta) ** 2
def distance(p1, p2):
"""Distance between (lat1, lon1) and (lat2, lon2). """
R = 6371
lat1, lon1 = p1
lat2, lon2 = p2
phi1 = radians(lat1)
phi2 = radians(lat2)
delta_lat = radians(lat2 - lat1)
delta_lon = radians(lon2 - lon1)
a = haversin(delta_lat) + cos(phi1) * cos(phi2) * haversin(delta_lon)
return 2 * R * atan2(sqrt(a), sqrt(1 - a))
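# Quick sanity check (illustrative): one degree of longitude at the equator is
# 2 * pi * R / 360, so distance((0.0, 0.0), (0.0, 1.0)) should be roughly 111.2 km.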
def prep_data(dataset):
df = dataset.copy()
latlon = list(zip(df.lat, df.lon))
dist = np.array([distance(latlon[i + 1], latlon[i]) for i in range(len((latlon[:-1])))])
df["dist"] = np.concatenate(([0], np.cumsum(dist)))
slope = np.abs(100 * np.diff(df.alt) / (1000 * dist))
slope[np.where( slope < 4) ] = 0 # "green"
slope[np.where((slope >= 4) & (slope < 6))] = 1 # "yellow"
slope[np.where((slope >= 6) & (slope < 10))] = 2 # "pink"
slope[np.where((slope >= 10) & (slope < 15))] = 3 # "orange"
slope[np.where( slope >= 15 )] = 4 # "red"
slope = im.median_filter(slope, 6)
colors = np.empty_like(slope, dtype=object)
colors[np.where(slope == 0)] = "green"
colors[np.where(slope == 1)] = "yellow"
colors[np.where(slope == 2)] = "pink"
colors[np.where(slope == 3)] = "orange"
colors[np.where(slope == 4)] = "red"
df["colors"] = list(colors) + [None] # NOTE: add [None] just make pandas happy
return df
name = "Obiszów MTB XCM"
# Google Maps now requires an API key. You can find out how to get one here:
# https://developers.google.com/maps/documentation/javascript/get-api-key
API_KEY = "GOOGLE_API_KEY"
def trail_map(data):
lon = (min(data.lon) + max(data.lon)) / 2
lat = (min(data.lat) + max(data.lat)) / 2
map_options = GMapOptions(lng=lon, lat=lat, zoom=13)
plot = GMapPlot(plot_width=800, plot_height=800, map_options=map_options, api_key=API_KEY)
plot.title.text = "%s - Trail Map" % name
plot.x_range = DataRange1d()
plot.y_range = DataRange1d()
plot.add_tools(PanTool(), WheelZoomTool(), ResetTool())
line_source = ColumnDataSource(dict(x=data.lon, y=data.lat, dist=data.dist))
line = Line(x="x", y="y", line_color="blue", line_width=2)
plot.add_glyph(line_source, line)
if plot.api_key == "GOOGLE_API_KEY":
plot.add_layout(Label(x=240, y=700, x_units='screen', y_units='screen',
text='Replace GOOGLE_API_KEY with your own key',
text_color='red'))
return plot
def altitude_profile(data):
plot = Plot(plot_width=800, plot_height=400)
plot.title.text = "%s - Altitude Profile" % name
plot.x_range = DataRange1d()
plot.y_range = DataRange1d(range_padding=0)
xaxis = LinearAxis(axis_label="Distance (km)")
plot.add_layout(xaxis, 'below')
yaxis = LinearAxis(axis_label="Altitude (m)")
plot.add_layout(yaxis, 'left')
plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker)) # x grid
plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker)) # y grid
plot.add_tools(PanTool(), WheelZoomTool(), ResetTool())
X, Y = data.dist, data.alt
y0 = min(Y)
patches_source = ColumnDataSource(dict(
xs=[[X[i], X[i+1], X[i+1], X[i]] for i in range(len(X[:-1])) ],
ys=[[y0, y0, Y[i+1], Y[i]] for i in range(len(Y[:-1])) ],
color=data.colors[:-1]
))
patches = Patches(xs="xs", ys="ys", fill_color="color", line_color="color")
plot.add_glyph(patches_source, patches)
line_source = ColumnDataSource(dict(x=data.dist, y=data.alt))
line = Line(x='x', y='y', line_color="black", line_width=1)
plot.add_glyph(line_source, line)
return plot
data = prep_data(obiszow_mtb_xcm)
trail = trail_map(data)
altitude = altitude_profile(data)
layout = Column(children=[altitude, trail])
doc = Document()
doc.add_root(layout)
if __name__ == "__main__":
doc.validate()
filename = "trail.html"
with open(filename, "w") as f:
f.write(file_html(doc, INLINE, "Trail map and altitude profile"))
print("Wrote %s" % filename)
view(filename)
| bsd-3-clause |
batterseapower/timeseries-compression | python/graph.py | 1 | 1863 | import numpy as np
import pandas as pd
import ctypes
from datetime import date
from matplotlib import pyplot as plt
def f2l(x):
return ctypes.cast((ctypes.c_float*1)(x), ctypes.POINTER(ctypes.c_int)).contents.value
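# f2l reinterprets the IEEE-754 bits of a float32 as an int, e.g.
# f2l(1.0) == 0x3F800000 == 1065353216; the sign/exponent/mantissa columns
# below are masked out of this bit pattern.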
vod = pd.read_csv('VOD.csv').set_index('Date')
vod.index = pd.DatetimeIndex(vod.index)
xs = vod.reindex(pd.bdate_range(min(vod.index), max(vod.index)))['Adj Close']
#xs = [(y / x) - 1.0 for x, y in zip(xs, xs[1:])] # Returns
xs = filter(lambda x: x != 0.0 and x == x, xs) # Sane values only 0s/NaNs
xs = map(f2l, xs)
df = pd.DataFrame.from_dict({
'sign': [(x & 0x80000000) >> 31 for x in xs],
'exponent': [(x & 0x7F800000) >> 23 for x in xs],
'mantissa': [(x & 0x007FFFFF) >> 0 for x in xs],
})
def show_correlation(mantissa):
pd.DataFrame.from_dict({
'low order': mantissa.apply(lambda x: (x & 0x00FF) >> 0),
'high order': mantissa.apply(lambda x: (x & 0xFF00) >> 8),
}).plot(kind='scatter', title='low order bytes correlation', x='low order', y='high order')
plt.show()
pd.DataFrame.from_dict({
'low order': mantissa.apply(lambda x: (x & 0x00FF00) >> 8),
'high order': mantissa.apply(lambda x: (x & 0x7F0000) >> 16),
}).plot(kind='scatter', title='high order bytes correlation', x='low order', y='high order')
plt.show()
mantissa_delta = pd.Series([x - y for x, y in zip(df.mantissa[1:], df.mantissa)])
show_correlation(df['mantissa'])
show_correlation(mantissa_delta)
for c in ['sign', 'exponent', 'mantissa']:
df[c].plot(title=c)
plt.show()
mantissa_delta.plot(title='Mantissa Delta')
plt.show()
mantissa_delta.plot(kind='kde', title='Distribution of Mantissa Delta')
plt.show()
modal_exponent = df['exponent'].value_counts().idxmax()
mants = sorted(set(df[df.exponent == modal_exponent]['mantissa']))
print pd.Series([y - x for x, y in zip(mants, mants[1:])]).value_counts()
| apache-2.0 |
shangwuhencc/scikit-learn | sklearn/neural_network/tests/test_rbm.py | 225 | 6278 | import sys
import re
import numpy as np
from scipy.sparse import csc_matrix, csr_matrix, lil_matrix
from sklearn.utils.testing import (assert_almost_equal, assert_array_equal,
assert_true)
from sklearn.datasets import load_digits
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.neural_network import BernoulliRBM
from sklearn.utils.validation import assert_all_finite
np.seterr(all='warn')
Xdigits = load_digits().data
Xdigits -= Xdigits.min()
Xdigits /= Xdigits.max()
def test_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, n_iter=7, random_state=9)
rbm.fit(X)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
# in-place tricks shouldn't have modified X
assert_array_equal(X, Xdigits)
def test_partial_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=20, random_state=9)
n_samples = X.shape[0]
n_batches = int(np.ceil(float(n_samples) / rbm.batch_size))
batch_slices = np.array_split(X, n_batches)
for i in range(7):
for batch in batch_slices:
rbm.partial_fit(batch)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
assert_array_equal(X, Xdigits)
def test_transform():
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=16, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
Xt1 = rbm1.transform(X)
Xt2 = rbm1._mean_hiddens(X)
assert_array_equal(Xt1, Xt2)
def test_small_sparse():
# BernoulliRBM should work on small sparse matrices.
X = csr_matrix(Xdigits[:4])
BernoulliRBM().fit(X) # no exception
def test_small_sparse_partial_fit():
for sparse in [csc_matrix, csr_matrix]:
X_sparse = sparse(Xdigits[:100])
X = Xdigits[:100].copy()
rbm1 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm2 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm1.partial_fit(X_sparse)
rbm2.partial_fit(X)
assert_almost_equal(rbm1.score_samples(X).mean(),
rbm2.score_samples(X).mean(),
decimal=0)
def test_sample_hiddens():
rng = np.random.RandomState(0)
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=2, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
h = rbm1._mean_hiddens(X[0])
hs = np.mean([rbm1._sample_hiddens(X[0], rng) for i in range(100)], 0)
assert_almost_equal(h, hs, decimal=1)
def test_fit_gibbs():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]]
# from the same input
rng = np.random.RandomState(42)
X = np.array([[0.], [1.]])
rbm1 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
# you need that much iters
rbm1.fit(X)
assert_almost_equal(rbm1.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm1.gibbs(X), X)
return rbm1
def test_fit_gibbs_sparse():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]] from
# the same input even when the input is sparse, and test against non-sparse
rbm1 = test_fit_gibbs()
rng = np.random.RandomState(42)
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm2 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
rbm2.fit(X)
assert_almost_equal(rbm2.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm2.gibbs(X), X.toarray())
assert_almost_equal(rbm1.components_, rbm2.components_)
def test_gibbs_smoke():
# Check if we don't get NaNs sampling the full digits dataset.
# Also check that sampling again will yield different results.
X = Xdigits
rbm1 = BernoulliRBM(n_components=42, batch_size=40,
n_iter=20, random_state=42)
rbm1.fit(X)
X_sampled = rbm1.gibbs(X)
assert_all_finite(X_sampled)
X_sampled2 = rbm1.gibbs(X)
assert_true(np.all((X_sampled != X_sampled2).max(axis=1)))
def test_score_samples():
# Test score_samples (pseudo-likelihood) method.
# Assert that pseudo-likelihood is computed without clipping.
# See Fabian's blog, http://bit.ly/1iYefRk
rng = np.random.RandomState(42)
X = np.vstack([np.zeros(1000), np.ones(1000)])
rbm1 = BernoulliRBM(n_components=10, batch_size=2,
n_iter=10, random_state=rng)
rbm1.fit(X)
assert_true((rbm1.score_samples(X) < -300).all())
# Sparse vs. dense should not affect the output. Also test sparse input
# validation.
rbm1.random_state = 42
d_score = rbm1.score_samples(X)
rbm1.random_state = 42
s_score = rbm1.score_samples(lil_matrix(X))
assert_almost_equal(d_score, s_score)
# Test numerical stability (#2785): would previously generate infinities
# and crash with an exception.
with np.errstate(under='ignore'):
rbm1.score_samples([np.arange(1000) * 100])
def test_rbm_verbose():
rbm = BernoulliRBM(n_iter=2, verbose=10)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
rbm.fit(Xdigits)
finally:
sys.stdout = old_stdout
def test_sparse_and_verbose():
# Make sure RBM works with sparse input when verbose=True
old_stdout = sys.stdout
sys.stdout = StringIO()
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm = BernoulliRBM(n_components=2, batch_size=2, n_iter=1,
random_state=42, verbose=True)
try:
rbm.fit(X)
s = sys.stdout.getvalue()
# make sure output is sound
assert_true(re.match(r"\[BernoulliRBM\] Iteration 1,"
r" pseudo-likelihood = -?(\d)+(\.\d+)?,"
r" time = (\d|\.)+s",
s))
finally:
sys.stdout = old_stdout
| bsd-3-clause |
GraphProcessor/CommunityDetectionCodes | Prensentation/algorithms/dynamics/others/aync_vs_sync.py | 1 | 2081 | # coding=utf8
import sys
import random
import networkx as nx
import matplotlib.pyplot as plt
# async lpa implementation from https://github.com/anatman-xx/lpa
# label-propagation algorithm
# use asynchronous updating for better results
def lpa(graph):
"""
:type graph: nx.Graph
"""
def estimate_stop_cond():
for node in graph.nodes_iter():
count = {}
for neighbor in graph.neighbors_iter(node):
neighbor_label = graph.node[neighbor]['label']
neighbor_weight = graph.edge[node][neighbor]['weight']
count[neighbor_label] = count.setdefault(neighbor_label, 0.0) + neighbor_weight
# find out labels with maximum count
count_items = count.items()
count_items.sort(key=lambda x: x[1], reverse=True)
# if there is not only one label with maximum count then choose one randomly
labels = [k for k, v in count_items if v == count_items[0][1]]
if graph.node[node]['label'] not in labels:
return False
return True
loop_count = 0
while True:
loop_count += 1
print 'loop', loop_count
for node in graph.nodes_iter():
count = {}
for neighbor in graph.neighbors_iter(node):
neighbor_label = graph.node[neighbor]['label']
neighbor_weight = graph.edge[node][neighbor]['weight']
count[neighbor_label] = count.setdefault(neighbor_label, 0.0) + neighbor_weight
# find out labels with maximum count
count_items = count.items()
count_items.sort(key=lambda x: x[1], reverse=True)
# if there is not only one label with maximum count then choose one randomly
labels = [(k, v) for k, v in count_items if v == count_items[0][1]]
label = random.sample(labels, 1)[0][0]
graph.node[node]['label'] = label
if estimate_stop_cond() is True or loop_count >= 10:
print 'complete'
return
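# Hedged usage sketch (not part of the original script): lpa() expects every
# node to carry a 'label' attribute and every edge a 'weight' attribute, so a
# toy graph is built accordingly; the edge list below is purely illustrative.
if __name__ == '__main__':
    g = nx.Graph()
    g.add_edges_from([(1, 2), (2, 3), (3, 1), (3, 4), (4, 5), (5, 6), (6, 4)],
                     weight=1.0)
    for n in g.nodes_iter():
        g.node[n]['label'] = n  # start with every node in its own community
    lpa(g)
    print [(n, g.node[n]['label']) for n in sorted(g.nodes())]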
| gpl-2.0 |
kelseyoo14/Wander | venv_2_7/lib/python2.7/site-packages/pandas/io/tests/generate_legacy_storage_files.py | 9 | 8044 | """ self-contained to write legacy storage (pickle/msgpack) files """
from __future__ import print_function
from distutils.version import LooseVersion
from pandas import (Series, DataFrame, Panel,
SparseSeries, SparseDataFrame, SparsePanel,
Index, MultiIndex, PeriodIndex, bdate_range, to_msgpack,
date_range, period_range, bdate_range, Timestamp, Categorical,
Period)
import os
import sys
import numpy as np
import pandas
import pandas.util.testing as tm
import platform as pl
def _create_sp_series():
nan = np.nan
# nan-based
arr = np.arange(15, dtype=np.float64)
arr[7:12] = nan
arr[-1:] = nan
bseries = SparseSeries(arr, kind='block')
bseries.name = 'bseries'
return bseries
def _create_sp_tsseries():
nan = np.nan
# nan-based
arr = np.arange(15, dtype=np.float64)
arr[7:12] = nan
arr[-1:] = nan
date_index = bdate_range('1/1/2011', periods=len(arr))
bseries = SparseSeries(arr, index=date_index, kind='block')
bseries.name = 'btsseries'
return bseries
def _create_sp_frame():
nan = np.nan
data = {'A': [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6],
'B': [0, 1, 2, nan, nan, nan, 3, 4, 5, 6],
'C': np.arange(10).astype(np.int64),
'D': [0, 1, 2, 3, 4, 5, nan, nan, nan, nan]}
dates = bdate_range('1/1/2011', periods=10)
return SparseDataFrame(data, index=dates)
def create_data():
""" create the pickle/msgpack data """
data = {
'A': [0., 1., 2., 3., np.nan],
'B': [0, 1, 0, 1, 0],
'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
'D': date_range('1/1/2009', periods=5),
'E': [0., 1, Timestamp('20100101'), 'foo', 2.]
}
scalars = dict(timestamp=Timestamp('20130101'))
if LooseVersion(pandas.__version__) >= '0.17.0':
scalars['period'] = Period('2012','M')
index = dict(int=Index(np.arange(10)),
date=date_range('20130101', periods=10),
period=period_range('2013-01-01', freq='M', periods=10))
mi = dict(reg2=MultiIndex.from_tuples(tuple(zip(*[['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']])),
names=['first', 'second']))
series = dict(float=Series(data['A']),
int=Series(data['B']),
mixed=Series(data['E']),
ts=Series(np.arange(10).astype(np.int64), index=date_range('20130101',periods=10)),
mi=Series(np.arange(5).astype(np.float64),
index=MultiIndex.from_tuples(tuple(zip(*[[1, 1, 2, 2, 2], [3, 4, 3, 4, 5]])),
names=['one', 'two'])),
dup=Series(np.arange(5).astype(np.float64), index=['A', 'B', 'C', 'D', 'A']),
cat=Series(Categorical(['foo', 'bar', 'baz'])),
dt=Series(date_range('20130101',periods=5)),
dt_tz=Series(date_range('20130101',periods=5,tz='US/Eastern')))
if LooseVersion(pandas.__version__) >= '0.17.0':
series['period'] = Series([Period('2000Q1')] * 5)
mixed_dup_df = DataFrame(data)
mixed_dup_df.columns = list("ABCDA")
frame = dict(float=DataFrame(dict(A=series['float'], B=series['float'] + 1)),
int=DataFrame(dict(A=series['int'], B=series['int'] + 1)),
mixed=DataFrame(dict([(k, data[k]) for k in ['A', 'B', 'C', 'D']])),
mi=DataFrame(dict(A=np.arange(5).astype(np.float64), B=np.arange(5).astype(np.int64)),
index=MultiIndex.from_tuples(tuple(zip(*[['bar', 'bar', 'baz', 'baz', 'baz'],
['one', 'two', 'one', 'two', 'three']])),
names=['first', 'second'])),
dup=DataFrame(np.arange(15).reshape(5, 3).astype(np.float64),
columns=['A', 'B', 'A']),
cat_onecol=DataFrame(dict(A=Categorical(['foo', 'bar']))),
cat_and_float=DataFrame(dict(A=Categorical(['foo', 'bar', 'baz']),
B=np.arange(3).astype(np.int64))),
mixed_dup=mixed_dup_df,
dt_mixed_tzs=DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'), B=Timestamp('20130603', tz='CET')), index=range(5)),
)
mixed_dup_panel = Panel(dict(ItemA=frame['float'], ItemB=frame['int']))
mixed_dup_panel.items = ['ItemA', 'ItemA']
panel = dict(float=Panel(dict(ItemA=frame['float'], ItemB=frame['float'] + 1)),
dup=Panel(np.arange(30).reshape(3, 5, 2).astype(np.float64),
items=['A', 'B', 'A']),
mixed_dup=mixed_dup_panel)
return dict(series=series,
frame=frame,
panel=panel,
index=index,
scalars=scalars,
mi=mi,
sp_series=dict(float=_create_sp_series(),
ts=_create_sp_tsseries()),
sp_frame=dict(float=_create_sp_frame()))
def create_pickle_data():
data = create_data()
# Pre-0.14.1 versions generated non-unpicklable mixed-type frames and
# panels if their columns/items were non-unique.
if LooseVersion(pandas.__version__) < '0.14.1':
del data['frame']['mixed_dup']
del data['panel']['mixed_dup']
return data
def create_msgpack_data():
data = create_data()
if LooseVersion(pandas.__version__) < '0.17.0':
del data['frame']['mixed_dup']
del data['panel']['mixed_dup']
del data['frame']['dup']
del data['panel']['dup']
# Not supported
del data['sp_series']
del data['sp_frame']
del data['series']['cat']
del data['frame']['cat_onecol']
del data['frame']['cat_and_float']
return data
def platform_name():
return '_'.join([str(pandas.__version__), str(pl.machine()), str(pl.system().lower()), str(pl.python_version())])
def write_legacy_pickles(output_dir):
# make sure we are < 0.13 compat (in py3)
try:
from pandas.compat import zip, cPickle as pickle
except:
import pickle
version = pandas.__version__
print("This script generates a storage file for the current arch, system, and python version")
print(" pandas version: {0}".format(version))
print(" output dir : {0}".format(output_dir))
print(" storage format: pickle")
pth = '{0}.pickle'.format(platform_name())
fh = open(os.path.join(output_dir, pth), 'wb')
pickle.dump(create_pickle_data(), fh, pickle.HIGHEST_PROTOCOL)
fh.close()
print("created pickle file: %s" % pth)
def write_legacy_msgpack(output_dir):
version = pandas.__version__
print("This script generates a storage file for the current arch, system, and python version")
print(" pandas version: {0}".format(version))
print(" output dir : {0}".format(output_dir))
print(" storage format: msgpack")
pth = '{0}.msgpack'.format(platform_name())
to_msgpack(os.path.join(output_dir, pth), create_msgpack_data())
print("created msgpack file: %s" % pth)
def write_legacy_file():
# force our cwd to be the first searched
sys.path.insert(0, '.')
if len(sys.argv) != 3:
exit("Specify output directory and storage type: generate_legacy_storage_files.py <output_dir> <storage_type>")
output_dir = str(sys.argv[1])
storage_type = str(sys.argv[2])
if storage_type == 'pickle':
write_legacy_pickles(output_dir=output_dir)
elif storage_type == 'msgpack':
write_legacy_msgpack(output_dir=output_dir)
else:
exit("storage_type must be one of {'pickle', 'msgpack'}")
if __name__ == '__main__':
write_legacy_file()
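# Example invocations (the output directories are placeholders, not taken from
# the original script):
#   python generate_legacy_storage_files.py legacy_pickle/0.18.1/ pickle
#   python generate_legacy_storage_files.py legacy_msgpack/0.18.1/ msgpack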
| artistic-2.0 |
nesterione/scikit-learn | examples/ensemble/plot_voting_probas.py | 316 | 2824 | """
===========================================================
Plot class probabilities calculated by the VotingClassifier
===========================================================
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three exemplary classifiers are initialized (`LogisticRegression`,
`GaussianNB`, and `RandomForestClassifier`) and used to initialize a
soft-voting `VotingClassifier` with weights `[1, 1, 5]`, which means that
the predicted probabilities of the `GaussianNB` count 5 times as much as
those of the other classifiers when the averaged probability is calculated.
To visualize the probability weighting, we fit each classifier on the training
set and plot the predicted class probabilities for the first sample in this
example dataset.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.0, -1.0], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 1, 5])
# predict class probabilities for all classifiers
probas = [c.fit(X, y).predict_proba(X) for c in (clf1, clf2, clf3, eclf)]
# get class probabilities for the first sample in the dataset
class1_1 = [pr[0, 0] for pr in probas]
class2_1 = [pr[0, 1] for pr in probas]
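# Sketch (not part of the original example): with soft voting, the
# VotingClassifier's probabilities should equal the weighted average of the
# three base classifiers' probabilities, using the weights [1, 1, 5] set above.
manual_avg = np.average(np.array(probas[:3])[:, 0, :], axis=0, weights=[1, 1, 5])
print('weighted average of base probabilities for sample 1:', manual_avg)
print('VotingClassifier probabilities for sample 1:', probas[-1][0])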
# plotting
N = 4 # number of groups
ind = np.arange(N) # group positions
width = 0.35 # bar width
fig, ax = plt.subplots()
# bars for classifier 1-3
p1 = ax.bar(ind, np.hstack(([class1_1[:-1], [0]])), width, color='green')
p2 = ax.bar(ind + width, np.hstack(([class2_1[:-1], [0]])), width, color='lightgreen')
# bars for VotingClassifier
p3 = ax.bar(ind, [0, 0, 0, class1_1[-1]], width, color='blue')
p4 = ax.bar(ind + width, [0, 0, 0, class2_1[-1]], width, color='steelblue')
# plot annotations
plt.axvline(2.8, color='k', linestyle='dashed')
ax.set_xticks(ind + width)
ax.set_xticklabels(['LogisticRegression\nweight 1',
                    'RandomForestClassifier\nweight 1',
                    'GaussianNB\nweight 5',
'VotingClassifier\n(average probabilities)'],
rotation=40,
ha='right')
plt.ylim([0, 1])
plt.title('Class probabilities for sample 1 by different classifiers')
plt.legend([p1[0], p2[0]], ['class 1', 'class 2'], loc='upper left')
plt.show()
| bsd-3-clause |
ZenDevelopmentSystems/scikit-learn | benchmarks/bench_random_projections.py | 397 | 8900 | """
===========================
Random projection benchmark
===========================
Benchmarks for random projections.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import collections
import numpy as np
import scipy.sparse as sp
from sklearn import clone
from sklearn.externals.six.moves import xrange
from sklearn.random_projection import (SparseRandomProjection,
GaussianRandomProjection,
johnson_lindenstrauss_min_dim)
def type_auto_or_float(val):
if val == "auto":
return "auto"
else:
return float(val)
def type_auto_or_int(val):
if val == "auto":
return "auto"
else:
return int(val)
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_scikit_transformer(X, transfomer):
gc.collect()
clf = clone(transfomer)
# start time
t_start = datetime.now()
clf.fit(X)
delta = (datetime.now() - t_start)
# stop time
time_to_fit = compute_time(t_start, delta)
# start time
t_start = datetime.now()
clf.transform(X)
delta = (datetime.now() - t_start)
# stop time
time_to_transform = compute_time(t_start, delta)
return time_to_fit, time_to_transform
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros,
random_state=None):
rng = np.random.RandomState(random_state)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
def print_row(clf_type, time_fit, time_transform):
print("%s | %s | %s" % (clf_type.ljust(30),
("%.4fs" % time_fit).center(12),
("%.4fs" % time_transform).center(12)))
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-features",
dest="n_features", default=10 ** 4, type=int,
help="Number of features in the benchmarks")
op.add_option("--n-components",
dest="n_components", default="auto",
help="Size of the random subspace."
" ('auto' or int > 0)")
op.add_option("--ratio-nonzeros",
dest="ratio_nonzeros", default=10 ** -3, type=float,
help="Number of features in the benchmarks")
op.add_option("--n-samples",
dest="n_samples", default=500, type=int,
help="Number of samples in the benchmarks")
op.add_option("--random-seed",
dest="random_seed", default=13, type=int,
help="Seed used by the random number generators.")
op.add_option("--density",
dest="density", default=1 / 3,
help="Density used by the sparse random projection."
" ('auto' or float (0.0, 1.0]")
op.add_option("--eps",
dest="eps", default=0.5, type=float,
help="See the documentation of the underlying transformers.")
op.add_option("--transformers",
dest="selected_transformers",
default='GaussianRandomProjection,SparseRandomProjection',
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. Available: "
"GaussianRandomProjection,SparseRandomProjection")
op.add_option("--dense",
dest="dense",
default=False,
action="store_true",
help="Set input space as a dense matrix.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
opts.n_components = type_auto_or_int(opts.n_components)
opts.density = type_auto_or_float(opts.density)
selected_transformers = opts.selected_transformers.split(',')
###########################################################################
# Generate dataset
###########################################################################
n_nonzeros = int(opts.ratio_nonzeros * opts.n_features)
    print('Dataset statistics')
print("===========================")
print('n_samples \t= %s' % opts.n_samples)
print('n_features \t= %s' % opts.n_features)
if opts.n_components == "auto":
print('n_components \t= %s (auto)' %
johnson_lindenstrauss_min_dim(n_samples=opts.n_samples,
eps=opts.eps))
else:
print('n_components \t= %s' % opts.n_components)
print('n_elements \t= %s' % (opts.n_features * opts.n_samples))
print('n_nonzeros \t= %s per feature' % n_nonzeros)
print('ratio_nonzeros \t= %s' % opts.ratio_nonzeros)
print('')
###########################################################################
# Set transformer input
###########################################################################
transformers = {}
###########################################################################
# Set GaussianRandomProjection input
gaussian_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed
}
transformers["GaussianRandomProjection"] = \
GaussianRandomProjection(**gaussian_matrix_params)
###########################################################################
# Set SparseRandomProjection input
sparse_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed,
"density": opts.density,
"eps": opts.eps,
}
transformers["SparseRandomProjection"] = \
SparseRandomProjection(**sparse_matrix_params)
###########################################################################
# Perform benchmark
###########################################################################
time_fit = collections.defaultdict(list)
time_transform = collections.defaultdict(list)
print('Benchmarks')
print("===========================")
print("Generate dataset benchmarks... ", end="")
X_dense, X_sparse = make_sparse_random_data(opts.n_samples,
opts.n_features,
n_nonzeros,
random_state=opts.random_seed)
X = X_dense if opts.dense else X_sparse
print("done")
for name in selected_transformers:
print("Perform benchmarks for %s..." % name)
for iteration in xrange(opts.n_times):
print("\titer %s..." % iteration, end="")
time_to_fit, time_to_transform = bench_scikit_transformer(X_dense,
transformers[name])
time_fit[name].append(time_to_fit)
time_transform[name].append(time_to_transform)
print("done")
print("")
###########################################################################
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Transformer performance:")
print("===========================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
print("%s | %s | %s" % ("Transformer".ljust(30),
"fit".center(12),
"transform".center(12)))
print(31 * "-" + ("|" + "-" * 14) * 2)
for name in sorted(selected_transformers):
print_row(name,
np.mean(time_fit[name]),
np.mean(time_transform[name]))
print("")
print("")
| bsd-3-clause |
inodb/gefes | gefes/graphs/assembly_plots.py | 1 | 1059 | # Built-in modules #
# Internal modules #
from gefes.graphs import Graph
# Third party modules #
import pandas
from matplotlib import pyplot
from Bio import SeqIO
# Constants #
__all__ = ['ContigDist']
################################################################################
class ContigDist(Graph):
"""General distribution of the contigs for a pool"""
short_name = 'contig_dist'
def plot(self):
# Data #
lengths = map(len, SeqIO.parse(self.parent.p.amos_dir + 'contigs.fasta' , 'fasta'))
values = pandas.Series(lengths)
# Plot #
fig = pyplot.figure()
axes = values.hist(color='gray', bins=2000)
fig = pyplot.gcf()
title = 'Distribution of contigs lengths after scaffolding for sample "%s"' % self.parent.pool.long_name
axes.set_title(title)
axes.set_xlabel('Number of nucleotides in sequence')
axes.set_ylabel('Number of sequences with this length')
axes.xaxis.grid(False)
# Save it #
self.save_plot(fig, axes, sep=('x')) | mit |
openbermuda/karmapi | karmapi/show.py | 1 | 1823 | """
Show images.
"""
from IPython import display
import pandas
from PIL import Image
from matplotlib import pyplot
from karmapi import base, heart
def show(path):
""" Show image for path """
path = str(path)
#if path.endswith('.gif'):
# return hshow(path)
return Image.open(path)
def hshow(path):
""" Show image for path using HTML
For some reason, IPython.display.Image does not do GIFs.
So we embed it in html instead.
"""
return display.HTML('<img src="{}">'.format(path))
def movie(images, path=None, **kwargs):
""" Create a movie for images """
import imageio
if path is None:
path = imageio.RETURN_BYTES
return imageio.mimwrite(path, images, format='GIF', **kwargs)
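# Illustrative call (names are placeholders): movie(frames, 'out.gif', duration=0.2),
# where `frames` is a list of HxWx3 uint8 arrays and `duration` is the per-frame
# display time forwarded to imageio's GIF writer through **kwargs.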
def load(path):
return Image.open(str(path))
def save(path, image):
im = Image.fromarray(image)
im.save(str(path))
def sono(so, offset=1, end=None, **kwargs):
""" Show an sonogram image """
so = pandas.np.array(so)
n = so.shape[1]
if end is None:
end = n / 2
pyplot.title('offset: {} end: {} n: {}'.format(offset, end, n))
pyplot.imshow(so[:, offset:int(end)].T.real, aspect='auto',
**kwargs)
def sono2(so, offset=1, end=None, **kwargs):
""" Show an sonogram image """
so = pandas.np.array(so)
n = so.shape[1]
if end is None:
end = n / 2
pyplot.figure(figsize=(12, 4))
pyplot.subplot(1, 2, 1)
pyplot.title('offset: {} end: {} n: {}'.format(offset, end, n))
pyplot.imshow(so[:, offset:int(end)].T.real, aspect='auto',
**kwargs)
pyplot.subplot(1, 2, 2)
pyplot.imshow(so[:, offset:int(end)].T.imag, aspect='auto',
**kwargs)
def wide():
pyplot.figure(figsize=(12,4))
| gpl-3.0 |
heli522/scikit-learn | examples/plot_multilabel.py | 236 | 4157 | # Authors: Vlad Niculae, Mathieu Blondel
# License: BSD 3 clause
"""
=========================
Multilabel classification
=========================
This example simulates a multi-label document classification problem. The
dataset is generated randomly based on the following process:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that n is more
than 2, and that the document length is never zero. Likewise, we reject classes
which have already been chosen. The documents that are assigned to both
classes are plotted surrounded by two colored circles.
The classification is performed by projecting to the first two principal
components found by PCA and CCA for visualisation purposes, followed by using
the :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier using two
SVCs with linear kernels to learn a discriminative model for each class.
Note that PCA is used to perform an unsupervised dimensionality reduction,
while CCA is used to perform a supervised one.
Note: in the plot, "unlabeled samples" does not mean that we don't know the
labels (as in semi-supervised learning) but that the samples simply do *not*
have a label.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import LabelBinarizer
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA
def plot_hyperplane(clf, min_x, max_x, linestyle, label):
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
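    # The boundary satisfies w[0]*x + w[1]*y + intercept = 0, so
    # y = -(w[0]/w[1])*x - intercept/w[1]; `a` above is that slope.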
xx = np.linspace(min_x - 5, max_x + 5) # make sure the line is long enough
yy = a * xx - (clf.intercept_[0]) / w[1]
plt.plot(xx, yy, linestyle, label=label)
def plot_subfigure(X, Y, subplot, title, transform):
if transform == "pca":
X = PCA(n_components=2).fit_transform(X)
elif transform == "cca":
X = CCA(n_components=2).fit(X, Y).transform(X)
else:
raise ValueError
min_x = np.min(X[:, 0])
max_x = np.max(X[:, 0])
min_y = np.min(X[:, 1])
max_y = np.max(X[:, 1])
classif = OneVsRestClassifier(SVC(kernel='linear'))
classif.fit(X, Y)
plt.subplot(2, 2, subplot)
plt.title(title)
zero_class = np.where(Y[:, 0])
one_class = np.where(Y[:, 1])
plt.scatter(X[:, 0], X[:, 1], s=40, c='gray')
plt.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',
facecolors='none', linewidths=2, label='Class 1')
plt.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',
facecolors='none', linewidths=2, label='Class 2')
plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',
'Boundary\nfor class 1')
plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',
'Boundary\nfor class 2')
plt.xticks(())
plt.yticks(())
plt.xlim(min_x - .5 * max_x, max_x + .5 * max_x)
plt.ylim(min_y - .5 * max_y, max_y + .5 * max_y)
if subplot == 2:
plt.xlabel('First principal component')
plt.ylabel('Second principal component')
plt.legend(loc="upper left")
plt.figure(figsize=(8, 6))
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=True,
random_state=1)
plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca")
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
random_state=1)
plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca")
plt.subplots_adjust(.04, .02, .97, .94, .09, .2)
plt.show()
| bsd-3-clause |
JT5D/scikit-learn | sklearn/linear_model/__init__.py | 3 | 2735 | """
The :mod:`sklearn.linear_model` module implements generalized linear models. It
includes Ridge regression, Bayesian Regression, Lasso and Elastic Net
estimators computed with Least Angle Regression and coordinate descent. It also
implements Stochastic Gradient Descent related algorithms.
"""
# See http://scikit-learn.sourceforge.net/modules/sgd.html and
# http://scikit-learn.sourceforge.net/modules/linear_model.html for
# complete documentation.
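# A minimal usage sketch (illustrative only, with made-up data):
#
#   import numpy as np
#   from sklearn.linear_model import Ridge
#   X = np.array([[0.0], [1.0], [2.0]])
#   y = np.array([0.0, 1.0, 2.0])
#   model = Ridge(alpha=1.0).fit(X, y)
#   predictions = model.predict(X)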
from .base import LinearRegression
from .bayes import BayesianRidge, ARDRegression
from .least_angle import (Lars, LassoLars, lars_path, LarsCV, LassoLarsCV,
LassoLarsIC)
from .coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV,
lasso_path, enet_path, MultiTaskLasso,
MultiTaskElasticNet)
from .sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
from .stochastic_gradient import SGDClassifier, SGDRegressor
from .ridge import (Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV,
ridge_regression)
from .logistic import LogisticRegression
from .omp import (orthogonal_mp, orthogonal_mp_gram, OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV)
from .passive_aggressive import PassiveAggressiveClassifier
from .passive_aggressive import PassiveAggressiveRegressor
from .perceptron import Perceptron
from .randomized_l1 import (RandomizedLasso, RandomizedLogisticRegression,
lasso_stability_path)
from .ransac import RANSACRegressor
__all__ = ['ARDRegression',
'BayesianRidge',
'ElasticNet',
'ElasticNetCV',
'Hinge',
'Huber',
'Lars',
'LarsCV',
'Lasso',
'LassoCV',
'LassoLars',
'LassoLarsCV',
'LassoLarsIC',
'LinearRegression',
'Log',
'LogisticRegression',
'ModifiedHuber',
'MultiTaskElasticNet',
'MultiTaskLasso',
'OrthogonalMatchingPursuit',
'OrthogonalMatchingPursuitCV',
'PassiveAggressiveClassifier',
'PassiveAggressiveRegressor',
'Perceptron',
'RandomizedLasso',
'RandomizedLogisticRegression',
'Ridge',
'RidgeCV',
'RidgeClassifier',
'RidgeClassifierCV',
'SGDClassifier',
'SGDRegressor',
'SquaredLoss',
'enet_path',
'lars_path',
'lasso_path',
'lasso_stability_path',
'orthogonal_mp',
'orthogonal_mp_gram',
'ridge_regression',
'RANSACRegressor']
| bsd-3-clause |
Ehrax/ycsb_plotting | plot.py | 1 | 2252 | #!/bin/python
import argparse
import re
import matplotlib.pyplot as plt
import os
CURRENT_DIR = os.getcwd()
one_file = False
def parse_file(str):
regex = re.compile("([0-9]+)(.)?.sec.\s[0-9]+\s", re.DOTALL)
ops_regex = re.compile("([0-9]+(\.[0-9]+)?)\scurrent\sops\/sec")
sec_tuple = regex.findall(str)
ops_tuple = ops_regex.findall(str)
sec_list = [x[0] for x in sec_tuple]
ops_list = ['0'] + [x[0] for x in ops_tuple]
return sec_list, ops_list
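# Illustrative status line the regexes above are written for (format inferred
# from the patterns themselves, not from real YCSB output):
#   "... 10 sec: 4821 operations; 963.4 current ops/sec ..."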
def plot_files(paths):
regex = re.compile(".+/(.+)\.+")
for p in paths:
title = re.match(regex, p).group(1)
data = open(p).read()
sec_list, ops_list = parse_file(data)
draw(sec_list, ops_list, title)
if not one_file:
plt.savefig("{}/results/{}".format(CURRENT_DIR, title))
plt.clf()
else:
continue
if one_file:
plt.savefig("{}/results/{}".format(CURRENT_DIR, "Test"))
def draw(x, y, title):
plt.xlabel("sec")
plt.ylabel("current ops/s")
plt.title(title)
plt.plot(x, y)
def plot_recursive(paths, prefix):
paths_found = []
for p in paths:
for root, dirs, files in os.walk(p):
for file in files:
if file.startswith(prefix):
paths_found.append(os.path.join(root, file))
elif prefix == "all":
paths_found.append(os.path.join(root, file))
plot_files(paths_found)
def main():
global one_file
parser = argparse.ArgumentParser(description="YCSB - Plotting Tool")
parser.add_argument("path", nargs="+", help="path of files")
parser.add_argument("--o", action="store_true", help="use this if "
"you want only one "
"ouput file")
parser.add_argument("--r", metavar="'prefix'")
args = parser.parse_args()
paths = args.path
if args.o:
one_file = True
if not os.path.exists("./results"):
os.makedirs("./results")
if args.r is not None:
prefix = args.r
plot_recursive(paths, prefix)
else:
plot_files(paths)
if __name__ == "__main__":
main()
| mit |
andrewnc/scikit-learn | examples/cluster/plot_mean_shift.py | 351 | 1793 | """
=============================================
A demo of the mean-shift clustering algorithm
=============================================
Reference:
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.datasets.samples_generator import make_blobs
###############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, _ = make_blobs(n_samples=10000, centers=centers, cluster_std=0.6)
###############################################################################
# Compute clustering with MeanShift
# The following bandwidth can be automatically detected using
bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=500)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print("number of estimated clusters : %d" % n_clusters_)
###############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
my_members = labels == k
cluster_center = cluster_centers[k]
plt.plot(X[my_members, 0], X[my_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
Windy-Ground/scikit-learn | examples/model_selection/randomized_search.py | 201 | 3214 | """
=========================================================================
Comparing randomized search and grid search for hyperparameter estimation
=========================================================================
Compare randomized search and grid search for optimizing hyperparameters of a
random forest.
All parameters that influence the learning are searched simultaneously
(except for the number of estimators, which poses a time / quality tradeoff).
The randomized search and the grid search explore exactly the same space of
parameters. The result in parameter settings is quite similar, while the run
time for randomized search is drastically lower.
The performance is slightly worse for the randomized search, though this
is most likely a noise effect and would not carry over to a held-out test set.
Note that in practice, one would not search over this many different parameters
simultaneously using grid search, but pick only the ones deemed most important.
"""
print(__doc__)
import numpy as np
from time import time
from operator import itemgetter
from scipy.stats import randint as sp_randint
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.datasets import load_digits
from sklearn.ensemble import RandomForestClassifier
# get some data
digits = load_digits()
X, y = digits.data, digits.target
# build a classifier
clf = RandomForestClassifier(n_estimators=20)
# Utility function to report best scores
def report(grid_scores, n_top=3):
top_scores = sorted(grid_scores, key=itemgetter(1), reverse=True)[:n_top]
for i, score in enumerate(top_scores):
print("Model with rank: {0}".format(i + 1))
print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
score.mean_validation_score,
np.std(score.cv_validation_scores)))
print("Parameters: {0}".format(score.parameters))
print("")
# specify parameters and distributions to sample from
param_dist = {"max_depth": [3, None],
"max_features": sp_randint(1, 11),
"min_samples_split": sp_randint(1, 11),
"min_samples_leaf": sp_randint(1, 11),
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run randomized search
n_iter_search = 20
random_search = RandomizedSearchCV(clf, param_distributions=param_dist,
n_iter=n_iter_search)
start = time()
random_search.fit(X, y)
print("RandomizedSearchCV took %.2f seconds for %d candidates"
" parameter settings." % ((time() - start), n_iter_search))
report(random_search.grid_scores_)
# use a full grid over all parameters
param_grid = {"max_depth": [3, None],
"max_features": [1, 3, 10],
"min_samples_split": [1, 3, 10],
"min_samples_leaf": [1, 3, 10],
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run grid search
grid_search = GridSearchCV(clf, param_grid=param_grid)
start = time()
grid_search.fit(X, y)
print("GridSearchCV took %.2f seconds for %d candidate parameter settings."
% (time() - start, len(grid_search.grid_scores_)))
report(grid_search.grid_scores_)
| bsd-3-clause |
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/pandas/tests/plotting/test_converter.py | 14 | 7243 | import pytest
from datetime import datetime, date
import numpy as np
from pandas import Timestamp, Period, Index
from pandas.compat import u
import pandas.util.testing as tm
from pandas.tseries.offsets import Second, Milli, Micro, Day
from pandas.compat.numpy import np_datetime64_compat
converter = pytest.importorskip('pandas.plotting._converter')
def test_timtetonum_accepts_unicode():
assert (converter.time2num("00:01") == converter.time2num(u("00:01")))
class TestDateTimeConverter(object):
def setup_method(self, method):
self.dtc = converter.DatetimeConverter()
self.tc = converter.TimeFormatter(None)
def test_convert_accepts_unicode(self):
r1 = self.dtc.convert("12:22", None, None)
r2 = self.dtc.convert(u("12:22"), None, None)
assert (r1 == r2), "DatetimeConverter.convert should accept unicode"
def test_conversion(self):
rs = self.dtc.convert(['2012-1-1'], None, None)[0]
xp = datetime(2012, 1, 1).toordinal()
assert rs == xp
rs = self.dtc.convert('2012-1-1', None, None)
assert rs == xp
rs = self.dtc.convert(date(2012, 1, 1), None, None)
assert rs == xp
rs = self.dtc.convert(datetime(2012, 1, 1).toordinal(), None, None)
assert rs == xp
rs = self.dtc.convert('2012-1-1', None, None)
assert rs == xp
rs = self.dtc.convert(Timestamp('2012-1-1'), None, None)
assert rs == xp
# also testing datetime64 dtype (GH8614)
rs = self.dtc.convert(np_datetime64_compat('2012-01-01'), None, None)
assert rs == xp
rs = self.dtc.convert(np_datetime64_compat(
'2012-01-01 00:00:00+0000'), None, None)
assert rs == xp
rs = self.dtc.convert(np.array([
np_datetime64_compat('2012-01-01 00:00:00+0000'),
np_datetime64_compat('2012-01-02 00:00:00+0000')]), None, None)
assert rs[0] == xp
# we have a tz-aware date (constructed to that when we turn to utc it
# is the same as our sample)
ts = (Timestamp('2012-01-01')
.tz_localize('UTC')
.tz_convert('US/Eastern')
)
rs = self.dtc.convert(ts, None, None)
assert rs == xp
rs = self.dtc.convert(ts.to_pydatetime(), None, None)
assert rs == xp
rs = self.dtc.convert(Index([ts - Day(1), ts]), None, None)
assert rs[1] == xp
rs = self.dtc.convert(Index([ts - Day(1), ts]).to_pydatetime(),
None, None)
assert rs[1] == xp
def test_conversion_float(self):
decimals = 9
rs = self.dtc.convert(
Timestamp('2012-1-1 01:02:03', tz='UTC'), None, None)
xp = converter.dates.date2num(Timestamp('2012-1-1 01:02:03', tz='UTC'))
tm.assert_almost_equal(rs, xp, decimals)
rs = self.dtc.convert(
Timestamp('2012-1-1 09:02:03', tz='Asia/Hong_Kong'), None, None)
tm.assert_almost_equal(rs, xp, decimals)
rs = self.dtc.convert(datetime(2012, 1, 1, 1, 2, 3), None, None)
tm.assert_almost_equal(rs, xp, decimals)
def test_conversion_outofbounds_datetime(self):
# 2579
values = [date(1677, 1, 1), date(1677, 1, 2)]
rs = self.dtc.convert(values, None, None)
xp = converter.dates.date2num(values)
tm.assert_numpy_array_equal(rs, xp)
rs = self.dtc.convert(values[0], None, None)
xp = converter.dates.date2num(values[0])
assert rs == xp
values = [datetime(1677, 1, 1, 12), datetime(1677, 1, 2, 12)]
rs = self.dtc.convert(values, None, None)
xp = converter.dates.date2num(values)
tm.assert_numpy_array_equal(rs, xp)
rs = self.dtc.convert(values[0], None, None)
xp = converter.dates.date2num(values[0])
assert rs == xp
def test_time_formatter(self):
self.tc(90000)
def test_dateindex_conversion(self):
decimals = 9
for freq in ('B', 'L', 'S'):
dateindex = tm.makeDateIndex(k=10, freq=freq)
rs = self.dtc.convert(dateindex, None, None)
xp = converter.dates.date2num(dateindex._mpl_repr())
tm.assert_almost_equal(rs, xp, decimals)
def test_resolution(self):
def _assert_less(ts1, ts2):
val1 = self.dtc.convert(ts1, None, None)
val2 = self.dtc.convert(ts2, None, None)
if not val1 < val2:
raise AssertionError('{0} is not less than {1}.'.format(val1,
val2))
# Matplotlib's time representation using floats cannot distinguish
# intervals smaller than ~10 microsecond in the common range of years.
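        # (One matplotlib date unit is one day; near year-2012 ordinals
        # (~7.3e5) the float64 spacing is about 7.3e5 * 2.2e-16 = 1.6e-10
        # days, i.e. roughly 10-15 microseconds.)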
ts = Timestamp('2012-1-1')
_assert_less(ts, ts + Second())
_assert_less(ts, ts + Milli())
_assert_less(ts, ts + Micro(50))
def test_convert_nested(self):
        inner = [Timestamp('2017-01-01'), Timestamp('2017-01-02')]
data = [inner, inner]
result = self.dtc.convert(data, None, None)
expected = [self.dtc.convert(x, None, None) for x in data]
assert result == expected
class TestPeriodConverter(object):
def setup_method(self, method):
self.pc = converter.PeriodConverter()
class Axis(object):
pass
self.axis = Axis()
self.axis.freq = 'D'
def test_convert_accepts_unicode(self):
r1 = self.pc.convert("2012-1-1", None, self.axis)
r2 = self.pc.convert(u("2012-1-1"), None, self.axis)
assert r1 == r2
def test_conversion(self):
rs = self.pc.convert(['2012-1-1'], None, self.axis)[0]
xp = Period('2012-1-1').ordinal
assert rs == xp
rs = self.pc.convert('2012-1-1', None, self.axis)
assert rs == xp
rs = self.pc.convert([date(2012, 1, 1)], None, self.axis)[0]
assert rs == xp
rs = self.pc.convert(date(2012, 1, 1), None, self.axis)
assert rs == xp
rs = self.pc.convert([Timestamp('2012-1-1')], None, self.axis)[0]
assert rs == xp
rs = self.pc.convert(Timestamp('2012-1-1'), None, self.axis)
assert rs == xp
# FIXME
# rs = self.pc.convert(
# np_datetime64_compat('2012-01-01'), None, self.axis)
# assert rs == xp
#
# rs = self.pc.convert(
# np_datetime64_compat('2012-01-01 00:00:00+0000'),
# None, self.axis)
# assert rs == xp
#
# rs = self.pc.convert(np.array([
# np_datetime64_compat('2012-01-01 00:00:00+0000'),
# np_datetime64_compat('2012-01-02 00:00:00+0000')]),
# None, self.axis)
# assert rs[0] == xp
def test_integer_passthrough(self):
# GH9012
rs = self.pc.convert([0, 1], None, self.axis)
xp = [0, 1]
assert rs == xp
def test_convert_nested(self):
data = ['2012-1-1', '2012-1-2']
r1 = self.pc.convert([data, data], None, self.axis)
r2 = [self.pc.convert(data, None, self.axis) for _ in range(2)]
assert r1 == r2
| mit |
herilalaina/scikit-learn | examples/applications/plot_prediction_latency.py | 20 | 11519 | """
==================
Prediction Latency
==================
This is an example showing the prediction latency of various scikit-learn
estimators.
The goal is to measure the latency one can expect when doing predictions
either in bulk or atomic (i.e. one by one) mode.
The plots represent the distribution of the prediction latency as a boxplot.
"""
# Authors: Eustache Diemert <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import time
import gc
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from scipy.stats import scoreatpercentile
from sklearn.datasets.samples_generator import make_regression
from sklearn.ensemble.forest import RandomForestRegressor
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.svm.classes import SVR
from sklearn.utils import shuffle
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
def atomic_benchmark_estimator(estimator, X_test, verbose=False):
"""Measure runtime prediction of each instance."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_instances, dtype=np.float)
for i in range(n_instances):
instance = X_test[[i], :]
start = time.time()
estimator.predict(instance)
runtimes[i] = time.time() - start
if verbose:
print("atomic_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose):
"""Measure runtime prediction of the whole input."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_bulk_repeats, dtype=np.float)
for i in range(n_bulk_repeats):
start = time.time()
estimator.predict(X_test)
runtimes[i] = time.time() - start
runtimes = np.array(list(map(lambda x: x / float(n_instances), runtimes)))
if verbose:
print("bulk_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def benchmark_estimator(estimator, X_test, n_bulk_repeats=30, verbose=False):
"""
Measure runtimes of prediction in both atomic and bulk mode.
Parameters
----------
estimator : already trained estimator supporting `predict()`
X_test : test input
n_bulk_repeats : how many times to repeat when evaluating bulk mode
Returns
-------
atomic_runtimes, bulk_runtimes : a pair of `np.array` which contain the
runtimes in seconds.
"""
atomic_runtimes = atomic_benchmark_estimator(estimator, X_test, verbose)
bulk_runtimes = bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats,
verbose)
return atomic_runtimes, bulk_runtimes
def generate_dataset(n_train, n_test, n_features, noise=0.1, verbose=False):
"""Generate a regression dataset with the given parameters."""
if verbose:
print("generating dataset...")
X, y, coef = make_regression(n_samples=n_train + n_test,
n_features=n_features, noise=noise, coef=True)
random_seed = 13
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=n_train, random_state=random_seed)
X_train, y_train = shuffle(X_train, y_train, random_state=random_seed)
X_scaler = StandardScaler()
X_train = X_scaler.fit_transform(X_train)
X_test = X_scaler.transform(X_test)
y_scaler = StandardScaler()
y_train = y_scaler.fit_transform(y_train[:, None])[:, 0]
y_test = y_scaler.transform(y_test[:, None])[:, 0]
gc.collect()
if verbose:
print("ok")
return X_train, y_train, X_test, y_test
def boxplot_runtimes(runtimes, pred_type, configuration):
"""
Plot a new `Figure` with boxplots of prediction runtimes.
Parameters
----------
runtimes : list of `np.array` of latencies in micro-seconds
    pred_type : 'bulk' or 'atomic'
    configuration : benchmark configuration dict; its 'estimators' entry
        names the estimators that generated the runtimes
"""
fig, ax1 = plt.subplots(figsize=(10, 6))
bp = plt.boxplot(runtimes, )
cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
plt.setp(ax1, xticklabels=cls_infos)
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='red', marker='+')
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Prediction Time per Instance - %s, %d feats.' % (
pred_type.capitalize(),
configuration['n_features']))
ax1.set_ylabel('Prediction Time (us)')
plt.show()
def benchmark(configuration):
"""Run the whole benchmark."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
stats = {}
for estimator_conf in configuration['estimators']:
print("Benchmarking", estimator_conf['instance'])
estimator_conf['instance'].fit(X_train, y_train)
gc.collect()
a, b = benchmark_estimator(estimator_conf['instance'], X_test)
stats[estimator_conf['name']] = {'atomic': a, 'bulk': b}
cls_names = [estimator_conf['name'] for estimator_conf in configuration[
'estimators']]
runtimes = [1e6 * stats[clf_name]['atomic'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'atomic', configuration)
runtimes = [1e6 * stats[clf_name]['bulk'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'bulk (%d)' % configuration['n_test'],
configuration)
def n_feature_influence(estimators, n_train, n_test, n_features, percentile):
"""
Estimate influence of the number of features on prediction time.
Parameters
----------
estimators : dict of (name (str), estimator) to benchmark
    n_train : number of training instances (int)
    n_test : number of testing instances (int)
n_features : list of feature-space dimensionality to test (int)
percentile : percentile at which to measure the speed (int [0-100])
Returns:
--------
percentiles : dict(estimator_name,
dict(n_features, percentile_perf_in_us))
"""
percentiles = defaultdict(defaultdict)
for n in n_features:
print("benchmarking with %d features" % n)
X_train, y_train, X_test, y_test = generate_dataset(n_train, n_test, n)
for cls_name, estimator in estimators.items():
estimator.fit(X_train, y_train)
gc.collect()
runtimes = bulk_benchmark_estimator(estimator, X_test, 30, False)
percentiles[cls_name][n] = 1e6 * scoreatpercentile(runtimes,
percentile)
return percentiles
def plot_n_features_influence(percentiles, percentile):
fig, ax1 = plt.subplots(figsize=(10, 6))
colors = ['r', 'g', 'b']
for i, cls_name in enumerate(percentiles.keys()):
x = np.array(sorted([n for n in percentiles[cls_name].keys()]))
y = np.array([percentiles[cls_name][n] for n in x])
plt.plot(x, y, color=colors[i], )
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Evolution of Prediction Time with #Features')
ax1.set_xlabel('#Features')
ax1.set_ylabel('Prediction Time at %d%%-ile (us)' % percentile)
plt.show()
def benchmark_throughputs(configuration, duration_secs=0.1):
"""benchmark throughput for different estimators."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
throughputs = dict()
for estimator_config in configuration['estimators']:
estimator_config['instance'].fit(X_train, y_train)
start_time = time.time()
n_predictions = 0
while (time.time() - start_time) < duration_secs:
estimator_config['instance'].predict(X_test[[0]])
n_predictions += 1
throughputs[estimator_config['name']] = n_predictions / duration_secs
return throughputs
def plot_benchmark_throughput(throughputs, configuration):
fig, ax = plt.subplots(figsize=(10, 6))
colors = ['r', 'g', 'b']
cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
cls_values = [throughputs[estimator_conf['name']] for estimator_conf in
configuration['estimators']]
plt.bar(range(len(throughputs)), cls_values, width=0.5, color=colors)
ax.set_xticks(np.linspace(0.25, len(throughputs) - 0.75, len(throughputs)))
ax.set_xticklabels(cls_infos, fontsize=10)
ymax = max(cls_values) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('Throughput (predictions/sec)')
ax.set_title('Prediction Throughput for different estimators (%d '
'features)' % configuration['n_features'])
plt.show()
# #############################################################################
# Main code
start_time = time.time()
# #############################################################################
# Benchmark bulk/atomic prediction speed for various regressors
configuration = {
'n_train': int(1e3),
'n_test': int(1e2),
'n_features': int(1e2),
'estimators': [
{'name': 'Linear Model',
'instance': SGDRegressor(penalty='elasticnet', alpha=0.01,
l1_ratio=0.25, fit_intercept=True,
tol=1e-4),
'complexity_label': 'non-zero coefficients',
'complexity_computer': lambda clf: np.count_nonzero(clf.coef_)},
{'name': 'RandomForest',
'instance': RandomForestRegressor(),
'complexity_label': 'estimators',
'complexity_computer': lambda clf: clf.n_estimators},
{'name': 'SVR',
'instance': SVR(kernel='rbf'),
'complexity_label': 'support vectors',
'complexity_computer': lambda clf: len(clf.support_vectors_)},
]
}
benchmark(configuration)
# benchmark n_features influence on prediction speed
percentile = 90
percentiles = n_feature_influence({'ridge': Ridge()},
configuration['n_train'],
configuration['n_test'],
[100, 250, 500], percentile)
plot_n_features_influence(percentiles, percentile)
# benchmark throughput
throughputs = benchmark_throughputs(configuration)
plot_benchmark_throughput(throughputs, configuration)
stop_time = time.time()
print("example run in %.2fs" % (stop_time - start_time))
| bsd-3-clause |
pratapvardhan/pandas | pandas/io/sas/sas7bdat.py | 3 | 27470 | """
Read SAS7BDAT files
Based on code written by Jared Hobbs:
https://bitbucket.org/jaredhobbs/sas7bdat
See also:
https://github.com/BioStatMatt/sas7bdat
Partial documentation of the file format:
https://cran.r-project.org/web/packages/sas7bdat/vignettes/sas7bdat.pdf
Reference for binary data compression:
http://collaboration.cmc.ec.gc.ca/science/rpn/biblio/ddj/Website/articles/CUJ/1992/9210/ross/ross.htm
"""
import pandas as pd
from pandas import compat
from pandas.io.common import get_filepath_or_buffer, BaseIterator
from pandas.errors import EmptyDataError
import numpy as np
import struct
import pandas.io.sas.sas_constants as const
from pandas.io.sas._sas import Parser
class _subheader_pointer(object):
pass
class _column(object):
pass
# SAS7BDAT represents a SAS data file in SAS7BDAT format.
class SAS7BDATReader(BaseIterator):
"""
Read SAS files in SAS7BDAT format.
Parameters
----------
path_or_buf : path name or buffer
Name of SAS file or file-like object pointing to SAS file
contents.
index : column identifier, defaults to None
Column to use as index.
convert_dates : boolean, defaults to True
Attempt to convert dates to Pandas datetime values. Note that
some rarely used SAS date formats may be unsupported.
blank_missing : boolean, defaults to True
Convert empty strings to missing values (SAS uses blanks to
indicate missing character variables).
chunksize : int, defaults to None
Return SAS7BDATReader object for iterations, returns chunks
with given number of lines.
encoding : string, defaults to None
String encoding.
convert_text : bool, defaults to True
If False, text variables are left as raw bytes.
convert_header_text : bool, defaults to True
If False, header text, including column names, are left as raw
bytes.
"""
def __init__(self, path_or_buf, index=None, convert_dates=True,
blank_missing=True, chunksize=None, encoding=None,
convert_text=True, convert_header_text=True):
self.index = index
self.convert_dates = convert_dates
self.blank_missing = blank_missing
self.chunksize = chunksize
self.encoding = encoding
self.convert_text = convert_text
self.convert_header_text = convert_header_text
self.default_encoding = "latin-1"
self.compression = ""
self.column_names_strings = []
self.column_names = []
self.column_types = []
self.column_formats = []
self.columns = []
self._current_page_data_subheader_pointers = []
self._cached_page = None
self._column_data_lengths = []
self._column_data_offsets = []
self._current_row_in_file_index = 0
self._current_row_on_page_index = 0
self._current_row_in_file_index = 0
self._path_or_buf, _, _, _ = get_filepath_or_buffer(path_or_buf)
if isinstance(self._path_or_buf, compat.string_types):
self._path_or_buf = open(self._path_or_buf, 'rb')
self.handle = self._path_or_buf
self._get_properties()
self._parse_metadata()
def close(self):
try:
self.handle.close()
except AttributeError:
pass
def _get_properties(self):
# Check magic number
self._path_or_buf.seek(0)
self._cached_page = self._path_or_buf.read(288)
if self._cached_page[0:len(const.magic)] != const.magic:
self.close()
raise ValueError("magic number mismatch (not a SAS file?)")
# Get alignment information
align1, align2 = 0, 0
buf = self._read_bytes(const.align_1_offset, const.align_1_length)
if buf == const.u64_byte_checker_value:
align2 = const.align_2_value
self.U64 = True
self._int_length = 8
self._page_bit_offset = const.page_bit_offset_x64
self._subheader_pointer_length = const.subheader_pointer_length_x64
else:
self.U64 = False
self._page_bit_offset = const.page_bit_offset_x86
self._subheader_pointer_length = const.subheader_pointer_length_x86
self._int_length = 4
buf = self._read_bytes(const.align_2_offset, const.align_2_length)
if buf == const.align_1_checker_value:
align1 = const.align_2_value
total_align = align1 + align2
# Get endianness information
buf = self._read_bytes(const.endianness_offset,
const.endianness_length)
if buf == b'\x01':
self.byte_order = "<"
else:
self.byte_order = ">"
# Get encoding information
buf = self._read_bytes(const.encoding_offset, const.encoding_length)[0]
if buf in const.encoding_names:
self.file_encoding = const.encoding_names[buf]
else:
self.file_encoding = "unknown (code=%s)" % str(buf)
# Get platform information
buf = self._read_bytes(const.platform_offset, const.platform_length)
if buf == b'1':
self.platform = "unix"
elif buf == b'2':
self.platform = "windows"
else:
self.platform = "unknown"
buf = self._read_bytes(const.dataset_offset, const.dataset_length)
self.name = buf.rstrip(b'\x00 ')
if self.convert_header_text:
self.name = self.name.decode(
self.encoding or self.default_encoding)
buf = self._read_bytes(const.file_type_offset, const.file_type_length)
self.file_type = buf.rstrip(b'\x00 ')
if self.convert_header_text:
self.file_type = self.file_type.decode(
self.encoding or self.default_encoding)
# Timestamp is epoch 01/01/1960
epoch = pd.datetime(1960, 1, 1)
x = self._read_float(const.date_created_offset + align1,
const.date_created_length)
self.date_created = epoch + pd.to_timedelta(x, unit='s')
x = self._read_float(const.date_modified_offset + align1,
const.date_modified_length)
self.date_modified = epoch + pd.to_timedelta(x, unit='s')
self.header_length = self._read_int(const.header_size_offset + align1,
const.header_size_length)
# Read the rest of the header into cached_page.
buf = self._path_or_buf.read(self.header_length - 288)
self._cached_page += buf
if len(self._cached_page) != self.header_length:
self.close()
raise ValueError("The SAS7BDAT file appears to be truncated.")
self._page_length = self._read_int(const.page_size_offset + align1,
const.page_size_length)
self._page_count = self._read_int(const.page_count_offset + align1,
const.page_count_length)
buf = self._read_bytes(const.sas_release_offset + total_align,
const.sas_release_length)
self.sas_release = buf.rstrip(b'\x00 ')
if self.convert_header_text:
self.sas_release = self.sas_release.decode(
self.encoding or self.default_encoding)
buf = self._read_bytes(const.sas_server_type_offset + total_align,
const.sas_server_type_length)
self.server_type = buf.rstrip(b'\x00 ')
if self.convert_header_text:
self.server_type = self.server_type.decode(
self.encoding or self.default_encoding)
buf = self._read_bytes(const.os_version_number_offset + total_align,
const.os_version_number_length)
self.os_version = buf.rstrip(b'\x00 ')
if self.convert_header_text:
self.os_version = self.os_version.decode(
self.encoding or self.default_encoding)
buf = self._read_bytes(const.os_name_offset + total_align,
const.os_name_length)
buf = buf.rstrip(b'\x00 ')
if len(buf) > 0:
self.os_name = buf.decode(self.encoding or self.default_encoding)
else:
buf = self._read_bytes(const.os_maker_offset + total_align,
const.os_maker_length)
self.os_name = buf.rstrip(b'\x00 ')
if self.convert_header_text:
self.os_name = self.os_name.decode(
self.encoding or self.default_encoding)
def __next__(self):
da = self.read(nrows=self.chunksize or 1)
if da is None:
raise StopIteration
return da
# Read a single float of the given width (4 or 8).
def _read_float(self, offset, width):
if width not in (4, 8):
self.close()
raise ValueError("invalid float width")
buf = self._read_bytes(offset, width)
fd = "f" if width == 4 else "d"
return struct.unpack(self.byte_order + fd, buf)[0]
# Read a single signed integer of the given width (1, 2, 4 or 8).
def _read_int(self, offset, width):
if width not in (1, 2, 4, 8):
self.close()
raise ValueError("invalid int width")
buf = self._read_bytes(offset, width)
it = {1: "b", 2: "h", 4: "l", 8: "q"}[width]
iv = struct.unpack(self.byte_order + it, buf)[0]
return iv
def _read_bytes(self, offset, length):
if self._cached_page is None:
self._path_or_buf.seek(offset)
buf = self._path_or_buf.read(length)
if len(buf) < length:
self.close()
msg = "Unable to read {:d} bytes from file position {:d}."
raise ValueError(msg.format(length, offset))
return buf
else:
if offset + length > len(self._cached_page):
self.close()
raise ValueError("The cached page is too small.")
return self._cached_page[offset:offset + length]
def _parse_metadata(self):
done = False
while not done:
self._cached_page = self._path_or_buf.read(self._page_length)
if len(self._cached_page) <= 0:
break
if len(self._cached_page) != self._page_length:
self.close()
raise ValueError(
"Failed to read a meta data page from the SAS file.")
done = self._process_page_meta()
def _process_page_meta(self):
self._read_page_header()
pt = [const.page_meta_type, const.page_amd_type] + const.page_mix_types
if self._current_page_type in pt:
self._process_page_metadata()
return ((self._current_page_type in [256] + const.page_mix_types) or
(self._current_page_data_subheader_pointers is not None))
def _read_page_header(self):
bit_offset = self._page_bit_offset
tx = const.page_type_offset + bit_offset
self._current_page_type = self._read_int(tx, const.page_type_length)
tx = const.block_count_offset + bit_offset
self._current_page_block_count = self._read_int(
tx, const.block_count_length)
tx = const.subheader_count_offset + bit_offset
self._current_page_subheaders_count = (
self._read_int(tx, const.subheader_count_length))
def _process_page_metadata(self):
bit_offset = self._page_bit_offset
for i in range(self._current_page_subheaders_count):
pointer = self._process_subheader_pointers(
const.subheader_pointers_offset + bit_offset, i)
if pointer.length == 0:
continue
if pointer.compression == const.truncated_subheader_id:
continue
subheader_signature = self._read_subheader_signature(
pointer.offset)
subheader_index = (
self._get_subheader_index(subheader_signature,
pointer.compression, pointer.ptype))
self._process_subheader(subheader_index, pointer)
def _get_subheader_index(self, signature, compression, ptype):
index = const.subheader_signature_to_index.get(signature)
if index is None:
f1 = ((compression == const.compressed_subheader_id) or
(compression == 0))
f2 = (ptype == const.compressed_subheader_type)
if (self.compression != "") and f1 and f2:
index = const.SASIndex.data_subheader_index
else:
self.close()
raise ValueError("Unknown subheader signature")
return index
def _process_subheader_pointers(self, offset, subheader_pointer_index):
subheader_pointer_length = self._subheader_pointer_length
total_offset = (offset +
subheader_pointer_length * subheader_pointer_index)
subheader_offset = self._read_int(total_offset, self._int_length)
total_offset += self._int_length
subheader_length = self._read_int(total_offset, self._int_length)
total_offset += self._int_length
subheader_compression = self._read_int(total_offset, 1)
total_offset += 1
subheader_type = self._read_int(total_offset, 1)
x = _subheader_pointer()
x.offset = subheader_offset
x.length = subheader_length
x.compression = subheader_compression
x.ptype = subheader_type
return x
def _read_subheader_signature(self, offset):
subheader_signature = self._read_bytes(offset, self._int_length)
return subheader_signature
def _process_subheader(self, subheader_index, pointer):
offset = pointer.offset
length = pointer.length
if subheader_index == const.SASIndex.row_size_index:
processor = self._process_rowsize_subheader
elif subheader_index == const.SASIndex.column_size_index:
processor = self._process_columnsize_subheader
elif subheader_index == const.SASIndex.column_text_index:
processor = self._process_columntext_subheader
elif subheader_index == const.SASIndex.column_name_index:
processor = self._process_columnname_subheader
elif subheader_index == const.SASIndex.column_attributes_index:
processor = self._process_columnattributes_subheader
elif subheader_index == const.SASIndex.format_and_label_index:
processor = self._process_format_subheader
elif subheader_index == const.SASIndex.column_list_index:
processor = self._process_columnlist_subheader
elif subheader_index == const.SASIndex.subheader_counts_index:
processor = self._process_subheader_counts
elif subheader_index == const.SASIndex.data_subheader_index:
self._current_page_data_subheader_pointers.append(pointer)
return
else:
raise ValueError("unknown subheader index")
processor(offset, length)
def _process_rowsize_subheader(self, offset, length):
int_len = self._int_length
lcs_offset = offset
lcp_offset = offset
if self.U64:
lcs_offset += 682
lcp_offset += 706
else:
lcs_offset += 354
lcp_offset += 378
self.row_length = self._read_int(
offset + const.row_length_offset_multiplier * int_len, int_len)
self.row_count = self._read_int(
offset + const.row_count_offset_multiplier * int_len, int_len)
self.col_count_p1 = self._read_int(
offset + const.col_count_p1_multiplier * int_len, int_len)
self.col_count_p2 = self._read_int(
offset + const.col_count_p2_multiplier * int_len, int_len)
mx = const.row_count_on_mix_page_offset_multiplier * int_len
self._mix_page_row_count = self._read_int(offset + mx, int_len)
self._lcs = self._read_int(lcs_offset, 2)
self._lcp = self._read_int(lcp_offset, 2)
def _process_columnsize_subheader(self, offset, length):
int_len = self._int_length
offset += int_len
self.column_count = self._read_int(offset, int_len)
if (self.col_count_p1 + self.col_count_p2 !=
self.column_count):
print("Warning: column count mismatch (%d + %d != %d)\n",
self.col_count_p1, self.col_count_p2, self.column_count)
# Unknown purpose
def _process_subheader_counts(self, offset, length):
pass
def _process_columntext_subheader(self, offset, length):
offset += self._int_length
text_block_size = self._read_int(offset, const.text_block_size_length)
buf = self._read_bytes(offset, text_block_size)
cname_raw = buf[0:text_block_size].rstrip(b"\x00 ")
cname = cname_raw
if self.convert_header_text:
cname = cname.decode(self.encoding or self.default_encoding)
self.column_names_strings.append(cname)
if len(self.column_names_strings) == 1:
compression_literal = ""
for cl in const.compression_literals:
if cl in cname_raw:
compression_literal = cl
self.compression = compression_literal
offset -= self._int_length
offset1 = offset + 16
if self.U64:
offset1 += 4
buf = self._read_bytes(offset1, self._lcp)
compression_literal = buf.rstrip(b"\x00")
if compression_literal == "":
self._lcs = 0
offset1 = offset + 32
if self.U64:
offset1 += 4
buf = self._read_bytes(offset1, self._lcp)
self.creator_proc = buf[0:self._lcp]
elif compression_literal == const.rle_compression:
offset1 = offset + 40
if self.U64:
offset1 += 4
buf = self._read_bytes(offset1, self._lcp)
self.creator_proc = buf[0:self._lcp]
elif self._lcs > 0:
self._lcp = 0
offset1 = offset + 16
if self.U64:
offset1 += 4
buf = self._read_bytes(offset1, self._lcs)
self.creator_proc = buf[0:self._lcp]
if self.convert_header_text:
if hasattr(self, "creator_proc"):
self.creator_proc = self.creator_proc.decode(
self.encoding or self.default_encoding)
def _process_columnname_subheader(self, offset, length):
int_len = self._int_length
offset += int_len
column_name_pointers_count = (length - 2 * int_len - 12) // 8
for i in range(column_name_pointers_count):
text_subheader = offset + const.column_name_pointer_length * \
(i + 1) + const.column_name_text_subheader_offset
col_name_offset = offset + const.column_name_pointer_length * \
(i + 1) + const.column_name_offset_offset
col_name_length = offset + const.column_name_pointer_length * \
(i + 1) + const.column_name_length_offset
idx = self._read_int(
text_subheader, const.column_name_text_subheader_length)
col_offset = self._read_int(
col_name_offset, const.column_name_offset_length)
col_len = self._read_int(
col_name_length, const.column_name_length_length)
name_str = self.column_names_strings[idx]
self.column_names.append(name_str[col_offset:col_offset + col_len])
def _process_columnattributes_subheader(self, offset, length):
int_len = self._int_length
column_attributes_vectors_count = (
length - 2 * int_len - 12) // (int_len + 8)
self.column_types = np.empty(
column_attributes_vectors_count, dtype=np.dtype('S1'))
self._column_data_lengths = np.empty(
column_attributes_vectors_count, dtype=np.int64)
self._column_data_offsets = np.empty(
column_attributes_vectors_count, dtype=np.int64)
for i in range(column_attributes_vectors_count):
col_data_offset = (offset + int_len +
const.column_data_offset_offset +
i * (int_len + 8))
col_data_len = (offset + 2 * int_len +
const.column_data_length_offset +
i * (int_len + 8))
col_types = (offset + 2 * int_len +
const.column_type_offset + i * (int_len + 8))
x = self._read_int(col_data_offset, int_len)
self._column_data_offsets[i] = x
x = self._read_int(col_data_len, const.column_data_length_length)
self._column_data_lengths[i] = x
x = self._read_int(col_types, const.column_type_length)
if x == 1:
self.column_types[i] = b'd'
else:
self.column_types[i] = b's'
def _process_columnlist_subheader(self, offset, length):
# unknown purpose
pass
def _process_format_subheader(self, offset, length):
int_len = self._int_length
text_subheader_format = (
offset +
const.column_format_text_subheader_index_offset +
3 * int_len)
col_format_offset = (offset +
const.column_format_offset_offset +
3 * int_len)
col_format_len = (offset +
const.column_format_length_offset +
3 * int_len)
text_subheader_label = (
offset +
const.column_label_text_subheader_index_offset +
3 * int_len)
col_label_offset = (offset +
const.column_label_offset_offset +
3 * int_len)
col_label_len = offset + const.column_label_length_offset + 3 * int_len
x = self._read_int(text_subheader_format,
const.column_format_text_subheader_index_length)
format_idx = min(x, len(self.column_names_strings) - 1)
format_start = self._read_int(
col_format_offset, const.column_format_offset_length)
format_len = self._read_int(
col_format_len, const.column_format_length_length)
label_idx = self._read_int(
text_subheader_label,
const.column_label_text_subheader_index_length)
label_idx = min(label_idx, len(self.column_names_strings) - 1)
label_start = self._read_int(
col_label_offset, const.column_label_offset_length)
label_len = self._read_int(col_label_len,
const.column_label_length_length)
label_names = self.column_names_strings[label_idx]
column_label = label_names[label_start: label_start + label_len]
format_names = self.column_names_strings[format_idx]
column_format = format_names[format_start: format_start + format_len]
current_column_number = len(self.columns)
col = _column()
col.col_id = current_column_number
col.name = self.column_names[current_column_number]
col.label = column_label
col.format = column_format
col.ctype = self.column_types[current_column_number]
col.length = self._column_data_lengths[current_column_number]
self.column_formats.append(column_format)
self.columns.append(col)
def read(self, nrows=None):
if (nrows is None) and (self.chunksize is not None):
nrows = self.chunksize
elif nrows is None:
nrows = self.row_count
if len(self.column_types) == 0:
self.close()
raise EmptyDataError("No columns to parse from file")
if self._current_row_in_file_index >= self.row_count:
return None
m = self.row_count - self._current_row_in_file_index
if nrows > m:
nrows = m
nd = (self.column_types == b'd').sum()
ns = (self.column_types == b's').sum()
self._string_chunk = np.empty((ns, nrows), dtype=np.object)
self._byte_chunk = np.empty((nd, 8 * nrows), dtype=np.uint8)
self._current_row_in_chunk_index = 0
p = Parser(self)
p.read(nrows)
rslt = self._chunk_to_dataframe()
if self.index is not None:
rslt = rslt.set_index(self.index)
return rslt
def _read_next_page(self):
self._current_page_data_subheader_pointers = []
self._cached_page = self._path_or_buf.read(self._page_length)
if len(self._cached_page) <= 0:
return True
elif len(self._cached_page) != self._page_length:
self.close()
msg = ("failed to read complete page from file "
"(read {:d} of {:d} bytes)")
raise ValueError(msg.format(len(self._cached_page),
self._page_length))
self._read_page_header()
if self._current_page_type == const.page_meta_type:
self._process_page_metadata()
pt = [const.page_meta_type, const.page_data_type]
        pt += const.page_mix_types
if self._current_page_type not in pt:
return self._read_next_page()
return False
def _chunk_to_dataframe(self):
n = self._current_row_in_chunk_index
m = self._current_row_in_file_index
ix = range(m - n, m)
rslt = pd.DataFrame(index=ix)
js, jb = 0, 0
for j in range(self.column_count):
name = self.column_names[j]
if self.column_types[j] == b'd':
rslt[name] = self._byte_chunk[jb, :].view(
dtype=self.byte_order + 'd')
rslt[name] = np.asarray(rslt[name], dtype=np.float64)
if self.convert_dates:
unit = None
if self.column_formats[j] in const.sas_date_formats:
unit = 'd'
elif self.column_formats[j] in const.sas_datetime_formats:
unit = 's'
if unit:
rslt[name] = pd.to_datetime(rslt[name], unit=unit,
origin="1960-01-01")
jb += 1
elif self.column_types[j] == b's':
rslt[name] = self._string_chunk[js, :]
if self.convert_text and (self.encoding is not None):
rslt[name] = rslt[name].str.decode(
self.encoding or self.default_encoding)
if self.blank_missing:
ii = rslt[name].str.len() == 0
rslt.loc[ii, name] = np.nan
js += 1
else:
self.close()
raise ValueError("unknown column type %s" %
self.column_types[j])
return rslt
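# The helper below is a minimal usage sketch, not part of the reader itself: it
# shows how the class above is normally reached through the public
# ``pandas.read_sas`` entry point (assumed here to accept ``format``,
# ``encoding`` and ``chunksize``). The ``path`` argument is a caller-supplied
# .sas7bdat file and the chunk size is arbitrary.
def _example_read_sas_in_chunks(path):
    chunks = []
    reader = pd.read_sas(path, format="sas7bdat", encoding="latin-1",
                         chunksize=1000)
    for chunk in reader:
        # each chunk is a DataFrame of at most 1000 rows
        chunks.append(chunk)
    reader.close()
    return pd.concat(chunks, ignore_index=True)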
| bsd-3-clause |
astrofrog/numpy | numpy/lib/recfunctions.py | 13 | 34877 | """
Collection of utilities to manipulate structured arrays.
Most of these functions were initially implemented by John Hunter for matplotlib.
They have been rewritten and extended for convenience.
"""
import sys
import itertools
import numpy as np
import numpy.ma as ma
from numpy import ndarray, recarray
from numpy.ma import MaskedArray
from numpy.ma.mrecords import MaskedRecords
from numpy.lib._iotools import _is_string_like
_check_fill_value = np.ma.core._check_fill_value
__all__ = ['append_fields',
'drop_fields',
'find_duplicates',
'get_fieldstructure',
'join_by',
'merge_arrays',
'rec_append_fields', 'rec_drop_fields', 'rec_join',
'recursive_fill_fields', 'rename_fields',
'stack_arrays',
]
def recursive_fill_fields(input, output):
"""
Fills fields from output with fields from input,
with support for nested structures.
Parameters
----------
input : ndarray
Input array.
output : ndarray
Output array.
Notes
-----
* `output` should be at least the same size as `input`
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
>>> b = np.zeros((3,), dtype=a.dtype)
>>> rfn.recursive_fill_fields(a, b)
array([(1, 10.0), (2, 20.0), (0, 0.0)],
dtype=[('A', '<i4'), ('B', '<f8')])
"""
newdtype = output.dtype
for field in newdtype.names:
try:
current = input[field]
except ValueError:
continue
if current.dtype.names:
recursive_fill_fields(current, output[field])
else:
output[field][:len(current)] = current
return output
def get_names(adtype):
"""
Returns the field names of the input datatype as a tuple.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names(np.empty((1,), dtype=int)) is None
True
>>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)]))
('A', 'B')
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names(adtype)
('a', ('b', ('ba', 'bb')))
"""
listnames = []
names = adtype.names
for name in names:
current = adtype[name]
if current.names:
listnames.append((name, tuple(get_names(current))))
else:
listnames.append(name)
return tuple(listnames) or None
def get_names_flat(adtype):
"""
    Returns the field names of the input datatype as a tuple. Nested structures
    are flattened beforehand.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names_flat(np.empty((1,), dtype=int)) is None
True
>>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', float)]))
('A', 'B')
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names_flat(adtype)
('a', 'b', 'ba', 'bb')
"""
listnames = []
names = adtype.names
for name in names:
listnames.append(name)
current = adtype[name]
if current.names:
listnames.extend(get_names_flat(current))
return tuple(listnames) or None
def flatten_descr(ndtype):
"""
Flatten a structured data-type description.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = np.dtype([('a', '<i4'), ('b', [('ba', '<f8'), ('bb', '<i4')])])
>>> rfn.flatten_descr(ndtype)
(('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32')))
"""
names = ndtype.names
if names is None:
return ndtype.descr
else:
descr = []
for field in names:
(typ, _) = ndtype.fields[field]
if typ.names:
descr.extend(flatten_descr(typ))
else:
descr.append((field, typ))
return tuple(descr)
def zip_descr(seqarrays, flatten=False):
"""
Combine the dtype description of a series of arrays.
Parameters
----------
seqarrays : sequence of arrays
Sequence of arrays
flatten : {boolean}, optional
Whether to collapse nested descriptions.
"""
newdtype = []
if flatten:
for a in seqarrays:
newdtype.extend(flatten_descr(a.dtype))
else:
for a in seqarrays:
current = a.dtype
names = current.names or ()
if len(names) > 1:
newdtype.append(('', current.descr))
else:
newdtype.extend(current.descr)
return np.dtype(newdtype).descr
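# A minimal usage sketch for ``zip_descr`` (the integer/float type codes shown
# assume a 64-bit platform; the exact strings are platform dependent):
#
# >>> a = np.zeros(3, dtype=[('a', int)])
# >>> b = np.zeros(3, dtype=[('b', float)])
# >>> zip_descr((a, b))
# [('a', '<i8'), ('b', '<f8')]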
def get_fieldstructure(adtype, lastname=None, parents=None,):
"""
Returns a dictionary with fields as keys and a list of parent fields as values.
This function is used to simplify access to fields nested in other fields.
Parameters
----------
adtype : np.dtype
Input datatype
lastname : optional
Last processed field name (used internally during recursion).
parents : dictionary
        Dictionary of parent fields (used internally during recursion).
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = np.dtype([('A', int),
... ('B', [('BA', int),
... ('BB', [('BBA', int), ('BBB', int)])])])
>>> rfn.get_fieldstructure(ndtype)
... # XXX: possible regression, order of BBA and BBB is swapped
{'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
"""
if parents is None:
parents = {}
names = adtype.names
for name in names:
current = adtype[name]
if current.names:
if lastname:
parents[name] = [lastname, ]
else:
parents[name] = []
parents.update(get_fieldstructure(current, name, parents))
else:
lastparent = [_ for _ in (parents.get(lastname, []) or [])]
if lastparent:
# if (lastparent[-1] != lastname):
lastparent.append(lastname)
elif lastname:
lastparent = [lastname, ]
parents[name] = lastparent or []
return parents or None
def _izip_fields_flat(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays,
collapsing any nested structure.
"""
for element in iterable:
if isinstance(element, np.void):
for f in _izip_fields_flat(tuple(element)):
yield f
else:
yield element
def _izip_fields(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays.
"""
for element in iterable:
if hasattr(element, '__iter__') and not isinstance(element, basestring):
for f in _izip_fields(element):
yield f
elif isinstance(element, np.void) and len(tuple(element)) == 1:
for f in _izip_fields(element):
yield f
else:
yield element
def izip_records(seqarrays, fill_value=None, flatten=True):
"""
Returns an iterator of concatenated items from a sequence of arrays.
Parameters
----------
    seqarrays : sequence of arrays
Sequence of arrays.
fill_value : {None, integer}
Value used to pad shorter iterables.
    flatten : {True, False}, optional
        Whether to collapse nested fields into a flat sequence.
"""
# OK, that's a complete ripoff from Python2.6 itertools.izip_longest
def sentinel(counter=([fill_value] * (len(seqarrays) - 1)).pop):
"Yields the fill_value or raises IndexError"
yield counter()
#
fillers = itertools.repeat(fill_value)
iters = [itertools.chain(it, sentinel(), fillers) for it in seqarrays]
# Should we flatten the items, or just use a nested approach
if flatten:
zipfunc = _izip_fields_flat
else:
zipfunc = _izip_fields
#
try:
for tup in itertools.izip(*iters):
yield tuple(zipfunc(tup))
except IndexError:
pass
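# A minimal usage sketch for ``izip_records`` (Python 2 era, like the module
# itself; one-element structured arrays keep the output short):
#
# >>> a = np.array([(1, 10.)], dtype=[('A', int), ('B', float)])
# >>> b = np.array([(2,)], dtype=[('C', int)])
# >>> list(izip_records((a, b), flatten=True))
# [(1, 10.0, 2)]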
def _fix_output(output, usemask=True, asrecarray=False):
"""
Private function: return a recarray, a ndarray, a MaskedArray
or a MaskedRecords depending on the input parameters
"""
if not isinstance(output, MaskedArray):
usemask = False
if usemask:
if asrecarray:
output = output.view(MaskedRecords)
else:
output = ma.filled(output)
if asrecarray:
output = output.view(recarray)
return output
def _fix_defaults(output, defaults=None):
"""
Update the fill_value and masked data of `output`
from the default given in a dictionary defaults.
"""
names = output.dtype.names
(data, mask, fill_value) = (output.data, output.mask, output.fill_value)
for (k, v) in (defaults or {}).iteritems():
if k in names:
fill_value[k] = v
data[k][mask[k]] = v
return output
def merge_arrays(seqarrays,
fill_value= -1, flatten=False, usemask=False, asrecarray=False):
"""
Merge arrays field by field.
Parameters
----------
seqarrays : sequence of ndarrays
Sequence of arrays
fill_value : {float}, optional
Filling value used to pad missing data on the shorter arrays.
flatten : {False, True}, optional
Whether to collapse nested fields.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray (MaskedRecords) or not.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])))
masked_array(data = [(1, 10.0) (2, 20.0) (--, 30.0)],
mask = [(False, False) (False, False) (True, False)],
fill_value = (999999, 1e+20),
dtype = [('f0', '<i4'), ('f1', '<f8')])
>>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])),
... usemask=False)
array([(1, 10.0), (2, 20.0), (-1, 30.0)],
dtype=[('f0', '<i4'), ('f1', '<f8')])
>>> rfn.merge_arrays((np.array([1, 2]).view([('a', int)]),
... np.array([10., 20., 30.])),
... usemask=False, asrecarray=True)
rec.array([(1, 10.0), (2, 20.0), (-1, 30.0)],
dtype=[('a', '<i4'), ('f1', '<f8')])
Notes
-----
    * Without a mask, the missing value will be filled with something
      depending on its corresponding type:
-1 for integers
-1.0 for floating point numbers
'-' for characters
'-1' for strings
True for boolean values
* XXX: I just obtained these values empirically
"""
# Only one item in the input sequence ?
if (len(seqarrays) == 1):
seqarrays = np.asanyarray(seqarrays[0])
# Do we have a single ndarray as input ?
if isinstance(seqarrays, (ndarray, np.void)):
seqdtype = seqarrays.dtype
if (not flatten) or \
(zip_descr((seqarrays,), flatten=True) == seqdtype.descr):
            # Minimal processing needed: just make sure everything's a-ok
seqarrays = seqarrays.ravel()
# Make sure we have named fields
if not seqdtype.names:
seqdtype = [('', seqdtype)]
# Find what type of array we must return
if usemask:
if asrecarray:
seqtype = MaskedRecords
else:
seqtype = MaskedArray
elif asrecarray:
seqtype = recarray
else:
seqtype = ndarray
return seqarrays.view(dtype=seqdtype, type=seqtype)
else:
seqarrays = (seqarrays,)
else:
# Make sure we have arrays in the input sequence
seqarrays = map(np.asanyarray, seqarrays)
# Find the sizes of the inputs and their maximum
sizes = tuple(a.size for a in seqarrays)
maxlength = max(sizes)
# Get the dtype of the output (flattening if needed)
newdtype = zip_descr(seqarrays, flatten=flatten)
# Initialize the sequences for data and mask
seqdata = []
seqmask = []
# If we expect some kind of MaskedArray, make a special loop.
if usemask:
for (a, n) in itertools.izip(seqarrays, sizes):
nbmissing = (maxlength - n)
# Get the data and mask
data = a.ravel().__array__()
mask = ma.getmaskarray(a).ravel()
# Get the filling value (if needed)
if nbmissing:
fval = _check_fill_value(fill_value, a.dtype)
if isinstance(fval, (ndarray, np.void)):
if len(fval.dtype) == 1:
fval = fval.item()[0]
fmsk = True
else:
fval = np.array(fval, dtype=a.dtype, ndmin=1)
fmsk = np.ones((1,), dtype=mask.dtype)
else:
fval = None
fmsk = True
# Store an iterator padding the input to the expected length
seqdata.append(itertools.chain(data, [fval] * nbmissing))
seqmask.append(itertools.chain(mask, [fmsk] * nbmissing))
# Create an iterator for the data
data = tuple(izip_records(seqdata, flatten=flatten))
output = ma.array(np.fromiter(data, dtype=newdtype, count=maxlength),
mask=list(izip_records(seqmask, flatten=flatten)))
if asrecarray:
output = output.view(MaskedRecords)
else:
# Same as before, without the mask we don't need...
for (a, n) in itertools.izip(seqarrays, sizes):
nbmissing = (maxlength - n)
data = a.ravel().__array__()
if nbmissing:
fval = _check_fill_value(fill_value, a.dtype)
if isinstance(fval, (ndarray, np.void)):
if len(fval.dtype) == 1:
fval = fval.item()[0]
else:
fval = np.array(fval, dtype=a.dtype, ndmin=1)
else:
fval = None
seqdata.append(itertools.chain(data, [fval] * nbmissing))
output = np.fromiter(tuple(izip_records(seqdata, flatten=flatten)),
dtype=newdtype, count=maxlength)
if asrecarray:
output = output.view(recarray)
# And we're done...
return output
def drop_fields(base, drop_names, usemask=True, asrecarray=False):
"""
Return a new array with fields in `drop_names` dropped.
Nested fields are supported.
Parameters
----------
base : array
Input array
drop_names : string or sequence
String or sequence of strings corresponding to the names of the fields
to drop.
usemask : {False, True}, optional
Whether to return a masked array or not.
    asrecarray : {False, True}, optional
Whether to return a recarray or a mrecarray (`asrecarray=True`) or
a plain ndarray or masked array with flexible dtype (`asrecarray=False`)
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
... dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
>>> rfn.drop_fields(a, 'a')
array([((2.0, 3),), ((5.0, 6),)],
dtype=[('b', [('ba', '<f8'), ('bb', '<i4')])])
>>> rfn.drop_fields(a, 'ba')
array([(1, (3,)), (4, (6,))],
dtype=[('a', '<i4'), ('b', [('bb', '<i4')])])
>>> rfn.drop_fields(a, ['ba', 'bb'])
array([(1,), (4,)],
dtype=[('a', '<i4')])
"""
if _is_string_like(drop_names):
drop_names = [drop_names, ]
else:
drop_names = set(drop_names)
#
def _drop_descr(ndtype, drop_names):
names = ndtype.names
newdtype = []
for name in names:
current = ndtype[name]
if name in drop_names:
continue
if current.names:
descr = _drop_descr(current, drop_names)
if descr:
newdtype.append((name, descr))
else:
newdtype.append((name, current))
return newdtype
#
newdtype = _drop_descr(base.dtype, drop_names)
if not newdtype:
return None
#
output = np.empty(base.shape, dtype=newdtype)
output = recursive_fill_fields(base, output)
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
def rec_drop_fields(base, drop_names):
"""
Returns a new numpy.recarray with fields in `drop_names` dropped.
"""
return drop_fields(base, drop_names, usemask=False, asrecarray=True)
def rename_fields(base, namemapper):
"""
Rename the fields from a flexible-datatype ndarray or recarray.
Nested fields are supported.
Parameters
----------
base : ndarray
Input array whose fields must be modified.
namemapper : dictionary
Dictionary mapping old field names to their new version.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
... dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])])
>>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'})
array([(1, (2.0, [3.0, 30.0])), (4, (5.0, [6.0, 60.0]))],
dtype=[('A', '<i4'), ('b', [('ba', '<f8'), ('BB', '<f8', 2)])])
"""
def _recursive_rename_fields(ndtype, namemapper):
newdtype = []
for name in ndtype.names:
newname = namemapper.get(name, name)
current = ndtype[name]
if current.names:
newdtype.append((newname,
_recursive_rename_fields(current, namemapper)))
else:
newdtype.append((newname, current))
return newdtype
newdtype = _recursive_rename_fields(base.dtype, namemapper)
return base.view(newdtype)
def append_fields(base, names, data, dtypes=None,
fill_value= -1, usemask=True, asrecarray=False):
"""
Add new fields to an existing array.
The names of the fields are given with the `names` arguments,
the corresponding values with the `data` arguments.
If a single field is appended, `names`, `data` and `dtypes` do not have
to be lists but just values.
Parameters
----------
base : array
Input array to extend.
names : string, sequence
String or sequence of strings corresponding to the names
of the new fields.
data : array or sequence of arrays
Array or sequence of arrays storing the fields to add to the base.
dtypes : sequence of datatypes, optional
Datatype or sequence of datatypes.
If None, the datatypes are estimated from the `data`.
fill_value : {float}, optional
Filling value used to pad missing data on the shorter arrays.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray (MaskedRecords) or not.
"""
# Check the names
if isinstance(names, (tuple, list)):
if len(names) != len(data):
msg = "The number of arrays does not match the number of names"
raise ValueError(msg)
elif isinstance(names, basestring):
names = [names, ]
data = [data, ]
#
if dtypes is None:
data = [np.array(a, copy=False, subok=True) for a in data]
data = [a.view([(name, a.dtype)]) for (name, a) in zip(names, data)]
else :
if not isinstance(dtypes, (tuple, list)):
dtypes = [dtypes, ]
if len(data) != len(dtypes):
if len(dtypes) == 1:
dtypes = dtypes * len(data)
else:
msg = "The dtypes argument must be None, a dtype, or a list."
raise ValueError(msg)
data = [np.array(a, copy=False, subok=True, dtype=d).view([(n, d)])
for (a, n, d) in zip(data, names, dtypes)]
#
base = merge_arrays(base, usemask=usemask, fill_value=fill_value)
if len(data) > 1:
data = merge_arrays(data, flatten=True, usemask=usemask,
fill_value=fill_value)
else:
data = data.pop()
#
output = ma.masked_all(max(len(base), len(data)),
dtype=base.dtype.descr + data.dtype.descr)
output = recursive_fill_fields(base, output)
output = recursive_fill_fields(data, output)
#
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
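# A minimal usage sketch for ``append_fields`` (field values are arbitrary and
# the integer type codes shown assume a 64-bit platform):
#
# >>> from numpy.lib import recfunctions as rfn
# >>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
# >>> rfn.append_fields(a, 'C', data=[100, 200], usemask=False)
# array([(1, 10.0, 100), (2, 20.0, 200)],
#       dtype=[('A', '<i8'), ('B', '<f8'), ('C', '<i8')])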
def rec_append_fields(base, names, data, dtypes=None):
"""
Add new fields to an existing array.
The names of the fields are given with the `names` arguments,
the corresponding values with the `data` arguments.
If a single field is appended, `names`, `data` and `dtypes` do not have
to be lists but just values.
Parameters
----------
base : array
Input array to extend.
names : string, sequence
String or sequence of strings corresponding to the names
of the new fields.
data : array or sequence of arrays
Array or sequence of arrays storing the fields to add to the base.
dtypes : sequence of datatypes, optional
Datatype or sequence of datatypes.
If None, the datatypes are estimated from the `data`.
See Also
--------
append_fields
Returns
-------
appended_array : np.recarray
"""
return append_fields(base, names, data=data, dtypes=dtypes,
asrecarray=True, usemask=False)
def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
autoconvert=False):
"""
    Superposes arrays field by field.
Parameters
----------
    arrays : array or sequence
Sequence of input arrays.
defaults : dictionary, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
        Whether to return a MaskedArray (or MaskedRecords if `asrecarray==True`)
or a ndarray.
asrecarray : {False, True}, optional
Whether to return a recarray (or MaskedRecords if `usemask==True`) or
just a flexible-type ndarray.
autoconvert : {False, True}, optional
        Whether to automatically cast the type of the field to the maximum.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> x = np.array([1, 2,])
>>> rfn.stack_arrays(x) is x
True
>>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)])
>>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
... dtype=[('A', '|S3'), ('B', float), ('C', float)])
>>> test = rfn.stack_arrays((z,zz))
>>> test
masked_array(data = [('A', 1.0, --) ('B', 2.0, --) ('a', 10.0, 100.0) ('b', 20.0, 200.0)
('c', 30.0, 300.0)],
mask = [(False, False, True) (False, False, True) (False, False, False)
(False, False, False) (False, False, False)],
fill_value = ('N/A', 1e+20, 1e+20),
dtype = [('A', '|S3'), ('B', '<f8'), ('C', '<f8')])
"""
if isinstance(arrays, ndarray):
return arrays
elif len(arrays) == 1:
return arrays[0]
seqarrays = [np.asanyarray(a).ravel() for a in arrays]
nrecords = [len(a) for a in seqarrays]
ndtype = [a.dtype for a in seqarrays]
fldnames = [d.names for d in ndtype]
#
dtype_l = ndtype[0]
newdescr = dtype_l.descr
names = [_[0] for _ in newdescr]
for dtype_n in ndtype[1:]:
for descr in dtype_n.descr:
name = descr[0] or ''
if name not in names:
newdescr.append(descr)
names.append(name)
else:
nameidx = names.index(name)
current_descr = newdescr[nameidx]
if autoconvert:
if np.dtype(descr[1]) > np.dtype(current_descr[-1]):
current_descr = list(current_descr)
current_descr[-1] = descr[1]
newdescr[nameidx] = tuple(current_descr)
elif descr[1] != current_descr[-1]:
raise TypeError("Incompatible type '%s' <> '%s'" % \
(dict(newdescr)[name], descr[1]))
# Only one field: use concatenate
if len(newdescr) == 1:
output = ma.concatenate(seqarrays)
else:
#
output = ma.masked_all((np.sum(nrecords),), newdescr)
offset = np.cumsum(np.r_[0, nrecords])
seen = []
for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]):
names = a.dtype.names
if names is None:
output['f%i' % len(seen)][i:j] = a
else:
for name in n:
output[name][i:j] = a[name]
if name not in seen:
seen.append(name)
#
return _fix_output(_fix_defaults(output, defaults),
usemask=usemask, asrecarray=asrecarray)
def find_duplicates(a, key=None, ignoremask=True, return_index=False):
"""
Find the duplicates in a structured array along a given key
Parameters
----------
a : array-like
Input array
key : {string, None}, optional
Name of the fields along which to check the duplicates.
If None, the search is performed by records
ignoremask : {True, False}, optional
Whether masked data should be discarded or considered as duplicates.
return_index : {False, True}, optional
Whether to return the indices of the duplicated values.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = [('a', int)]
>>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3],
... mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
>>> rfn.find_duplicates(a, ignoremask=True, return_index=True)
... # XXX: judging by the output, the ignoremask flag has no effect
"""
a = np.asanyarray(a).ravel()
# Get a dictionary of fields
fields = get_fieldstructure(a.dtype)
# Get the sorting data (by selecting the corresponding field)
base = a
if key:
for f in fields[key]:
base = base[f]
base = base[key]
# Get the sorting indices and the sorted data
sortidx = base.argsort()
sortedbase = base[sortidx]
sorteddata = sortedbase.filled()
# Compare the sorting data
flag = (sorteddata[:-1] == sorteddata[1:])
# If masked data must be ignored, set the flag to false where needed
if ignoremask:
sortedmask = sortedbase.recordmask
flag[sortedmask[1:]] = False
flag = np.concatenate(([False], flag))
# We need to take the point on the left as well (else we're missing it)
flag[:-1] = flag[:-1] + flag[1:]
duplicates = a[sortidx][flag]
if return_index:
return (duplicates, sortidx[flag])
else:
return duplicates
def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None, usemask=True, asrecarray=False):
"""
Join arrays `r1` and `r2` on key `key`.
The key should be either a string or a sequence of string corresponding
to the fields used to join the array.
An exception is raised if the `key` field cannot be found in the two input
arrays.
Neither `r1` nor `r2` should have any duplicates along `key`: the presence
of duplicates will make the output quite unreliable. Note that duplicates
are not looked for by the algorithm.
Parameters
----------
key : {string, sequence}
A string or a sequence of strings corresponding to the fields used
for comparison.
r1, r2 : arrays
Structured arrays.
jointype : {'inner', 'outer', 'leftouter'}, optional
If 'inner', returns the elements common to both r1 and r2.
        If 'outer', returns the common elements as well as the elements of r1
        not in r2 and the elements of r2 not in r1.
If 'leftouter', returns the common elements and the elements of r1 not
in r2.
r1postfix : string, optional
        String appended to the names of the fields of r1 that are present in r2
        but not in the key.
r2postfix : string, optional
        String appended to the names of the fields of r2 that are present in r1
        but not in the key.
defaults : {dictionary}, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
        Whether to return a MaskedArray (or MaskedRecords if `asrecarray==True`)
or a ndarray.
asrecarray : {False, True}, optional
Whether to return a recarray (or MaskedRecords if `usemask==True`) or
just a flexible-type ndarray.
Notes
-----
* The output is sorted along the key.
* A temporary array is formed by dropping the fields not in the key for the
two arrays and concatenating the result. This array is then sorted, and
the common entries selected. The output is constructed by filling the fields
with the selected entries. Matching is not preserved if there are some
duplicates...
"""
# Check jointype
if jointype not in ('inner', 'outer', 'leftouter'):
raise ValueError("The 'jointype' argument should be in 'inner', "\
"'outer' or 'leftouter' (got '%s' instead)" % jointype)
# If we have a single key, put it in a tuple
if isinstance(key, basestring):
key = (key,)
# Check the keys
for name in key:
if name not in r1.dtype.names:
raise ValueError('r1 does not have key field %s' % name)
if name not in r2.dtype.names:
raise ValueError('r2 does not have key field %s' % name)
# Make sure we work with ravelled arrays
r1 = r1.ravel()
r2 = r2.ravel()
(nb1, nb2) = (len(r1), len(r2))
(r1names, r2names) = (r1.dtype.names, r2.dtype.names)
# Check the names for collision
if (set.intersection(set(r1names),set(r2names)).difference(key) and
not (r1postfix or r2postfix)):
msg = "r1 and r2 contain common names, r1postfix and r2postfix "
msg += "can't be empty"
raise ValueError(msg)
# Make temporary arrays of just the keys
r1k = drop_fields(r1, [n for n in r1names if n not in key])
r2k = drop_fields(r2, [n for n in r2names if n not in key])
# Concatenate the two arrays for comparison
aux = ma.concatenate((r1k, r2k))
idx_sort = aux.argsort(order=key)
aux = aux[idx_sort]
#
# Get the common keys
flag_in = ma.concatenate(([False], aux[1:] == aux[:-1]))
flag_in[:-1] = flag_in[1:] + flag_in[:-1]
idx_in = idx_sort[flag_in]
idx_1 = idx_in[(idx_in < nb1)]
idx_2 = idx_in[(idx_in >= nb1)] - nb1
(r1cmn, r2cmn) = (len(idx_1), len(idx_2))
if jointype == 'inner':
(r1spc, r2spc) = (0, 0)
elif jointype == 'outer':
idx_out = idx_sort[~flag_in]
idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
idx_2 = np.concatenate((idx_2, idx_out[(idx_out >= nb1)] - nb1))
(r1spc, r2spc) = (len(idx_1) - r1cmn, len(idx_2) - r2cmn)
elif jointype == 'leftouter':
idx_out = idx_sort[~flag_in]
idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
(r1spc, r2spc) = (len(idx_1) - r1cmn, 0)
# Select the entries from each input
(s1, s2) = (r1[idx_1], r2[idx_2])
#
# Build the new description of the output array .......
# Start with the key fields
ndtype = [list(_) for _ in r1k.dtype.descr]
# Add the other fields
ndtype.extend(list(_) for _ in r1.dtype.descr if _[0] not in key)
# Find the new list of names (it may be different from r1names)
names = list(_[0] for _ in ndtype)
for desc in r2.dtype.descr:
desc = list(desc)
name = desc[0]
# Have we seen the current name already ?
if name in names:
nameidx = ndtype.index(desc)
current = ndtype[nameidx]
# The current field is part of the key: take the largest dtype
if name in key:
current[-1] = max(desc[1], current[-1])
# The current field is not part of the key: add the suffixes
else:
current[0] += r1postfix
desc[0] += r2postfix
ndtype.insert(nameidx + 1, desc)
#... we haven't: just add the description to the current list
else:
            names.append(desc[0])
ndtype.append(desc)
# Revert the elements to tuples
ndtype = [tuple(_) for _ in ndtype]
    # Find the largest number of common fields: r1cmn and r2cmn should be equal, but...
cmn = max(r1cmn, r2cmn)
# Construct an empty array
output = ma.masked_all((cmn + r1spc + r2spc,), dtype=ndtype)
names = output.dtype.names
for f in r1names:
selected = s1[f]
        if f not in names or (f in r2names and not r2postfix and f not in key):
f += r1postfix
current = output[f]
current[:r1cmn] = selected[:r1cmn]
if jointype in ('outer', 'leftouter'):
current[cmn:cmn + r1spc] = selected[r1cmn:]
for f in r2names:
selected = s2[f]
if f not in names or (f in r1names and not r1postfix and f not in key):
f += r2postfix
current = output[f]
current[:r2cmn] = selected[:r2cmn]
if (jointype == 'outer') and r2spc:
current[-r2spc:] = selected[r2cmn:]
# Sort and finalize the output
output.sort(order=key)
kwargs = dict(usemask=usemask, asrecarray=asrecarray)
return _fix_output(_fix_defaults(output, defaults), **kwargs)
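# A minimal usage sketch for ``join_by`` (hypothetical data; integer type codes
# assume a 64-bit platform):
#
# >>> from numpy.lib import recfunctions as rfn
# >>> r1 = np.array([(1, 10.), (2, 20.)], dtype=[('key', int), ('x', float)])
# >>> r2 = np.array([(2, 200.), (3, 300.)], dtype=[('key', int), ('y', float)])
# >>> rfn.join_by('key', r1, r2, jointype='inner', usemask=False)
# array([(2, 20.0, 200.0)],
#       dtype=[('key', '<i8'), ('x', '<f8'), ('y', '<f8')])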
def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None):
"""
Join arrays `r1` and `r2` on keys.
Alternative to join_by, that always returns a np.recarray.
See Also
--------
join_by : equivalent function
"""
kwargs = dict(jointype=jointype, r1postfix=r1postfix, r2postfix=r2postfix,
defaults=defaults, usemask=False, asrecarray=True)
return join_by(key, r1, r2, **kwargs)
| bsd-3-clause |
echohenry2006/tvb-library | contrib/from_articles/region_deterministic_bnm_sj2d.py | 5 | 6136 | # -*- coding: utf-8 -*-
"""
What:
Reproduces Figures XX Sanz-Leon P., PhD Thesis
Needs:
A working installation of tvb
Run:
     python region_deterministic_bnm_sj2d.py -s True -f True
#Subsequent calls can be made with:
python region_deterministic_bnm_sj2d.py -f True
.. author:: Paula Sanz-Leon
"""
import numpy
import argparse
from tvb.simulator.lab import *
import matplotlib.pylab as pylab
pylab.rcParams['figure.figsize'] = 20, 15 # that's default image size for this interactive session
pylab.rcParams.update({'font.size': 22})
pylab.rcParams.update({'lines.linewidth': 3})
pylab.rcParams.update({'axes.linewidth': 3})
parser = argparse.ArgumentParser(description='Reproduce results of Figure XX presented in Sanz-Leon 2014 PhD Thesis')
parser.add_argument('-s','--sim', help='Run the simulations', default=False)
parser.add_argument('-f','--fig', help='Plot the figures', default=False)
args = vars(parser.parse_args())
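# Note: argparse stores the -s/-f values as strings, so any non-empty value on
# the command line (even "False") is treated as truthy below; omit a flag
# entirely to leave that step disabled.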
idx = ['a0', 'a1', 'a2']
gcs = [0.0, 0.5, 1.0]
simulation_length = 2e3
speed = 10.
if args['sim']:
for i in range(3):
oscilator = models.ReducedSetFitzHughNagumo()
oscilator.variables_of_interest = ["xi", "eta", "alpha","beta"]
white_matter = connectivity.Connectivity.from_file("connectivity_66.zip")
white_matter.speed = numpy.array([speed])
white_matter_coupling = coupling.Linear(a=gcs[i])
#Initialise an Integrator
heunint = integrators.HeunDeterministic(dt=0.1)
#Initialise some Monitors with period in physical time
momo = monitors.Raw()
mama = monitors.TemporalAverage(period=1.)
#Bundle them
what_to_watch = (momo, mama)
#Initialise a Simulator -- Model, Connectivity, Integrator, and Monitors.
sim = simulator.Simulator(model = oscilator, connectivity = white_matter,
coupling = white_matter_coupling,
integrator = heunint, monitors = what_to_watch)
sim.configure()
# LOG.info("Starting simulation...")
# #Perform the simulation
raw_data = []
raw_time = []
tavg_data = []
tavg_time = []
for raw, tavg in sim(simulation_length=simulation_length):
            if raw is not None:
raw_time.append(raw[0])
raw_data.append(raw[1])
            if tavg is not None:
tavg_time.append(tavg[0])
tavg_data.append(tavg[1])
LOG.info("Finished simulation.")
#Make the lists numpy.arrays for easier use.
TAVG = numpy.asarray(tavg_data)
RAW = numpy.asarray(raw_data)
LOG.info("Saving simulated data ...")
numpy.save('region_deterministic_bnm_sj2d_raw_' + idx[i] + '.npy', RAW)
numpy.save('region_deterministic_bnm_sj2d_tavg_' + idx[i] + '.npy', TAVG)
numpy.save('region_deterministic_bnm_sj2d_rawtime_' + idx[i] + '.npy', raw_time)
numpy.save('region_deterministic_bnm_sj2d_tavgtime_' + idx[i] + '.npy', tavg_time)
if args['fig']:
for i in range(3):
start_point = simulation_length // 4
end_point = simulation_length // 4 + start_point // 2
LOG.info("Generating pretty pictures ...")
TAVG = numpy.load('region_deterministic_bnm_sj2d_tavg_' + idx[i] + '.npy')
tavg_time = numpy.load('region_deterministic_bnm_sj2d_tavgtime_' + idx[i] + '.npy')[start_point:end_point]
fig= figure(1)
clf()
for k in range(3):
# load data
# compute time and use sim_length
ax=subplot(3, 3, 4+k)
plot(tavg_time, TAVG[start_point:end_point, 0, :, k],'k', alpha=0.042, linewidth=3)
plot(tavg_time, TAVG[start_point:end_point, 1, :, k],'r', alpha=0.042, linewidth=3)
plot(tavg_time, TAVG[start_point:end_point, 0, :, k].mean(axis=1), 'k')
plot(tavg_time, TAVG[start_point:end_point, 1, :, k].mean(axis=1), 'r')
ylim([-5, 2])
xlim([start_point, int(end_point)])
for label in ax.get_yticklabels():
label.set_fontsize(20)
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
if k==0:
ylabel('[au]')
yticks((-4, 0, 1), ('-4', '0', '1'))
title(r'TS ($m=1$)')
ax=subplot(3, 3, 7+k)
plot(tavg_time, TAVG[start_point:end_point, 2, :, k],'k', alpha=0.042, linewidth=3)
plot(tavg_time, TAVG[start_point:end_point, 3, :, k],'r', alpha=0.042, linewidth=3)
plot(tavg_time, TAVG[start_point:end_point, 2, :, k].mean(axis=1), 'k')
plot(tavg_time, TAVG[start_point:end_point, 3, :, k].mean(axis=1), 'r')
ylim([-5, 2])
xlim([start_point, int(end_point)])
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
xticks((start_point, end_point), (str(int(start_point)), str(int(end_point))))
xlabel('time[ms]')
if k==0:
ylabel('[au]')
yticks((-4, 0, 1), ('-4', '0', '1'))
title(r'TS ($m=2$)')
ax=subplot(3, 3, 1+k)
plot(TAVG[start_point:end_point, 0, :, k], TAVG[start_point:end_point, 1, :, k],'b', alpha=0.042)
plot(TAVG[start_point:end_point, 0, :, k].mean(axis=1), TAVG[start_point:end_point, 1, :, k].mean(axis=1), 'b')
title(r'PP ($o=%s$)' % str(k))
ax.yaxis.set_label_position("right")
ylim([-5, 2])
xlim([-5, 2])
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
if k==1:
xticks((-4, 0, 1), ('-4', '0', '1'))
ax.xaxis.labelpad = -10
xlabel(r'$\xi$')
yticks((-4, 0, 1), ('-4', '0', '1'))
ylabel(r'$\eta$')
fig_name = 'SJ2D_default_speed_' + str(int(speed)) + '-config_gcs-' + idx[i] + '.png'
savefig(fig_name)
###EoF### | gpl-2.0 |
russel1237/scikit-learn | examples/svm/plot_oneclass.py | 249 | 2302 | """
==========================================
One-class SVM with non-linear kernel (RBF)
==========================================
An example using a one-class SVM for novelty detection.
:ref:`One-class SVM <svm_outlier_detection>` is an unsupervised
algorithm that learns a decision function for novelty detection:
classifying new data as similar or different to the training set.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))
# Generate train data
X = 0.3 * np.random.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * np.random.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
n_error_train = y_pred_train[y_pred_train == -1].size
n_error_test = y_pred_test[y_pred_test == -1].size
n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size
# plot the line, the points, and the nearest vectors to the plane
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("Novelty Detection")
plt.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=plt.cm.Blues_r)
a = plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors='red')
plt.contourf(xx, yy, Z, levels=[0, Z.max()], colors='orange')
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white')
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='green')
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='red')
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([a.collections[0], b1, b2, c],
["learned frontier", "training observations",
"new regular observations", "new abnormal observations"],
loc="upper left",
prop=matplotlib.font_manager.FontProperties(size=11))
plt.xlabel(
"error train: %d/200 ; errors novel regular: %d/40 ; "
"errors novel abnormal: %d/40"
% (n_error_train, n_error_test, n_error_outliers))
plt.show()
| bsd-3-clause |
SurveyMan/SMPy | evaluation/evaluation.py | 1 | 13463 | import urllib, httplib
import csv, os, sys
import numpy as np
import matplotlib.pyplot as pyplot
import math
from survey.objects import *
# first evaluate bot detection
# want to compare expected number of catch questions, percent bots, ability to catch
# maybe want to vary by the number of profiles (corresponds to clusters)
def bias(q1, q2):
pass
def entropy(survey, responses):
emp_prob = empirical_prob(frequency(survey, responses))
ent = 0.0
for q in emp_prob.keys():
for p in emp_prob[q].values():
if p > 0:
ent += p * (math.log(p) / math.log(2))
return -ent
def frequency(survey, responses):
""" responses needs to be a single list"""
freqs = {q : {o : 0 for o in q.options} for q in survey.questions}
for response in responses:
for q in response.keys():
o = response[q][0]
freqs[q][o] += 1
return freqs
def empirical_prob(fmap):
probs = {q : {o : 0 for o in list(fmap[q].keys())} for q in list(fmap.keys())}
for q in list(fmap.keys()):
total = sum(fmap[q].values()) # should be equal to the total number of respondents if we don't permit breakoff
for o in list(fmap[q].keys()):
if total == 0:
probs[q][o] = 0.0
else:
probs[q][o] = float(fmap[q][o]) / float(total)
return probs
def log_likelihood(response, pmap):
likelihood = 0.0
for q in list(response.keys()):
o = response[q][0]
likelihood -= math.log(pmap[q][o])
return likelihood
def ind_entropy(response, pmap):
ent = 0.0
for q in list(response.keys()):
o = response[q][0]
ent -= pmap[q][o] * math.log(pmap[q][o]) / math.log(2)
return ent
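# A small worked example for ``ind_entropy`` (plain strings stand in for the
# Question/Option objects, which are only used as dictionary keys here):
#
# >>> pmap = {'q1': {'a': 0.5, 'b': 0.5}}
# >>> ind_entropy({'q1': ('a',)}, pmap)
# 0.5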
def make_bootstrap_interval(survey, responses, alpha, method, stat=np.average, parametric=True):
B = 2000
pmap = empirical_prob(frequency(survey, responses))
#stats = [method(r, pmap) for r in responses]
bootstrap_sample = [np.random.choice(responses, len(responses), replace=True) for _ in range(B)]
bootstrap_stat = [[method(r,pmap) for r in s] for s in bootstrap_sample]
data = sorted([stat(bss) for bss in bootstrap_stat])
if parametric:
bs_mean = np.average([np.average(samp) for samp in bootstrap_stat])
bs_std = np.std([np.average(samp) for samp in bootstrap_stat])
return (bs_mean - 2*bs_std, bs_mean + 2*bs_std)
else:
aindex = int(math.floor((alpha / 2.0)*len(responses)))
bindex = int(math.floor((1.0 - (alpha / 2.0))*len(responses)))
return (data[aindex], data[bindex])
def get_least_popular_options(survey, responses, diff):
fmap = frequency(survey, responses)
least_popular = {}
for q in list(fmap.keys()):
optfreqs = list(fmap[q].items())
optfreqs = sorted(optfreqs, key = lambda t : t[1])
for (i, j) in [(k, k+1) for k in range(len(optfreqs)-1)]:
if optfreqs[i][1] < optfreqs[j][1]*diff:
least_popular[q] = optfreqs[:j]
break
print("Number of questions with least popular options : %d" % len([opts for opts in list(least_popular.values()) if len(opts)!=0]))
return least_popular
def get_mu(survey, least_popular_options):
expectation = 0
for q in survey.questions:
if q in least_popular_options:
expectation += float(len(least_popular_options[q])) / float(len(q.options))
return expectation
def num_least_popular(response, lpo):
n = 0
for q in lpo.keys():
if q not in response:
# in case this person didn't answer this question
continue
opt = response[q][0]
if opt in [o[0] for o in lpo[q]]:
n += 1
return n
def bot_lazy_responses_entropy(survey, responses, alpha, worker_ids):
emp_prob = empirical_prob(frequency(survey, responses))
lo, hi = make_bootstrap_interval(survey, responses, alpha, ind_entropy, parametric=False)
print "entropy bounds: " , hi, lo
classifications = []
for response in responses:
ent = ind_entropy(response, emp_prob)
print ent, len(response), ent > hi
classifications.append((response, ent > hi, ent))
return classifications
def detect_variants(q1, q2, responses):
pass
def bot_lazy_responses_unordered(survey, responses, delta, diff):
lpo = get_least_popular_options(survey, responses, diff)
mu = get_mu(survey, lpo)
alpha = pow(math.e, (- delta * mu) / (2 + delta))
print("Expect %f least popular answers for a bot; bots will answer fewer than this with probability %f" % (mu, alpha))
classifications = []
for response in responses:
n = num_least_popular(response, lpo)
classifications.append((response, n >= round(mu), n))
return classifications
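# A small worked example of the threshold above (numbers chosen arbitrarily):
# with mu = 3.0 expected least-popular answers per bot and delta = 1.0, the
# reported probability is alpha = exp(-1.0 * 3.0 / (2 + 1.0)) ~= 0.37, and a
# response is classified as a bot when it hits at least round(mu) = 3 of the
# least popular options.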
def bot_lazy_responses_ordered(survey, responses, alpha, workerids):
# create mapping of total number of options
stages = {}
for question in survey.questions:
m = len(question.options)
if m not in stages:
stages[m] = []
stages[m].append(question)
classifications = []
for (i, response) in enumerate(responses):
workerid = workerids[i]
this_classification = []
stuff_to_print = {0: "", 1 : "", 2 : "", 3 : "", 4 : 0}
for (m, questions) in stages.items():
if m > 1 and len(questions) > 1:
#print([(j, m-j-1) for j in range(int(math.floor(m/2)))])
for (hi, lo) in [(j, m - j - 1) for j in range(int(math.floor(m/2)))]:
hict = len([opos for (_, (_, _, opos)) in [(q, tupe) for (q, tupe) in response.items() if q in questions] if int(opos) == hi])
#print("Number at stage %d, position %d: %d" % (m, hi, hict))
loct = len([opos for (_, (_, _, opos)) in [(q, tupe) for (q, tupe) in response.items() if q in questions] if int(opos) == lo])
#print("Number at stage %d, position %d: %d" % (m, lo, loct))
n = hict + loct
if n == 0:
continue
mu = 0.5 * n
delta = math.sqrt((3 * math.log(alpha)) / (- mu))
x = {True : hict, False : loct}[hict > loct]
b = (1 + delta) * mu
c = x >= (1 + delta) * mu
if x >= b:
if hict > loct:
stuff_to_print[hi] = "%d >= %f" % (x,b)
stuff_to_print[lo] = str(loct)
else :
stuff_to_print[hi] = str(hict)
stuff_to_print[lo] = "%d >= %f" % (x,b)
else :
stuff_to_print[hi] = str(hict)
stuff_to_print[lo] = str(loct)
stuff_to_print[4] += hict + loct
#print("If %d >= %f : Bot? %s, workerid: %s, amazon reviews?: %s\n" % (x, b, c, workerid, amazon(workerid)))
this_classification.append((response, x >= (1 + delta) * mu, n))
if any([t[1] for t in this_classification]):
print "<tr><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%d</td>" % (stuff_to_print[0], stuff_to_print[1], stuff_to_print[2], stuff_to_print[3], stuff_to_print[4])
# policy for deciding bots for each question length? what's
# the probability of making an incorrect classification?
# will probably want a discount factor over these things
# for now, just return that it's a bot if one is true
classifications.append((response, any([t[1] for t in this_classification]), workerid))
return classifications
def amazon(workerid):
    # the Amazon profile lookup below is currently disabled; every worker is treated as valid
    return True
h = httplib.HTTPConnection('www.amazon.com')
h.request('GET', '/gp/pdp/profile/' + workerid)
r = h.getresponse()
return r.status != 404
def get_disagreeing_correlations(classifications, responses):
# flag respondents who disagree on any of the strongly correlated
# answers
classifications = []
def make_plot(data, title, filename):
false_neg = [np.average([a[0] for a in stuff]) for stuff in data]
false_neg_min = np.array([min([a[0] for a in stuff]) for stuff in data])
false_neg_max = np.array([max([a[0] for a in stuff]) for stuff in data])
false_pos = [np.average([a[1] for a in stuff]) for stuff in data]
false_pos_min = np.array([min([a[1] for a in stuff]) for stuff in data])
false_pos_max = np.array([max([a[1] for a in stuff]) for stuff in data])
xaxis = [stuff[0][2] for stuff in data]
fig, ax = pyplot.subplots()
ax.set_title(title)
ax.errorbar(xaxis, false_pos, yerr=[false_pos - false_pos_min, false_pos_max - false_pos], c='y', fmt='o', label='Humans identified as bots')
ax.errorbar(xaxis, false_neg, yerr=[false_neg - false_neg_min, false_neg_max - false_neg], c='r', fmt='o', label='Bots identified as humans')
ax.legend(numpoints=1)
pyplot.axis([0,1.0,0,1.2])
pyplot.ylabel("Percent misclassified")
pyplot.xlabel("Percent bots in the population")
#pyplot.show()
fig.savefig(filename)
# def now_with_my_thing(clusters):
# n=10
# m=5
# s1 = Survey([Question("", [Option("") for _ in range(m)], qtypes["radio"], shuffle=True) for _ in range(n)])
# data = []
# for i in range(1,10):
# sub_data = []
# for j in range(100):
# bots, nots = sample(s1, make_profiles(s1, clusters), 100, i/10.0)
# classifications = classify2(s1, bots, nots, 1.0, 0.75)
# (false_negative, false_positive) = analyze_classifications(classifications)
# sub_data.append((float(false_negative) / float(len(bots)) , float(false_positive) / float(len(nots)), i/10.0))
# data.append(sub_data)
# return data
def run_this():
make_plot(now_with_my_thing(1), "Bots answer >= expected min questions", "balls_n_bins_1_cluster.png")
make_plot(generate_bots_v_humans(classify_humans_as_outliers, 1), "Humans are outliers; 1 cluster of humans", "humans_outliers_1_cluster.png")
make_plot(generate_bots_v_humans(classify_bots_as_outliers, 1), "Bots are outliers; 1 cluster of humans", "bots_outliers_1_cluster.png")
# now simulate breakoff
# hypothesis : when breakoff is permitted, people will stop around a clustered point according to their personal utility function
# we believe that this point follows a normal distribution.
# if we marginalize for position, we should be left with the probability of abandonment for a particular question
# take a set of profiles, let one of the questions be offensive. assign a level of offense for each profile.
# since the cost of abandonment for web surveys is very low, numerous factors impact the positional preferences for abandonment.
# model the behavior we believe happens on mechanical turk: bots will abandon the survey after the first question, whereas
# people will abandon upon seeing an offensive question or when the survey has exceeded their tolerance.
# people don't know how long the survey is; this will impact their calculus for abandonment. we expect to see different behavior in
# the case where progress bars are represented. however, we believe that statistically the behavior will remain the same - it just may
# increase the user's tolerance for length
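# A minimal sketch of the abandonment model described in the comments above, assuming each
# respondent draws a personal tolerance (the last 1-indexed position they are willing to
# answer) from a normal distribution and abandons immediately upon reaching an offensive
# question. The names tolerance_mean, tolerance_sd and offensive_position are illustrative
# and do not come from the rest of this module.
def sample_abandonment_position(num_questions, tolerance_mean, tolerance_sd, offensive_position=None):
    # draw a personal tolerance and clamp it to the survey length
    tolerance = int(round(np.random.normal(tolerance_mean, tolerance_sd)))
    tolerance = max(1, min(num_questions, tolerance))
    if offensive_position is not None:
        # abandon at the offensive question if it is reached before the tolerance runs out
        return min(tolerance, offensive_position)
    return tolerance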
def make_breakoff_profiles(s, n):
profiles = make_profiles(s,n)
#print(len(profiles))
for profile in profiles:
offensive = np.random.choice(list(profile.keys()), 1, replace=True)[0]
for quid in list(profile.keys()):
# i have some marginal probability of abandoning at every question, but let my probability of abandoning at a particular question be very high
oid, prob = profile[quid]
oprob = random.random()
if quid==offensive:
                print(offensive, oprob, prob)
print(len(profiles))
return profiles
def breakoff_frequency_by_question(survey, responses):
breakoff = {q.quid : 0 for q in survey.questions}
for response in responses:
questions_by_index = sorted(list(response.items()), key = lambda x : int(x[1][1]))
breakoff[questions_by_index[-1][0].quid] += 1
return breakoff
def breakoff_frequency_by_position(survey, responses):
# 0-indexed
breakoff = {i : 0 for i in range(len(survey.questions))}
for response in responses:
breakoff[len(response)-1] += 1
return breakoff
def get_interval(samp, alpha, norm=False):
B = 2000
bootstrap_sample = [sorted(np.random.choice(samp, len(samp), replace=True)) for _ in range(B)]
bootstrap_means = [np.average(samp) for samp in bootstrap_sample]
bootstrap_mean = np.average(bootstrap_means)
if norm :
bootstrap_std = np.std(bootstrap_means)
a, b = bootstrap_mean - 2.0*bootstrap_std, bootstrap_mean + 2.0*bootstrap_std
else :
hi = int(math.ceil((1-alpha)*len(bootstrap_means)))
lo = int(math.floor(alpha*len(bootstrap_means)))
a = sorted(bootstrap_means)[lo]
b = sorted(bootstrap_means)[hi]
return (a, b)
def identify_breakoff_questions(survey, responses, alpha):
fmap1 = breakoff_frequency_by_position(survey, responses)
fmap2 = breakoff_frequency_by_question(survey, responses)
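    # alpha is currently unused here; presumably it is meant to feed get_interval above so
    # that positions/questions whose breakoff counts fall outside the bootstrap interval can
    # be flagged.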
return (fmap1, fmap2)
| apache-2.0 |
WillArmentrout/galSims | plotting/new/Plotlv_Combine.py | 1 | 4580 | import pylab as plt
import math
import numpy as np
from matplotlib.pyplot import Rectangle # Used to make dummy legend
markerScaling=25.
pointSize=3.
# Open CSV File
datafile = open(r'C:\Users\newye\OneDrive\Documents\GitHub\galSims\misc\HIIregion_popSynthesis_test.csv', 'r')
csvFile = []
for row in datafile:
csvFile.append(row.strip().split(','))
# Save Galactic Radius Info from CSV to new list
ldata = list()
vdata = list()
index = 0
NlyLimit = 47.56
#NlyLimit = 48.0
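# Keep only simulated regions at or above the Nly cutoff (NlyLimit is presumably the log10 of
# the ionizing photon rate) and collect their Galactic longitude (column 8) and LSR velocity
# (column 9) for the top panel of the plot.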
while index < len(csvFile) :
if (float(csvFile[index][5])) >= NlyLimit :
ldata.append(float(csvFile[index][8]))
vdata.append(float(csvFile[index][9]))
index += 1
# Open CSV File for Wise Version 3
datafileW3 = open(r'C:\Users\newye\OneDrive\Documents\GitHub\galSims\misc\wise_hii_V1.3_hrds.csv', 'r')
csvFileW3 = []
for row in datafileW3:
csvFileW3.append(row.strip().split(','))
# Save x, y, and l Info from Simulated Data to new list
vDataW3 = list()
lDataW3 = list()
indexW3 = 1
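# For the observed WISE regions, keep only rows typed " K" or " C" (note the leading space in
# the catalog field), wrap longitudes above 180 deg into the (-180, 180] range, and collect
# longitude (column 2) and LSR velocity (column 9) for the bottom panel.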
while indexW3 < len(csvFileW3) :
try :
lW3 = float(csvFileW3[indexW3][2])
vW3 = float(csvFileW3[indexW3][9])
regType = str(csvFileW3[indexW3][1])
#d = float(csvFile[index][32])
if float(lW3) > 180 :
lW3 = lW3-360
else :
lW3 = lW3
if (regType == " K") or (regType == " C") :
lDataW3.append(lW3)
vDataW3.append(vW3)
except :
pass
indexW3 += 1
fig, (ax1,ax2) = plt.subplots(2, sharex=True, sharey=True)
ax1.scatter(ldata,vdata,s=pointSize, facecolors = 'black', edgecolors = 'none')
ax1.set_ylim([-200,200])
ax1.set_xlim([-180,180])
#ax1.set_xticks([-180,-120,-60,0,60,120,180])
ax1.invert_xaxis()
ax2.scatter(lDataW3,vDataW3,s=pointSize, facecolors = 'black', edgecolors = 'none')
ax2.set_ylim([-175,175])
ax2.set_xlim([-180,180])
#ax2.set_xticks([-180,-120,-60,0,60,120,180],[180,240,300,0,60,120,180])
ax2.invert_xaxis()
#ax1.plot([-90, -90], [-15, 15], '--', lw=2,alpha=0.7, color="black")
#ax1.plot([0, 0], [-15, 15], '--', lw=2,alpha=0.7, color="black")
#ax1.plot([90, 90], [-15, 15], '--', lw=2,alpha=0.7, color="black")
#ax2.plot([-90, -90], [-15, 15], '--', lw=2,alpha=0.7, color="black")
#ax2.plot([0, 0], [-15, 15], '--', lw=2,alpha=0.7, color="black")
#ax2.plot([90, 90], [-15, 15], '--', lw=2,alpha=0.7, color="black")
#ax2.text(45,15,"I", ha='center',va='center',color='black',fontsize=25)
#ax2.text(135,15,"II", ha='center',va='center',color='black',fontsize=25)
#ax2.text(-135,15,"III", ha='center',va='center',color='black',fontsize=25)
#ax2.text(-45,15,"IV", ha='center',va='center',color='black',fontsize=25)
#ax1.spines['right'].set_visible(False)
#ax1.spines['left'].set_visible(False)
#ax1.spines['top'].set_visible(False)
#ax1.spines['bottom'].set_visible(False)
ax1.xaxis.tick_top()
#ax1.yaxis.tick_right()
#ax1.yaxis.set_ticks_position('left')
#ax1.xaxis.set_ticks_position('top')
#ax1.yaxis.set_ticks_position('right')
ax1.xaxis.set_tick_params(width=2,labelsize=20)
ax1.yaxis.set_tick_params(width=2,labelsize=20)
#ax2.spines['right'].set_visible(False)
#ax2.spines['left'].set_visible(False)
#ax2.spines['top'].set_visible(False)
#ax2.spines['bottom'].set_visible(False)
ax2.xaxis.tick_bottom()
#ax2.yaxis.tick_right()
#ax1.yaxis.set_ticks_position('left')
#ax1.xaxis.set_ticks_position('bottom')
#ax1.yaxis.set_ticks_position('right')
ax2.xaxis.set_tick_params(width=2,labelsize=20)
ax2.yaxis.set_tick_params(width=2,labelsize=20)
plt.setp((ax1,ax2))
#plt.xticks([-180,-135,-90,-45,0,45,90,135,180],[r"$\/180^{\circ}$",r"$\/225^{\circ}$",r"$\/270^{\circ}$",r"$\/300^{\circ}$",r"$\/0^{\circ}$",r"$\/45^{\circ}$",r"$\/90^{\circ}$",r"$\/135^{\circ}$",r"$\/180^{\circ}$"])
plt.xticks([-180,-135,-90,-45,0,45,90,135,180],[r"$\/180^{\circ}$",r"$\/$",r"$\/270^{\circ}$",r"$\/$",r"$\/0^{\circ}$",r"$\/$",r"$\/90^{\circ}$",r"$\/$",r"$\/180^{\circ}$"])
plt.yticks([-150,-100,-50,0,50,100,150],[-150,-100,-50,0,50,100,150])
fig.text(0.93,0.33, "Observed", ha='center',va='center',color='black',rotation=90,fontsize=25)
fig.text(0.93,0.72,"Simulated", ha='center',va='center',color='black',rotation=90,fontsize=25)
fig.text(0.5, 0.04, "Galactic Longitude", ha='center',va='center',fontsize=35)
fig.text(0.03, 0.5, "LSR Velocity (km s$^{-1}$)", ha='center',va='center',rotation = 90,fontsize=35)
fig.set_size_inches(10,8)
fig.subplots_adjust(hspace=0,wspace=0)
fig.subplots_adjust(left=0.15,bottom=0.15)
fig.savefig('lv_Combine.eps', format='eps', dpi=1000)
fig.savefig('lv_Combine.pdf', format='pdf', dpi=1000)
fig.show()
| gpl-2.0 |
kennethdecker/MagnePlane | paper/images/trade_scripts/boundary_layer_length_writer.py | 4 | 1314 | import numpy as np
import matplotlib.pylab as plt
from openmdao.api import Group, Problem, IndepVarComp
from hyperloop.Python import boundary_layer_sensitivity
if __name__ == '__main__':
top = Problem()
root = top.root = Group()
params = (
('delta_star', .02, {'units' : 'm'}),
('A_pod', 2.0, {'units' : 'm**2'}),
('L_pod', 22.0, {'units' : 'm'}),
('length_calc', True))
root.add('input_vars', IndepVarComp(params), promotes = ['delta_star', 'A_pod', 'L_pod', 'length_calc'])
root.add('p', boundary_layer_sensitivity.BoundaryLayerSensitivity())
root.connect('delta_star', 'p.delta_star')
root.connect('A_pod', 'p.A_pod')
root.connect('L_pod', 'p.L')
root.connect('length_calc', 'p.length_calc')
top.setup()
L_pod = np.linspace(20.0, 40.0, num = 50)
A_pod = np.linspace(2.0, 3.0, num = 3)
A_tube = np.zeros((len(A_pod), len(L_pod)))
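    # Sweep pod length from 20 m to 40 m for each of the three pod cross-sectional areas and
    # record the tube area computed by the BoundaryLayerSensitivity component; the arrays are
    # written out below for the boundary-layer trade-study plots.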
for i in range(len(L_pod)):
for j in range(len(A_pod)):
top['L_pod'] = L_pod[i]
top['A_pod'] = A_pod[j]
top.run()
A_tube[j, i] = top['p.A_tube']
np.savetxt('../../../paper/images/data_files/boundary_layer_length_trades/L_pod.txt', L_pod, fmt = '%f', delimiter = '\t', newline = '\r\n')
np.savetxt('../../../paper/images/data_files/boundary_layer_length_trades/A_tube.txt', A_tube, fmt = '%f', delimiter = '\t', newline = '\r\n')
| apache-2.0 |
ybayle/vqmm | vqmm.py | 1 | 26994 | #!/usr/bin/python
#
# Author Yann Bayle
# E-mail [email protected]
# License MIT
# Created 09/09/2016
# Updated 12/10/2016
# Version 1.0.0
#
# Object Preprocess file from YAAFE and launch VQMM from Thibault Langlois
# You can find the latest version of his algorithm here:
# https://bitbucket.org/ThibaultLanglois/vqmm/downloads
#
# OS Only work on UNIX for the moment
#
# Details - Remove header from CSV (5 lines)
# - Replace scientifc notation by float
# - Replace commas by spaces
# - Check correct numbers of col = 13
# - Check that there are no empty files
# - Check minimum number of frames
# - All non conform files are deplaced in the error folder
# - Manage folds and launch VQMM
#
# Manual 1 Install YAAFE and analyse your songs in a folder ex: /path/YAAFE/
# 2 Download https://github.com/ybayle/vqmm
# 3 Create a fileList.txt containing path & class of YAAFE's files
# 4 Launch: python vqmm.py -d /path/YAAFE/ -f /path/fileList.txt
#
# TODOs - Optimize: 700ms/file for ~1Mo/file ~4800lines/file 13feat/line
# on i7-3770k, 16Go RAM, 3.5GHz, 64bits, Debian 8.2
# - Tell user not to use "NOT_" in his class name or better manage it
# - Train/Test more folds
# - Make parallel: preprocess, each fold and test
# - Instead of display results txt, display fig
# - X "files to classify" in main.c remove dot . and disp progression
# - Enhance help and automatic produce of man and README
# - Assert
# - parameterized epsilon for codebook
# - take in account global var verbose with -v
# - Write a txt file which indicates all param chosen
#
import time
import argparse
import sys
import os
import csv
import shutil
import subprocess
import re
import json
import multiprocessing
import fnmatch
import webbrowser
from functools import partial
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
begin = int(round(time.time() * 1000))
VERBOSE = False
PRINTDEBUG = True
class bcolors:
HOUR = '\033[96m'
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
ERROR = '\033[91m'
FILE = '\033[37m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def plotResults(dirName):
# Format of csv files created by VQMM:
# ClassName (tag), tp, tn, fp, fn, precision, recall, fscore
files = []
for fileName in os.listdir(dirName):
if fnmatch.fnmatch(fileName, '*perTag.csv'):
files.append(fileName)
data = {}
for fileName in files:
with open(dirName + fileName, 'r') as res:
resReader = csv.reader(res, delimiter=',')
for row in resReader:
tag = row[0]
if tag in data:
data[tag]["precision"].append(row[5])
data[tag]["recall"].append(row[6])
data[tag]["fScore"].append(row[7])
else:
data[tag] = {
"precision":[row[5]],
"recall":[row[6]],
"fScore":[row[7]]
}
nbMeasure = 3
nbClass = len(data)
nbFolds = len(data[tag]["precision"])
dataPlot = np.zeros((nbClass, nbFolds, nbMeasure))
classIndex = 0
tagName = []
for tag in data:
tagName.append(tag)
dataPlot[classIndex, :, 0] = data[tag]["precision"]
dataPlot[classIndex, :, 1] = data[tag]["recall"]
dataPlot[classIndex, :, 2] = data[tag]["fScore"]
classIndex = classIndex + 1
# Figure display part
plt.close("all")
fig, axes = plt.subplots(nrows=1, ncols=nbClass, figsize=(12, 5))
# rectangular box plot
bplot1 = axes[0].boxplot(dataPlot[0],
vert=True, # vertical box aligmnent
patch_artist=True) # fill with color
bplot2 = axes[1].boxplot(dataPlot[1],
vert=True, # vertical box aligmnent
patch_artist=True) # fill with color
# fill with colors
colors = ['pink', 'lightblue', 'lightgreen']
for bplot in (bplot1, bplot2):
for patch, color in zip(bplot['boxes'], colors):
patch.set_facecolor(color)
# adding horizontal grid lines
index = 0
# tagName = ["Class 1", "Class 2"]
for ax in axes:
ax.yaxis.grid(True)
ax.set_ylabel('Value of measure')
ax.set_xlabel(tagName[index])
index = index + 1
ax.set_ylim([0.0, 1.0])
# add x-tick labels
plt.setp(axes, xticks=[y+1 for y in range(nbMeasure)],
xticklabels=['Precision', 'Recall', 'F-Score'])
imgName = dirName + "figure.png"
plt.savefig(imgName, dpi=100)
webbrowser.open(imgName)
def extractPathAndClass(s):
delimiter = '/'
insertStr = "processed/"
if insertStr in s:
insertStr = ""
limit = s.rindex(delimiter) + len(delimiter)
line = s[:limit] + insertStr + s[limit:]
index = line.index('\t')
return line[:index], line[index+1:-1]
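# Illustrative example (hypothetical path): extractPathAndClass("/data/YAAFE/song1.wav.mfcc.csv\tjazz\n")
# returns ("/data/YAAFE/processed/song1.wav.mfcc.csv", "jazz"); "processed/" is only inserted
# when it is not already part of the path.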
def validScientificNotation(val):
pattern = re.compile("-?[0-9]\.[0-9]+[Ee][+-][0-9]{2}")
if val:
if pattern.match(val):
return True
else:
return False
else:
return False
def validAndConvertFile(inDIR, outDIR, errDIR, filename):
inFileName = inDIR + filename
errFileName = errDIR + filename
# Remove empty file or with too few lines to be analyzed
if os.stat(inFileName).st_size > 4000:
# Above line is slightly faster (20microsec) than
# os.stat(inFileName).st_size
outFileName = outDIR + filename
outFile = open(outFileName, 'w')
fileInvalid = False
try:
with open(inFileName, 'r') as data:
reader = csv.reader(data)
dismissedLines = 5
requiredNumCol = 13
for row in reader:
if 0 == dismissedLines:
if not len(row) < requiredNumCol:
str2write = ""
for col in row:
if validScientificNotation(col):
str2write = str2write + "{0:.14f}".format(float(col)) + " "
else:
fileInvalid = True
break
outFile.write(str2write[:-1] + "\n")
else:
fileInvalid = True
break
else:
dismissedLines = dismissedLines - 1
finally:
outFile.close()
if fileInvalid:
# os.remove(outFileName) # TODO uncomment
if not os.path.exists(errDIR):
os.makedirs(errDIR)
shutil.move(inFileName, errFileName)
else:
if not os.path.exists(errDIR):
os.makedirs(errDIR)
shutil.move(inFileName, errFileName)
def find_between_r(s, first, last):
"""Description of find_between_r (s, first, last)
:param s: A string
:param first: Beginning delimiter for substring
:param last: Ending delimiter for substring
:type s: string
:type first: string
:type last: string
:return: Substring contained between first and last by reading s from the
end to the beginning
:rtype: string
:Example:
s = "/media/sf_DATA/results/VQMM_YAAFE/ZAA641200304_audio_full_mono_22k.wav.mfcc.csv I"
print(find_between_r(s, "/", "\t"))
ZAA641200304_audio_full_mono_22k.wav.mfcc.csv
"""
try:
start = s.rindex(first) + len(first)
end = s.rindex(last, start)
return s[start:end]
except ValueError:
return ""
def curTime():
return bcolors.HOUR + datetime.now().time().strftime("%Hh%Mm%Ss") + " " + bcolors.ENDC
def printError(msg):
print(bcolors.BOLD + bcolors.ERROR + "ERROR:\n" + msg + "\nProgram stopped" + bcolors.ENDC)
sys.exit()
def printTitle(msg):
if PRINTDEBUG:
print(curTime() + bcolors.BOLD + bcolors.OKGREEN + msg + bcolors.ENDC)
def printMsg(msg):
if VERBOSE:
print(bcolors.HEADER + msg + bcolors.ENDC)
def printInfo(msg):
if PRINTDEBUG:
print(curTime() + bcolors.OKBLUE + msg + bcolors.ENDC)
def printWarning(msg):
if PRINTDEBUG:
print(bcolors.WARNING + msg + bcolors.ENDC)
def printFile(fileName):
if os.path.isfile(fileName):
printInfo(fileName + ":")
print(bcolors.FILE)
with open(fileName, 'r') as fn:
for line in fn:
print(line[:-1])
print(bcolors.ENDC)
else:
printWarning("File not found: " + fileName)
def runTrainTestOnFold(i, args):
tmpDIR = args["tmpDIR"]
codebookFile = args["cbkDir"] + "codebook.cbk"
resultsDir = tmpDIR + "Results/"
modelsDir = tmpDIR + "Models/"
# print("runTrainTestOnFold")
# print("i " + str(i) + " args " + str(args))
trainFileList = args["foldsName"][i]
trainOn = list(set(args["foldsName"]) - set([trainFileList]))
tmpNb = [str(val) for val in range(1, args["nbFolds"]+1)]
tmpNb.remove(trainFileList[-5])
foldsNumber = ''.join(str(x) for x in tmpNb)
testFileList = args["tmpDIR"] + "testFolds_" + foldsNumber + ".csv"
os.system("cat " + " ".join(trainOn) + " > " + testFileList)
printInfo("Training Model on Fold " + str(i+1))
with open(args["cbkDir"]+args["projName"]+"/"+str(i)+"_train.txt", 'w') as f:
subprocess.call([args["pathVQMM"] + 'vqmm', '-quiet', 'n', '-output-dir', modelsDir, '-list-of-files', trainFileList, '-epsilon', args["epsilon"], '-smoothing', args["smoothing"], '-codebook', codebookFile, '-make-tag-models'], stdout=f, stderr=f)
# subprocess.call([args["pathVQMM"] + 'vqmm', '-quiet', 'n', '-output-dir', modelsDir, '-list-of-files', trainFileList, '-epsilon', args["epsilon"], '-codebook', codebookFile, '-make-tag-models'], stdout=f, stderr=f)
modelsFile = tmpDIR + "Models" + str(i) + ".csv"
with open(modelsFile, 'w') as mf:
for className in args["classNames"]:
mf.write(modelsDir + className + "$"+ find_between_r(trainFileList, "/", ".") + ".mm\n")
printInfo("Testing Model on Fold " + str(i+1))
if not os.path.exists(resultsDir):
os.makedirs(resultsDir)
# printInfo("Approx 515ms per file")
with open(args["cbkDir"]+args["projName"]+"/"+str(i)+"_test.txt", 'w') as f:
subprocess.call([args["pathVQMM"] + 'vqmm', '-tagify', '-output-dir', resultsDir, '-models', modelsFile, '-codebook', codebookFile, '-list-of-files', testFileList], stdout=f, stderr=f)
# os.remove(testFileList) # TODO uncomment
# os.remove(modelsFile) # TODO uncomment
printInfo("Fold " + str(i+1) + " tested")
def runVQMM(args):
fileListWithClass = args["fileListWithClass"]
tmpDIR = args["tmpDIR"]
randomSeed = str(args["randSeedCbk"])
codebookSize = str(args["cbkSize"])
codebookFile = args["cbkDir"] + "codebook.cbk"
resultsDir = tmpDIR + "Results/"
tmpModels = tmpDIR + "tmpModels.csv"
modelsDir = tmpDIR + "Models/"
if not os.path.exists(modelsDir):
os.makedirs(modelsDir)
printTitle("Compiling VQMM")
os.system("make -C " + args["pathVQMM"] + "src/")
if os.path.isfile(codebookFile):
printTitle("VQMM Codebook already created for this codebook size")
else:
printTitle("Creating VQMM Codebook")
with open(args["cbkDir"]+"cbk_stderr.txt", 'w') as f:
subprocess.call([args["pathVQMM"] + 'vqmm', '-quiet', 'n', '-list-of-files', fileListWithClass, '-random', randomSeed, '-codebook-size', codebookSize, '-codebook', codebookFile], stderr=f)
if args["nbFolds"] == 1:
printTitle("Training Model")
# subprocess.call([args["pathVQMM"] + 'vqmm', '-quiet', 'n', '-output-dir', modelsDir, '-list-of-files', fileListWithClass, '-epsilon', args["epsilon"], '-smoothing', args["smoothing"], '-codebook', codebookFile, '-make-tag-models'])
subprocess.call([args["pathVQMM"] + 'vqmm', '-quiet', 'n', '-output-dir', modelsDir, '-list-of-files', fileListWithClass, '-epsilon', args["epsilon"], '-codebook', codebookFile, '-make-tag-models'])
modelsFile = tmpDIR + "Models.csv"
os.system("readlink -f $(echo \"" + modelsDir + "*\") >> " + tmpModels)
os.system("sed -n '/NOT_/!p' " + tmpModels + " >> " + modelsFile)
# os.remove(tmpModels) # TODO uncomment
printTitle("Testing Model")
if not os.path.exists(resultsDir):
os.makedirs(resultsDir)
printInfo("Approx 515ms per file")
subprocess.call([args["pathVQMM"] + 'vqmm', '-tagify', '-output-dir', resultsDir, '-models', modelsFile, '-codebook', codebookFile, '-list-of-files', fileListWithClass])
# os.remove(modelsFile) # TODO uncomment
printTitle("Results:")
displayedRes = False
for fileName in os.listdir(resultsDir):
if fileName.endswith("summary.txt"):
printFile(resultsDir+fileName)
displayedRes = True
elif fileName.endswith("perTag.txt"):
printFile(resultsDir+fileName)
displayedRes = True
if not displayedRes:
printError("Error during VQMM, no results to display, see " + args["analysisFolder"] + " for more details.")
else:
generateFolds(args)
printWarning("TODO manage inversion of Train and Test Set")
# Parallel computing on each TrainTestFolds
printTitle("Parallel train & test of folds")
partialRunTrainTestOnFold = partial(runTrainTestOnFold, args=args)
pool = multiprocessing.Pool(args["nbFolds"])
pool.map(partialRunTrainTestOnFold, range(args["nbFolds"])) #make our results with a map call
pool.close() #we are not adding any more processes
pool.join() #tell it to wait until all threads are done before going on
printTitle("Display resulting image in default browser")
plotResults(resultsDir)
def createDir(dirName):
if not os.path.exists(dirName):
os.makedirs(dirName)
return True
else:
return False
def preprocess(args):
printTitle("Preprocessing")
inDIR = args["inDIR"]
fileWithClass = args["fileWithClass"]
if inDIR[-1] != '/' and inDIR[-1] != '\\':
inDIR = inDIR + '/'
errDIR = inDIR + "error/"
outDIR = inDIR + "processed/"
# Create folder and subfolders if does not exists
createDir(args["analysisFolder"])
args["projDir"] = args["analysisFolder"] + inDIR[:-1][inDIR[:-1].rindex("/")+1:] + "/"
createDir(args["projDir"])
args["cbkDir"] = args["projDir"] + "CodeBookSize_" + str(args["cbkSize"]).zfill(3) + "/"
createDir(args["cbkDir"])
projName = str(args["randSeedCbk"]) + "RandCbk_"
projName = projName + str(args["randSeedFold"]) + "RandFold_"
projName = projName + str(args["nbFolds"]) + "Fold"
if args["nbFolds"] > 1:
projName = projName + "s_"
else:
projName = projName + "_"
projName = projName + str(args["epsilon"]) + "Eps_"
projName = projName + str(args["smoothing"]) + "Smooth"
if args["invertTrainTest"]:
projName = projName + "_I"
args["projName"] = projName
tmpDIR = args["cbkDir"] + projName + "/"
if not createDir(tmpDIR):
printError("A project with same params exists")
tmpFileNames = args["projDir"] + "files.txt"
fileListWithClassJSON = args["projDir"] + "filelist.json"
fileListWithClass = args["projDir"] + "filelist.csv"
classes = None
classNames = []
if not os.path.exists(outDIR):
os.system("ls " + inDIR + " > " + tmpFileNames)
os.makedirs(outDIR)
printTitle("Validating and converting files")
with open(tmpFileNames, 'r') as filenames:
curFileNum = 0
for filename in filenames:
curFileNum = curFileNum + 1
sys.stdout.write("\r\t" + str(curFileNum))
sys.stdout.flush()
filename = filename[:-1]
if not os.path.isdir(filename):
validAndConvertFile(inDIR, outDIR, errDIR, filename)
sys.stdout.write('\n')
sys.stdout.flush()
printTitle("Associating classes")
os.system("ls " + outDIR + " > " + tmpFileNames)
with open(tmpFileNames) as f:
linesNoClass = f.readlines()
# os.remove(tmpFileNames) # TODO uncomment
with open(fileWithClass) as f:
linesWithClass = f.readlines()
curLine = 0
for line in linesWithClass:
curLine = curLine + 1
sys.stdout.write("\r\t" + str(curLine))
sys.stdout.flush()
tmpLine = find_between_r(line, "/", "\t") + "\n"
if tmpLine in linesNoClass:
itemPath, itemClass = extractPathAndClass(line)
if not classes:
classes = {itemClass: [itemPath]}
elif not itemClass in classes:
classes[itemClass] = [itemPath]
else:
classes[itemClass].append(itemPath)
sys.stdout.write('\n')
if args["nbFolds"] > 1:
with open(fileListWithClassJSON, 'w') as fp:
json.dump(classes, fp, sort_keys=True, indent=2)
for key in classes:
with open(fileListWithClass, 'a') as fp:
for line in classes[key]:
fp.write(str(line) + "\t" + str(key) + "\n")
classNames.append(key)
printTitle("Preprocessing done")
else:
if not os.path.isfile(fileListWithClass):
fileListWithClass = args["fileWithClass"]
with open(fileListWithClass, 'r') as fp:
for line in fp:
itemPath, itemClass = extractPathAndClass(line)
if not classes:
classes = {itemClass: [itemPath]}
elif not itemClass in classes:
classes[itemClass] = [itemPath]
else:
classes[itemClass].append(itemPath)
classNames.append(itemClass)
fileListWithClass = args["projDir"] + "filelist.csv"
if not os.path.isfile(fileListWithClass):
for key in classes:
with open(fileListWithClass, 'a') as fp:
for line in classes[key]:
fp.write(str(line) + "\t" + str(key) + "\n")
with open(fileListWithClassJSON, 'w') as fp:
json.dump(classes, fp, sort_keys=True, indent=2)
printTitle("Files already preprocessed")
args["classNames"] = list(set(classNames))
args["tmpDIR"] = tmpDIR
args["fileListWithClass"] = fileListWithClass
args["fileListWithClassJSON"] = fileListWithClassJSON
return args
def gatherArgs(argv):
parser = argparse.ArgumentParser(description="Use extracted features from YAAFE and classify them with VQMM.")
parser.add_argument(
"-v",
"--verbose",
help="increase output verbosity",
action="store_true")
parser.add_argument(
"-i",
"--invert",
help="invert train and test set",
action="store_true")
parser.add_argument(
"-d",
"--dir",
type=str,
metavar="DIR",
help="directory where YAAFE features are stored")
parser.add_argument(
"-f",
"--file",
type=str,
metavar="FILE",
help="file containing paths and classes separated by a tab")
parser.add_argument(
"-n",
"--nbFolds",
default=1,
type=int,
metavar="NBFOLDS",
help="number of Folds to be used for the classification, must be >= 1")
parser.add_argument(
"-r",
"--randFolds",
default=28,
type=int,
metavar="RANDFOLDS",
help="random seed used for splitting folds")
parser.add_argument(
"-s",
"--seedCbk",
type=int,
metavar="SEEDCBK",
help="random seed for vqmm codebook")
parser.add_argument(
"-c",
"--cbkSize",
type=int,
metavar="CBKSIZE",
help="size of the codebook")
parser.add_argument(
"-p",
"--pathVQMM",
type=str,
metavar="PATHVQMM",
help="path to Thibault Langlois' VQMM folder")
parser.add_argument(
"-e",
"--epsilon",
type=float,
metavar="EPSILON",
help="Epsilon defines the stopping criteria of k-means. It must be >= 0.")
parser.add_argument(
"-m",
"--smoothing",
type=float,
metavar="SMOOTHING",
help="Models' smoothing parameter. The events that has \
not occured during training cannot have null probability. \
It corresponds to the probability mass which is allocated to \
events which have not occured during training. \
Ex: -m 0.1 attributes 10 percent of probability to those events. \
If it is close to zero, the model does not generalize. \
The lower the parameter the higher the precision \
The higher the paramter, the higher the recall. \
In principle this parameter should be < 0.5 \
It must varies between [0.0 ; 1.0]")
tmpArgs = parser.parse_args()
pathVQMM = "./ThibaultLanglois_VQMM/"
if tmpArgs.pathVQMM:
pathVQMM = tmpArgs.pathVQMM
inDIR = "./data/"
fileWithClass = "./filelist.txt"
    verbose = False
    if tmpArgs.verbose:
        global VERBOSE
        verbose = True
        VERBOSE = True
invertTrainTest = False
if tmpArgs.invert:
invertTrainTest = True
nbFolds = 1
if tmpArgs.nbFolds:
if tmpArgs.nbFolds >= 1:
nbFolds = tmpArgs.nbFolds
else:
printError("Wrong number of Folds")
randSeedFold = 1
if tmpArgs.randFolds:
randSeedFold = tmpArgs.randFolds
randSeedCbk = 50
if tmpArgs.seedCbk:
randSeedCbk = tmpArgs.seedCbk
epsilon = 0.00001
if tmpArgs.epsilon:
epsilon = tmpArgs.epsilon
if epsilon < 0:
epsilon = 0.00001
printWarning("Epsilon cannot be lower than 0\nEpsilon set to " + str(epsilon))
epsilon = str(epsilon)
smoothing = 0.000000001
if tmpArgs.smoothing:
smoothing = tmpArgs.smoothing
if smoothing < 0:
smoothing = 0.00001
printWarning("Smoothing cannot be lower than 0\nSmoothing set to " + str(smoothing))
elif smoothing >= 0.5:
printWarning("Unexpected behavior when Smoothing is greater than 0.5")
smoothing = str(smoothing)
cbkSize = 75
if tmpArgs.cbkSize:
cbkSize = tmpArgs.cbkSize
if tmpArgs.dir and tmpArgs.file:
if os.path.exists(tmpArgs.dir):
inDIR = tmpArgs.dir
else:
printError("Folder does not exists : " + tmpArgs.dir)
if os.path.isfile(tmpArgs.file):
fileWithClass = tmpArgs.file
else:
printError("File does not exists : " + tmpArgs.dir)
elif tmpArgs.dir != tmpArgs.file:
printError("You must input an input dir AND a filelist with paths and classes")
printMsg("Sample folder " + inDIR)
printMsg("Path and classes stored in " + fileWithClass)
printMsg("Number of Folds " + str(nbFolds))
printMsg("Random Seed for Folds " + str(randSeedFold))
printMsg("Random Seed for Codebook " + str(randSeedCbk))
printMsg("Codebook size " + str(cbkSize))
printMsg("Invert Train and Test Set " + str(invertTrainTest))
printMsg("Path to Thibault Langlois' VQMM folder " + pathVQMM)
args = {"inDIR":inDIR}
args["fileWithClass"] = fileWithClass
args["verbose"] = verbose
args["invertTrainTest"] = invertTrainTest
args["nbFolds"] = nbFolds
args["randSeedFold"] = randSeedFold
args["randSeedCbk"] = randSeedCbk
args["cbkSize"] = cbkSize
args["pathVQMM"] = pathVQMM
args["epsilon"] = epsilon
args["smoothing"] = smoothing
args["analysisFolder"] = "./analysis/"
return args
def generateFolds(args):
printTitle("Generating random split for folds")
fileListWithClassJSON = args["fileListWithClassJSON"]
nbFolds = args["nbFolds"]
randSeedFold = args["randSeedFold"]
invertTrainTest = args["invertTrainTest"]
np.random.seed(randSeedFold)
with open(fileListWithClassJSON) as data_file:
paths = json.load(data_file)
# os.remove(fileListWithClassJSON) # TODO uncomment
tmpSelected = []
for i in range(0, args["nbFolds"]):
tmpSelected.append([])
for key in paths:
newSize = int(round(len(paths[key])/nbFolds))
if 0 == newSize:
printError("The number of folds is greater than the number of data available")
selected = np.random.choice(paths[key], size=newSize, replace=False)
tmpSelected[0] = selected
remain = list(set(paths[key]) - set(selected))
for n in range(1, args["nbFolds"]-1):
tmpSel = np.random.choice(remain, size=newSize, replace=False)
sel = tmpSel
remain = list(set(remain) - set(sel))
tmpSelected[n] = sel
tmpSelected[-1] = remain
foldsName = []
for i in range(0, nbFolds):
foldFileName = args["tmpDIR"] + "fold" + str(i+1) + ".csv"
with open(foldFileName, "a") as fold:
try:
for line in tmpSelected[i]:
fold.write(str(line) + "\t" + str(key) + "\n")
except:
printError("The number of folds is greater than the number of data available")
foldsName.append(foldFileName)
args["foldsName"] = list(set(foldsName))
def main(argv):
"""Description of main
    :param argv: command line arguments, parsed by gatherArgs (run with --help for details)
    :type argv: list
    :Example:
    python vqmm.py -d /path/YAAFE/ -f /path/fileList.txt
    .. note:: the file given with -f must contain, on each line, a path followed by a tab and the item's class
"""
args = gatherArgs(argv)
printInfo("Approx. 700ms per file: go grab a tea!")
args = preprocess(args)
runVQMM(args)
printInfo("More details available in " + os.path.abspath(args["analysisFolder"]))
printTitle("Finished in " + str(int(round(time.time() * 1000)) - begin) + "ms")
if __name__ == "__main__":
main(sys.argv[1:])
| mit |
sniemi/EuclidVisibleInstrument | support/gaussians.py | 1 | 7161 | """
Functions related to 2D gaussian functions and comparing ellipticities
derived either analytically or using quadrupole moments.
:requires: NumPy
:requires: matplotlib
:author: Sami-Matias Niemi
:contact: [email protected]
:version: 0.2
"""
import matplotlib
matplotlib.use('PDF')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import numpy as np
import math, datetime, pprint
from analysis import shape
from support import logger as lg
from support import files
def Gaussian2D(x, y, sizex, sizey, sigmax, sigmay):
"""
    Create a 2D Gaussian centered on (x, y); it is circularly symmetric only when
    sigmax equals sigmay.
    :param x: x coordinate of the centre
    :type x: float
    :param y: y coordinate of the centre
    :type y: float
    :param sizex: size of the grid in x direction [pixels]
    :type sizex: int
    :param sizey: size of the grid in y direction [pixels]
    :type sizey: int
    :param sigmax: standard deviation of the Gaussian in x-direction
    :type sigmax: float
    :param sigmay: standard deviation of the Gaussian in y-direction
    :type sigmay: float
    :return: 2D Gaussian profile and the x and y mesh grids
:rtype: dict
"""
#x and y coordinate vectors
Gyvect = np.arange(1, sizey + 1)
Gxvect = np.arange(1, sizex + 1)
#meshgrid
Gxmesh, Gymesh = np.meshgrid(Gxvect, Gyvect)
#normalizers
sigx = 1. / (2. * sigmax**2)
sigy = 1. / (2. * sigmay**2)
#gaussian
exponent = (sigx * (Gxmesh - x)**2 + sigy * (Gymesh - y)**2)
Gaussian = np.exp(-exponent) / (2. * math.pi * sigmax*sigmay)
output = dict(GaussianXmesh=Gxmesh, GaussianYmesh=Gymesh, Gaussian=Gaussian)
return output
def plot3D(data):
"""
Plot a 3d image of the input data. Assumes that the input dictionary
contains X, Y, and Z.
:param data: input data including X and Y mesh and Z-values
:type data: dict
"""
fig = plt.figure(figsize=(12,12))
rect = fig.add_subplot(111, visible=False).get_position()
ax = Axes3D(fig, rect)
surf = ax.plot_surface(data['GaussianXmesh'],
data['GaussianYmesh'],
data['Gaussian'],
rstride=1, cstride=1, cmap=cm.jet, linewidth=0, antialiased=False)
fig.colorbar(surf, shrink=0.5, aspect=10)
plt.savefig('gaussian.pdf')
def plotEllipticityDependency(data, ellipticity, log):
"""
Generate a simple plot: size of the Gaussian weighting function vs. derived ellipticity.
"""
x = []
y = []
for sigma in range(1, 50):
settings = dict(sigma=sigma)
sh = shape.shapeMeasurement(data, log, **settings)
results = sh.measureRefinedEllipticity()
x.append(sigma)
y.append(results['ellipticity'])
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x, y, 'bo-')
ax.plot([min(x), max(x)], [ellipticity, ellipticity], 'k--')
ax.set_xlabel(r'Gaussian Weighting $\sigma$ [arcseconds]')
ax.set_ylabel('Measured Ellipticity')
ax.set_ylim(0, 1.01)
plt.savefig('EvsSigma.pdf')
plt.close()
def ellipticityFromSigmas(sigmax, sigmay):
"""
Calculate ellipticity from standard deviations of a 2D Gaussian.
:param sigmax: standard deviation in x direction
:type sigmax: float or ndarray
:param sigmay: standard deviation in y direction
:type sigmay: float or ndarray
:return: ellipticity
:rtype: float or ndarray
"""
e = (np.float(sigmax)**2 - sigmay**2) / (sigmax**2 + sigmay**2)
return np.abs(e)
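# Worked example with the values used in __main__ below: sigmax = 27.25 and sigmay = 14.15
# give e = (27.25**2 - 14.15**2) / (27.25**2 + 14.15**2) = 542.34 / 942.785 ~ 0.575.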
def size():
"""
:requires: sympy
"""
from sympy import Symbol
from sympy import integrate, exp, pi
x = Symbol('x')
y = Symbol('y')
mu = Symbol('mu')
sigma = Symbol('sigma')
tmpx = (x - mu)
tmpy = (y - mu)
integrand = (1/(2*pi*sigma**2)) * exp(-((tmpx**2 + tmpy**2) / (2*sigma**2) ))
res = integrate(integrand, x)
pprint.pprint(res)
def measureGaussianR2(log):
#gaussian
sigma = 2. / (2. * math.sqrt(2.*math.log(2)))
Gaussian = shape.shapeMeasurement(np.zeros((100, 100)), log).circular2DGaussian(50, 50, sigma)['Gaussian']
settings = dict(sigma=sigma, weighted=False)
sh = shape.shapeMeasurement(Gaussian, log, **settings)
results = sh.measureRefinedEllipticity()
print
print results['R2']
print
#sh.writeFITS(Gaussian, 'GaussianSmall.fits')
def testFiles():
#testing part, looks for blob?.fits and psf.fits to derive centroids and ellipticity
import pyfits as pf
import glob as g
from support import logger as lg
import sys
files = g.glob('blob?.fits')
log = lg.setUpLogger('shape.log')
log.info('Testing shape measuring class...')
for file in files:
log.info('Processing file %s' % file)
data = pf.getdata(file)
sh = shape.shapeMeasurement(data, log)
results = sh.measureRefinedEllipticity()
sh.writeFITS(results['GaussianWeighted'], file.replace('.fits', 'Gweighted.fits'))
print file
pprint.pprint(results)
print
file = 'psf1x.fits'
log.info('Processing file %s' % file)
data = pf.getdata(file)
sh = shape.shapeMeasurement(data, log)
results = sh.measureRefinedEllipticity()
sh.writeFITS(results['GaussianWeighted'], file.replace('.fits', 'Gweighted.fits'))
print file
pprint.pprint(results)
print
file = 'stamp.fits'
log.info('Processing file %s' % file)
data = pf.getdata(file)
settings = dict(sigma=10.0)
sh = shape.shapeMeasurement(data, log, **settings)
results = sh.measureRefinedEllipticity()
sh.writeFITS(results['GaussianWeighted'], file.replace('.fits', 'Gweighted.fits'))
print file
pprint.pprint(results)
print
file = 'gaussian.fits'
log.info('Processing file %s' % file)
data = pf.getdata(file)
settings = dict(sampling=0.2)
sh = shape.shapeMeasurement(data, log, **settings)
results = sh.measureRefinedEllipticity()
sh.writeFITS(results['GaussianWeighted'], file.replace('.fits', 'Gweighted.fits'))
print file
pprint.pprint(results)
print
log.info('All done\n\n')
if __name__ == '__main__':
log = lg.setUpLogger('gaussians.log')
log.info('Testing gaussians...')
xsize, ysize = 300, 300
xcen, ycen = 150, 150
sigmax = 27.25
sigmay = 14.15
#calculate ellipticity from Sigmas
e = ellipticityFromSigmas(sigmax, sigmay)
#generate a 2D gaussian with given properties...
gaussian2d = Gaussian2D(xcen, ycen, xsize, ysize, sigmax, sigmay)
#plot
plot3D(gaussian2d)
#write FITS file
files.writeFITS(gaussian2d['Gaussian'], 'gaussian.fits')
#calculate shape and printout results
settings = dict(sigma=15., weighted=False)
sh = shape.shapeMeasurement(gaussian2d['Gaussian'], log, **settings)
results = sh.measureRefinedEllipticity()
print
pprint.pprint(results)
print e, (e - results['ellipticity']) / e * 100.
#generate a plot sigma vs ellipticity for a given Gaussian
plotEllipticityDependency(gaussian2d['Gaussian'], e, log)
#measureGaussianR2
measureGaussianR2(log)
#derive FWHM - R2 relation... not really working
#size()
#test many files
testFiles()
log.info('All done\n\n') | bsd-2-clause |
prashantvyas/cuda-convnet2 | convdata.py | 174 | 14675 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from python_util.data import *
import numpy.random as nr
import numpy as n
import random as r
from time import time
from threading import Thread
from math import sqrt
import sys
#from matplotlib import pylab as pl
from PIL import Image
from StringIO import StringIO
from time import time
import itertools as it
class JPEGBatchLoaderThread(Thread):
def __init__(self, dp, batch_num, label_offset, list_out):
Thread.__init__(self)
self.list_out = list_out
self.label_offset = label_offset
self.dp = dp
self.batch_num = batch_num
@staticmethod
def load_jpeg_batch(rawdics, dp, label_offset):
if type(rawdics) != list:
rawdics = [rawdics]
nc_total = sum(len(r['data']) for r in rawdics)
jpeg_strs = list(it.chain.from_iterable(rd['data'] for rd in rawdics))
labels = list(it.chain.from_iterable(rd['labels'] for rd in rawdics))
img_mat = n.empty((nc_total * dp.data_mult, dp.inner_pixels * dp.num_colors), dtype=n.float32)
lab_mat = n.zeros((nc_total, dp.get_num_classes()), dtype=n.float32)
dp.convnet.libmodel.decodeJpeg(jpeg_strs, img_mat, dp.img_size, dp.inner_size, dp.test, dp.multiview)
lab_vec = n.tile(n.asarray([(l[nr.randint(len(l))] if len(l) > 0 else -1) + label_offset for l in labels], dtype=n.single).reshape((nc_total, 1)), (dp.data_mult,1))
for c in xrange(nc_total):
lab_mat[c, [z + label_offset for z in labels[c]]] = 1
lab_mat = n.tile(lab_mat, (dp.data_mult, 1))
return {'data': img_mat[:nc_total * dp.data_mult,:],
'labvec': lab_vec[:nc_total * dp.data_mult,:],
'labmat': lab_mat[:nc_total * dp.data_mult,:]}
def run(self):
rawdics = self.dp.get_batch(self.batch_num)
p = JPEGBatchLoaderThread.load_jpeg_batch(rawdics,
self.dp,
self.label_offset)
self.list_out.append(p)
class ColorNoiseMakerThread(Thread):
def __init__(self, pca_stdevs, pca_vecs, num_noise, list_out):
Thread.__init__(self)
self.pca_stdevs, self.pca_vecs = pca_stdevs, pca_vecs
self.num_noise = num_noise
self.list_out = list_out
def run(self):
noise = n.dot(nr.randn(self.num_noise, 3).astype(n.single) * self.pca_stdevs.T, self.pca_vecs.T)
self.list_out.append(noise)
class ImageDataProvider(LabeledDataProvider):
def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params=None, test=False):
LabeledDataProvider.__init__(self, data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)
self.data_mean = self.batch_meta['data_mean'].astype(n.single)
self.color_eig = self.batch_meta['color_pca'][1].astype(n.single)
self.color_stdevs = n.c_[self.batch_meta['color_pca'][0].astype(n.single)]
self.color_noise_coeff = dp_params['color_noise']
self.num_colors = 3
self.img_size = int(sqrt(self.batch_meta['num_vis'] / self.num_colors))
self.mini = dp_params['minibatch_size']
self.inner_size = dp_params['inner_size'] if dp_params['inner_size'] > 0 else self.img_size
self.inner_pixels = self.inner_size **2
self.border_size = (self.img_size - self.inner_size) / 2
self.multiview = dp_params['multiview_test'] and test
self.num_views = 5*2
self.data_mult = self.num_views if self.multiview else 1
self.batch_size = self.batch_meta['batch_size']
self.label_offset = 0 if 'label_offset' not in self.batch_meta else self.batch_meta['label_offset']
self.scalar_mean = dp_params['scalar_mean']
# Maintain pointers to previously-returned data matrices so they don't get garbage collected.
self.data = [None, None] # These are pointers to previously-returned data matrices
self.loader_thread, self.color_noise_thread = None, None
self.convnet = dp_params['convnet']
self.num_noise = self.batch_size
self.batches_generated, self.loaders_started = 0, 0
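        # Crop the stored full-image mean down to the central inner_size x inner_size region so
        # it lines up with the cropped training images; if a scalar mean was requested it
        # replaces the cropped mean just below.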
self.data_mean_crop = self.data_mean.reshape((self.num_colors,self.img_size,self.img_size))[:,self.border_size:self.border_size+self.inner_size,self.border_size:self.border_size+self.inner_size].reshape((1,3*self.inner_size**2))
if self.scalar_mean >= 0:
self.data_mean_crop = self.scalar_mean
def showimg(self, img):
from matplotlib import pylab as pl
pixels = img.shape[0] / 3
size = int(sqrt(pixels))
img = img.reshape((3,size,size)).swapaxes(0,2).swapaxes(0,1)
pl.imshow(img, interpolation='nearest')
pl.show()
def get_data_dims(self, idx=0):
if idx == 0:
return self.inner_size**2 * 3
if idx == 2:
return self.get_num_classes()
return 1
def start_loader(self, batch_idx):
self.load_data = []
self.loader_thread = JPEGBatchLoaderThread(self,
self.batch_range[batch_idx],
self.label_offset,
self.load_data)
self.loader_thread.start()
def start_color_noise_maker(self):
color_noise_list = []
self.color_noise_thread = ColorNoiseMakerThread(self.color_stdevs, self.color_eig, self.num_noise, color_noise_list)
self.color_noise_thread.start()
return color_noise_list
def set_labels(self, datadic):
pass
def get_data_from_loader(self):
if self.loader_thread is None:
self.start_loader(self.batch_idx)
self.loader_thread.join()
self.data[self.d_idx] = self.load_data[0]
self.start_loader(self.get_next_batch_idx())
else:
# Set the argument to join to 0 to re-enable batch reuse
self.loader_thread.join()
if not self.loader_thread.is_alive():
self.data[self.d_idx] = self.load_data[0]
self.start_loader(self.get_next_batch_idx())
#else:
# print "Re-using batch"
self.advance_batch()
def add_color_noise(self):
# At this point the data already has 0 mean.
# So I'm going to add noise to it, but I'm also going to scale down
# the original data. This is so that the overall scale of the training
# data doesn't become too different from the test data.
s = self.data[self.d_idx]['data'].shape
cropped_size = self.get_data_dims(0) / 3
ncases = s[0]
if self.color_noise_thread is None:
self.color_noise_list = self.start_color_noise_maker()
self.color_noise_thread.join()
self.color_noise = self.color_noise_list[0]
self.color_noise_list = self.start_color_noise_maker()
else:
self.color_noise_thread.join(0)
if not self.color_noise_thread.is_alive():
self.color_noise = self.color_noise_list[0]
self.color_noise_list = self.start_color_noise_maker()
self.data[self.d_idx]['data'] = self.data[self.d_idx]['data'].reshape((ncases*3, cropped_size))
self.color_noise = self.color_noise[:ncases,:].reshape((3*ncases, 1))
self.data[self.d_idx]['data'] += self.color_noise * self.color_noise_coeff
self.data[self.d_idx]['data'] = self.data[self.d_idx]['data'].reshape((ncases, 3* cropped_size))
self.data[self.d_idx]['data'] *= 1.0 / (1.0 + self.color_noise_coeff) # <--- NOTE: This is the slow line, 0.25sec. Down from 0.75sec when I used division.
def get_next_batch(self):
self.d_idx = self.batches_generated % 2
epoch, batchnum = self.curr_epoch, self.curr_batchnum
self.get_data_from_loader()
# Subtract mean
self.data[self.d_idx]['data'] -= self.data_mean_crop
if self.color_noise_coeff > 0 and not self.test:
self.add_color_noise()
self.batches_generated += 1
return epoch, batchnum, [self.data[self.d_idx]['data'].T, self.data[self.d_idx]['labvec'].T, self.data[self.d_idx]['labmat'].T]
# Takes as input an array returned by get_next_batch
# Returns a (numCases, imgSize, imgSize, 3) array which can be
# fed to pylab for plotting.
# This is used by shownet.py to plot test case predictions.
def get_plottable_data(self, data, add_mean=True):
mean = self.data_mean_crop.reshape((data.shape[0],1)) if data.flags.f_contiguous or self.scalar_mean else self.data_mean_crop.reshape((data.shape[0],1))
return n.require((data + (mean if add_mean else 0)).T.reshape(data.shape[1], 3, self.inner_size, self.inner_size).swapaxes(1,3).swapaxes(1,2) / 255.0, dtype=n.single)
class CIFARDataProvider(LabeledDataProvider):
def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params=None, test=False):
LabeledDataProvider.__init__(self, data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)
self.img_size = 32
self.num_colors = 3
self.inner_size = dp_params['inner_size'] if dp_params['inner_size'] > 0 else self.batch_meta['img_size']
self.border_size = (self.img_size - self.inner_size) / 2
self.multiview = dp_params['multiview_test'] and test
self.num_views = 9
self.scalar_mean = dp_params['scalar_mean']
self.data_mult = self.num_views if self.multiview else 1
self.data_dic = []
for i in batch_range:
self.data_dic += [unpickle(self.get_data_file_name(i))]
self.data_dic[-1]["labels"] = n.require(self.data_dic[-1]['labels'], dtype=n.single)
self.data_dic[-1]["labels"] = n.require(n.tile(self.data_dic[-1]["labels"].reshape((1, n.prod(self.data_dic[-1]["labels"].shape))), (1, self.data_mult)), requirements='C')
self.data_dic[-1]['data'] = n.require(self.data_dic[-1]['data'] - self.scalar_mean, dtype=n.single, requirements='C')
self.cropped_data = [n.zeros((self.get_data_dims(), self.data_dic[0]['data'].shape[1]*self.data_mult), dtype=n.single) for x in xrange(2)]
self.batches_generated = 0
self.data_mean = self.batch_meta['data_mean'].reshape((self.num_colors,self.img_size,self.img_size))[:,self.border_size:self.border_size+self.inner_size,self.border_size:self.border_size+self.inner_size].reshape((self.get_data_dims(), 1))
def get_next_batch(self):
epoch, batchnum = self.curr_epoch, self.curr_batchnum
self.advance_batch()
bidx = batchnum - self.batch_range[0]
cropped = self.cropped_data[self.batches_generated % 2]
self.__trim_borders(self.data_dic[bidx]['data'], cropped)
cropped -= self.data_mean
self.batches_generated += 1
return epoch, batchnum, [cropped, self.data_dic[bidx]['labels']]
def get_data_dims(self, idx=0):
return self.inner_size**2 * self.num_colors if idx == 0 else 1
# Takes as input an array returned by get_next_batch
# Returns a (numCases, imgSize, imgSize, 3) array which can be
# fed to pylab for plotting.
# This is used by shownet.py to plot test case predictions.
def get_plottable_data(self, data):
return n.require((data + self.data_mean).T.reshape(data.shape[1], 3, self.inner_size, self.inner_size).swapaxes(1,3).swapaxes(1,2) / 255.0, dtype=n.single)
def __trim_borders(self, x, target):
y = x.reshape(self.num_colors, self.img_size, self.img_size, x.shape[1])
if self.test: # don't need to loop over cases
if self.multiview:
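                # The 9 test views are the 3x3 grid of inner_size crops whose top-left corners
                # are offset by 0, border_size and 2*border_size in each dimension (the four
                # corners, four edge midpoints and the center of the full image).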
start_positions = [(0,0), (0, self.border_size), (0, self.border_size*2),
(self.border_size, 0), (self.border_size, self.border_size), (self.border_size, self.border_size*2),
(self.border_size*2, 0), (self.border_size*2, self.border_size), (self.border_size*2, self.border_size*2)]
end_positions = [(sy+self.inner_size, sx+self.inner_size) for (sy,sx) in start_positions]
for i in xrange(self.num_views):
target[:,i * x.shape[1]:(i+1)* x.shape[1]] = y[:,start_positions[i][0]:end_positions[i][0],start_positions[i][1]:end_positions[i][1],:].reshape((self.get_data_dims(),x.shape[1]))
else:
pic = y[:,self.border_size:self.border_size+self.inner_size,self.border_size:self.border_size+self.inner_size, :] # just take the center for now
target[:,:] = pic.reshape((self.get_data_dims(), x.shape[1]))
else:
for c in xrange(x.shape[1]): # loop over cases
startY, startX = nr.randint(0,self.border_size*2 + 1), nr.randint(0,self.border_size*2 + 1)
endY, endX = startY + self.inner_size, startX + self.inner_size
pic = y[:,startY:endY,startX:endX, c]
if nr.randint(2) == 0: # also flip the image with 50% probability
pic = pic[:,:,::-1]
target[:,c] = pic.reshape((self.get_data_dims(),))
class DummyConvNetLogRegDataProvider(LabeledDummyDataProvider):
def __init__(self, data_dim):
LabeledDummyDataProvider.__init__(self, data_dim)
self.img_size = int(sqrt(data_dim/3))
def get_next_batch(self):
epoch, batchnum, dic = LabeledDummyDataProvider.get_next_batch(self)
dic = {'data': dic[0], 'labels': dic[1]}
print dic['data'].shape, dic['labels'].shape
return epoch, batchnum, [dic['data'], dic['labels']]
# Returns the dimensionality of the two data matrices returned by get_next_batch
def get_data_dims(self, idx=0):
return self.batch_meta['num_vis'] if idx == 0 else 1
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.