repo_name (string, lengths 6-112) | path (string, lengths 4-204) | copies (string, lengths 1-3) | size (string, lengths 4-6) | content (string, lengths 714-810k) | license (string, 15 classes)
---|---|---|---|---|---
harisbal/pandas | pandas/core/resample.py | 2 | 53145 | from datetime import timedelta
import numpy as np
import warnings
import copy
from textwrap import dedent
import pandas as pd
from pandas.core.groupby.base import GroupByMixin
from pandas.core.groupby.ops import BinGrouper
from pandas.core.groupby.groupby import (
_GroupBy, GroupBy, groupby, _pipe_template
)
from pandas.core.groupby.grouper import Grouper
from pandas.core.groupby.generic import SeriesGroupBy, PanelGroupBy
from pandas.tseries.frequencies import to_offset, is_subperiod, is_superperiod
from pandas.core.indexes.datetimes import DatetimeIndex, date_range
from pandas.core.indexes.timedeltas import TimedeltaIndex
from pandas.tseries.offsets import (DateOffset, Tick, Day,
delta_to_nanoseconds, Nano)
from pandas.core.indexes.period import PeriodIndex
from pandas.errors import AbstractMethodError
import pandas.core.algorithms as algos
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
import pandas.compat as compat
from pandas.compat.numpy import function as nv
from pandas._libs import lib
from pandas._libs.tslibs import Timestamp, NaT
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.util._decorators import Appender, Substitution
from pandas.core.generic import _shared_docs
_shared_docs_kwargs = dict()
class Resampler(_GroupBy):
"""
Class for resampling datetimelike data, a groupby-like operation.
See aggregate, transform, and apply functions on this object.
It's easiest to use obj.resample(...) to obtain a Resampler.
Parameters
----------
obj : pandas object
groupby : a TimeGrouper object
axis : int, default 0
kind : str or None
'period', 'timestamp' to override default index treatment
Notes
-----
After resampling, see aggregate, apply, and transform functions.
Returns
-------
a Resampler of the appropriate type
"""
# attributes that are delegated to the underlying groupby (TimeGrouper) object
_attributes = ['freq', 'axis', 'closed', 'label', 'convention',
'loffset', 'base', 'kind']
def __init__(self, obj, groupby=None, axis=0, kind=None, **kwargs):
self.groupby = groupby
self.keys = None
self.sort = True
self.axis = axis
self.kind = kind
self.squeeze = False
self.group_keys = True
self.as_index = True
self.exclusions = set()
self.binner = None
self.grouper = None
if self.groupby is not None:
self.groupby._set_grouper(self._convert_obj(obj), sort=True)
def __unicode__(self):
""" provide a nice str repr of our rolling object """
attrs = ["{k}={v}".format(k=k, v=getattr(self.groupby, k))
for k in self._attributes if
getattr(self.groupby, k, None) is not None]
return "{klass} [{attrs}]".format(klass=self.__class__.__name__,
attrs=', '.join(attrs))
def __getattr__(self, attr):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self._attributes:
return getattr(self.groupby, attr)
if attr in self.obj:
return self[attr]
return object.__getattribute__(self, attr)
def __iter__(self):
"""
Resampler iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
See Also
--------
GroupBy.__iter__
"""
self._set_binner()
return super(Resampler, self).__iter__()
@property
def obj(self):
return self.groupby.obj
@property
def ax(self):
return self.groupby.ax
@property
def _typ(self):
""" masquerade for compat as a Series or a DataFrame """
if isinstance(self._selected_obj, pd.Series):
return 'series'
return 'dataframe'
@property
def _from_selection(self):
""" is the resampling from a DataFrame column or MultiIndex level """
# upsampling and PeriodIndex resampling do not work
# with selection; this state is used to catch and raise an error
return (self.groupby is not None and
(self.groupby.key is not None or
self.groupby.level is not None))
def _convert_obj(self, obj):
"""
provide any conversions for the object in order to correctly handle resampling
Parameters
----------
obj : the object to be resampled
Returns
-------
obj : converted object
"""
obj = obj._consolidate()
return obj
def _get_binner_for_time(self):
raise AbstractMethodError(self)
def _set_binner(self):
"""
set up our binners; cache these as we are an immutable object
"""
if self.binner is None:
self.binner, self.grouper = self._get_binner()
def _get_binner(self):
"""
create the BinGrouper, assume that self.set_grouper(obj)
has already been called
"""
binner, bins, binlabels = self._get_binner_for_time()
bin_grouper = BinGrouper(bins, binlabels, indexer=self.groupby.indexer)
return binner, bin_grouper
def _assure_grouper(self):
""" make sure that we are creating our binner & grouper """
self._set_binner()
@Substitution(klass='Resampler',
versionadded='.. versionadded:: 0.23.0',
examples="""
>>> df = pd.DataFrame({'A': [1, 2, 3, 4]},
... index=pd.date_range('2012-08-02', periods=4))
>>> df
A
2012-08-02 1
2012-08-03 2
2012-08-04 3
2012-08-05 4
To get the difference between each 2-day period's maximum and minimum value in
one pass, you can do
>>> df.resample('2D').pipe(lambda x: x.max() - x.min())
A
2012-08-02 1
2012-08-04 1""")
@Appender(_pipe_template)
def pipe(self, func, *args, **kwargs):
return super(Resampler, self).pipe(func, *args, **kwargs)
_agg_doc = dedent("""
Examples
--------
>>> s = pd.Series([1, 2, 3, 4, 5],
...               index=pd.date_range('20130101', periods=5, freq='s'))
2013-01-01 00:00:00 1
2013-01-01 00:00:01 2
2013-01-01 00:00:02 3
2013-01-01 00:00:03 4
2013-01-01 00:00:04 5
Freq: S, dtype: int64
>>> r = s.resample('2s')
DatetimeIndexResampler [freq=<2 * Seconds>, axis=0, closed=left,
label=left, convention=start, base=0]
>>> r.agg(np.sum)
2013-01-01 00:00:00 3
2013-01-01 00:00:02 7
2013-01-01 00:00:04 5
Freq: 2S, dtype: int64
>>> r.agg(['sum','mean','max'])
sum mean max
2013-01-01 00:00:00 3 1.5 2
2013-01-01 00:00:02 7 3.5 4
2013-01-01 00:00:04 5 5.0 5
>>> r.agg({'result': lambda x: x.mean() / x.std(),
...        'total': np.sum})
total result
2013-01-01 00:00:00 3 2.121320
2013-01-01 00:00:02 7 4.949747
2013-01-01 00:00:04 5 NaN
See also
--------
pandas.DataFrame.groupby.aggregate
pandas.DataFrame.resample.transform
pandas.DataFrame.aggregate
""")
@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
klass='DataFrame',
versionadded='',
axis=''))
def aggregate(self, func, *args, **kwargs):
self._set_binner()
result, how = self._aggregate(func, *args, **kwargs)
if result is None:
how = func
grouper = None
result = self._groupby_and_aggregate(how,
grouper,
*args,
**kwargs)
result = self._apply_loffset(result)
return result
agg = aggregate
apply = aggregate
def transform(self, arg, *args, **kwargs):
"""
Call function producing a like-indexed Series on each group and return
a Series with the transformed values
Parameters
----------
func : function
To apply to each group. Should return a Series with the same index
Examples
--------
>>> resampled.transform(lambda x: (x - x.mean()) / x.std())
Returns
-------
transformed : Series
"""
return self._selected_obj.groupby(self.groupby).transform(
arg, *args, **kwargs)
def _downsample(self, f):
raise AbstractMethodError(self)
def _upsample(self, f, limit=None, fill_value=None):
raise AbstractMethodError(self)
def _gotitem(self, key, ndim, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
self._set_binner()
grouper = self.grouper
if subset is None:
subset = self.obj
grouped = groupby(subset, by=None, grouper=grouper, axis=self.axis)
# try the key selection
try:
return grouped[key]
except KeyError:
return grouped
def _groupby_and_aggregate(self, how, grouper=None, *args, **kwargs):
""" re-evaluate the obj with a groupby aggregation """
if grouper is None:
self._set_binner()
grouper = self.grouper
obj = self._selected_obj
try:
grouped = groupby(obj, by=None, grouper=grouper, axis=self.axis)
except TypeError:
# panel grouper
grouped = PanelGroupBy(obj, grouper=grouper, axis=self.axis)
try:
if isinstance(obj, ABCDataFrame) and compat.callable(how):
# Check if the function is reducing or not.
result = grouped._aggregate_item_by_item(how, *args, **kwargs)
else:
result = grouped.aggregate(how, *args, **kwargs)
except Exception:
# we have a non-reducing function
# try to evaluate
result = grouped.apply(how, *args, **kwargs)
result = self._apply_loffset(result)
return self._wrap_result(result)
def _apply_loffset(self, result):
"""
if loffset is set, offset the result index
This is NOT an idempotent routine, it will be applied
exactly once to the result.
Parameters
----------
result : Series or DataFrame
the result of resample
"""
needs_offset = (
isinstance(self.loffset, (DateOffset, timedelta,
np.timedelta64)) and
isinstance(result.index, DatetimeIndex) and
len(result.index) > 0
)
if needs_offset:
result.index = result.index + self.loffset
self.loffset = None
return result
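# Hedged illustration of the loffset behaviour handled above (comment added;
# not in the original source): loffset shifts the labels of the result index
# by a fixed offset after aggregation.
# >>> s = pd.Series(range(4), index=pd.date_range('2000-01-01', periods=4, freq='T'))
# >>> s.resample('2T').sum().index[0]                  # -> 2000-01-01 00:00:00
# >>> s.resample('2T', loffset='30s').sum().index[0]   # -> 2000-01-01 00:00:30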
def _get_resampler_for_grouping(self, groupby, **kwargs):
""" return the correct class for resampling with groupby """
return self._resampler_for_grouping(self, groupby=groupby, **kwargs)
def _wrap_result(self, result):
""" potentially wrap any results """
if isinstance(result, ABCSeries) and self._selection is not None:
result.name = self._selection
if isinstance(result, ABCSeries) and result.empty:
obj = self.obj
result.index = obj.index._shallow_copy(freq=to_offset(self.freq))
result.name = getattr(obj, 'name', None)
return result
def pad(self, limit=None):
"""
Forward fill the values
Parameters
----------
limit : integer, optional
limit of how many values to fill
Returns
-------
an upsampled Series
See Also
--------
Series.fillna
DataFrame.fillna
"""
return self._upsample('pad', limit=limit)
ffill = pad
def nearest(self, limit=None):
"""
Fill values with nearest neighbor starting from center
Parameters
----------
limit : integer, optional
limit of how many values to fill
.. versionadded:: 0.21.0
Returns
-------
an upsampled Series
See Also
--------
Series.fillna
DataFrame.fillna
"""
return self._upsample('nearest', limit=limit)
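# Example added for illustration (not in the original docstring): 'nearest'
# fills the upsampled gaps with the closest original observation.
# >>> s = pd.Series([1, 2], index=pd.date_range('2018-01-01', periods=2, freq='h'))
# >>> s.resample('20min').nearest()
# 2018-01-01 00:00:00    1
# 2018-01-01 00:20:00    1
# 2018-01-01 00:40:00    2
# 2018-01-01 01:00:00    2
# Freq: 20T, dtype: int64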
def backfill(self, limit=None):
"""
Backward fill the new missing values in the resampled data.
In statistics, imputation is the process of replacing missing data with
substituted values [1]_. When resampling data, missing values may
appear (e.g., when the resampling frequency is higher than the original
frequency). The backward fill will replace NaN values that appeared in
the resampled data with the next value in the original sequence.
Missing values that existed in the original data will not be modified.
Parameters
----------
limit : integer, optional
Limit of how many values to fill.
Returns
-------
Series, DataFrame
An upsampled Series or DataFrame with backward filled NaN values.
See Also
--------
bfill : Alias of backfill.
fillna : Fill NaN values using the specified method, which can be
'backfill'.
nearest : Fill NaN values with nearest neighbor starting from center.
pad : Forward fill NaN values.
pandas.Series.fillna : Fill NaN values in the Series using the
specified method, which can be 'backfill'.
pandas.DataFrame.fillna : Fill NaN values in the DataFrame using the
specified method, which can be 'backfill'.
References
----------
.. [1] https://en.wikipedia.org/wiki/Imputation_(statistics)
Examples
--------
Resampling a Series:
>>> s = pd.Series([1, 2, 3],
... index=pd.date_range('20180101', periods=3, freq='h'))
>>> s
2018-01-01 00:00:00 1
2018-01-01 01:00:00 2
2018-01-01 02:00:00 3
Freq: H, dtype: int64
>>> s.resample('30min').backfill()
2018-01-01 00:00:00 1
2018-01-01 00:30:00 2
2018-01-01 01:00:00 2
2018-01-01 01:30:00 3
2018-01-01 02:00:00 3
Freq: 30T, dtype: int64
>>> s.resample('15min').backfill(limit=2)
2018-01-01 00:00:00 1.0
2018-01-01 00:15:00 NaN
2018-01-01 00:30:00 2.0
2018-01-01 00:45:00 2.0
2018-01-01 01:00:00 2.0
2018-01-01 01:15:00 NaN
2018-01-01 01:30:00 3.0
2018-01-01 01:45:00 3.0
2018-01-01 02:00:00 3.0
Freq: 15T, dtype: float64
Resampling a DataFrame that has missing values:
>>> df = pd.DataFrame({'a': [2, np.nan, 6], 'b': [1, 3, 5]},
... index=pd.date_range('20180101', periods=3,
... freq='h'))
>>> df
a b
2018-01-01 00:00:00 2.0 1
2018-01-01 01:00:00 NaN 3
2018-01-01 02:00:00 6.0 5
>>> df.resample('30min').backfill()
a b
2018-01-01 00:00:00 2.0 1
2018-01-01 00:30:00 NaN 3
2018-01-01 01:00:00 NaN 3
2018-01-01 01:30:00 6.0 5
2018-01-01 02:00:00 6.0 5
>>> df.resample('15min').backfill(limit=2)
a b
2018-01-01 00:00:00 2.0 1.0
2018-01-01 00:15:00 NaN NaN
2018-01-01 00:30:00 NaN 3.0
2018-01-01 00:45:00 NaN 3.0
2018-01-01 01:00:00 NaN 3.0
2018-01-01 01:15:00 NaN NaN
2018-01-01 01:30:00 6.0 5.0
2018-01-01 01:45:00 6.0 5.0
2018-01-01 02:00:00 6.0 5.0
"""
return self._upsample('backfill', limit=limit)
bfill = backfill
def fillna(self, method, limit=None):
"""
Fill missing values introduced by upsampling.
In statistics, imputation is the process of replacing missing data with
substituted values [1]_. When resampling data, missing values may
appear (e.g., when the resampling frequency is higher than the original
frequency).
Missing values that existed in the original data will
not be modified.
Parameters
----------
method : {'pad', 'backfill', 'ffill', 'bfill', 'nearest'}
Method to use for filling holes in resampled data
* 'pad' or 'ffill': use previous valid observation to fill gap
(forward fill).
* 'backfill' or 'bfill': use next valid observation to fill gap.
* 'nearest': use nearest valid observation to fill gap.
limit : integer, optional
Limit of how many consecutive missing values to fill.
Returns
-------
Series or DataFrame
An upsampled Series or DataFrame with missing values filled.
See Also
--------
backfill : Backward fill NaN values in the resampled data.
pad : Forward fill NaN values in the resampled data.
nearest : Fill NaN values in the resampled data
with nearest neighbor starting from center.
interpolate : Fill NaN values using interpolation.
pandas.Series.fillna : Fill NaN values in the Series using the
specified method, which can be 'bfill' and 'ffill'.
pandas.DataFrame.fillna : Fill NaN values in the DataFrame using the
specified method, which can be 'bfill' and 'ffill'.
Examples
--------
Resampling a Series:
>>> s = pd.Series([1, 2, 3],
... index=pd.date_range('20180101', periods=3, freq='h'))
>>> s
2018-01-01 00:00:00 1
2018-01-01 01:00:00 2
2018-01-01 02:00:00 3
Freq: H, dtype: int64
Without filling the missing values you get:
>>> s.resample("30min").asfreq()
2018-01-01 00:00:00 1.0
2018-01-01 00:30:00 NaN
2018-01-01 01:00:00 2.0
2018-01-01 01:30:00 NaN
2018-01-01 02:00:00 3.0
Freq: 30T, dtype: float64
>>> s.resample('30min').fillna("backfill")
2018-01-01 00:00:00 1
2018-01-01 00:30:00 2
2018-01-01 01:00:00 2
2018-01-01 01:30:00 3
2018-01-01 02:00:00 3
Freq: 30T, dtype: int64
>>> s.resample('15min').fillna("backfill", limit=2)
2018-01-01 00:00:00 1.0
2018-01-01 00:15:00 NaN
2018-01-01 00:30:00 2.0
2018-01-01 00:45:00 2.0
2018-01-01 01:00:00 2.0
2018-01-01 01:15:00 NaN
2018-01-01 01:30:00 3.0
2018-01-01 01:45:00 3.0
2018-01-01 02:00:00 3.0
Freq: 15T, dtype: float64
>>> s.resample('30min').fillna("pad")
2018-01-01 00:00:00 1
2018-01-01 00:30:00 1
2018-01-01 01:00:00 2
2018-01-01 01:30:00 2
2018-01-01 02:00:00 3
Freq: 30T, dtype: int64
>>> s.resample('30min').fillna("nearest")
2018-01-01 00:00:00 1
2018-01-01 00:30:00 2
2018-01-01 01:00:00 2
2018-01-01 01:30:00 3
2018-01-01 02:00:00 3
Freq: 30T, dtype: int64
Missing values present before the upsampling are not affected.
>>> sm = pd.Series([1, None, 3],
... index=pd.date_range('20180101', periods=3, freq='h'))
>>> sm
2018-01-01 00:00:00 1.0
2018-01-01 01:00:00 NaN
2018-01-01 02:00:00 3.0
Freq: H, dtype: float64
>>> sm.resample('30min').fillna('backfill')
2018-01-01 00:00:00 1.0
2018-01-01 00:30:00 NaN
2018-01-01 01:00:00 NaN
2018-01-01 01:30:00 3.0
2018-01-01 02:00:00 3.0
Freq: 30T, dtype: float64
>>> sm.resample('30min').fillna('pad')
2018-01-01 00:00:00 1.0
2018-01-01 00:30:00 1.0
2018-01-01 01:00:00 NaN
2018-01-01 01:30:00 NaN
2018-01-01 02:00:00 3.0
Freq: 30T, dtype: float64
>>> sm.resample('30min').fillna('nearest')
2018-01-01 00:00:00 1.0
2018-01-01 00:30:00 NaN
2018-01-01 01:00:00 NaN
2018-01-01 01:30:00 3.0
2018-01-01 02:00:00 3.0
Freq: 30T, dtype: float64
DataFrame resampling is done column-wise. All the same options are
available.
>>> df = pd.DataFrame({'a': [2, np.nan, 6], 'b': [1, 3, 5]},
... index=pd.date_range('20180101', periods=3,
... freq='h'))
>>> df
a b
2018-01-01 00:00:00 2.0 1
2018-01-01 01:00:00 NaN 3
2018-01-01 02:00:00 6.0 5
>>> df.resample('30min').fillna("bfill")
a b
2018-01-01 00:00:00 2.0 1
2018-01-01 00:30:00 NaN 3
2018-01-01 01:00:00 NaN 3
2018-01-01 01:30:00 6.0 5
2018-01-01 02:00:00 6.0 5
References
----------
.. [1] https://en.wikipedia.org/wiki/Imputation_(statistics)
"""
return self._upsample(method, limit=limit)
@Appender(_shared_docs['interpolate'] % _shared_docs_kwargs)
def interpolate(self, method='linear', axis=0, limit=None, inplace=False,
limit_direction='forward', limit_area=None,
downcast=None, **kwargs):
"""
Interpolate values according to different methods.
.. versionadded:: 0.18.1
"""
result = self._upsample(None)
return result.interpolate(method=method, axis=axis, limit=limit,
inplace=inplace,
limit_direction=limit_direction,
limit_area=limit_area,
downcast=downcast, **kwargs)
def asfreq(self, fill_value=None):
"""
return the values at the new freq,
essentially a reindex
Parameters
----------
fill_value: scalar, optional
Value to use for missing values, applied during upsampling (note
this does not fill NaNs that already were present).
.. versionadded:: 0.20.0
See Also
--------
Series.asfreq
DataFrame.asfreq
"""
return self._upsample('asfreq', fill_value=fill_value)
def std(self, ddof=1, *args, **kwargs):
"""
Compute standard deviation of groups, excluding missing values
Parameters
----------
ddof : integer, default 1
degrees of freedom
"""
nv.validate_resampler_func('std', args, kwargs)
return self._downsample('std', ddof=ddof)
def var(self, ddof=1, *args, **kwargs):
"""
Compute variance of groups, excluding missing values
Parameters
----------
ddof : integer, default 1
degrees of freedom
"""
nv.validate_resampler_func('var', args, kwargs)
return self._downsample('var', ddof=ddof)
@Appender(GroupBy.size.__doc__)
def size(self):
# It's a special case as higher level does return
# a copy of 0-len objects. GH14962
result = self._downsample('size')
if not len(self.ax) and isinstance(self._selected_obj, ABCDataFrame):
result = pd.Series([], index=result.index, dtype='int64')
return result
def quantile(self, q=0.5, **kwargs):
"""
Return value at the given quantile.
.. versionadded:: 0.24.0
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
See Also
--------
Series.quantile
DataFrame.quantile
DataFrameGroupBy.quantile
"""
return self._downsample('quantile', q=q, **kwargs)
# downsample methods
for method in ['sum', 'prod']:
def f(self, _method=method, min_count=0, *args, **kwargs):
nv.validate_resampler_func(_method, args, kwargs)
return self._downsample(_method, min_count=min_count)
f.__doc__ = getattr(GroupBy, method).__doc__
setattr(Resampler, method, f)
# downsample methods
for method in ['min', 'max', 'first', 'last', 'mean', 'sem',
'median', 'ohlc']:
def f(self, _method=method, *args, **kwargs):
nv.validate_resampler_func(_method, args, kwargs)
return self._downsample(_method)
f.__doc__ = getattr(GroupBy, method).__doc__
setattr(Resampler, method, f)
# groupby & aggregate methods
for method in ['count']:
def f(self, _method=method):
return self._downsample(_method)
f.__doc__ = getattr(GroupBy, method).__doc__
setattr(Resampler, method, f)
# series only methods
for method in ['nunique']:
def f(self, _method=method):
return self._downsample(_method)
f.__doc__ = getattr(SeriesGroupBy, method).__doc__
setattr(Resampler, method, f)
def _maybe_process_deprecations(r, how=None, fill_method=None, limit=None):
""" potentially we might have a deprecation warning, show it
but call the appropriate methods anyhow """
if how is not None:
# .resample(..., how='sum')
if isinstance(how, compat.string_types):
method = "{0}()".format(how)
# .resample(..., how=lambda x: ....)
else:
method = ".apply(<func>)"
# if we have both a how and fill_method, then show
# the following warning
if fill_method is None:
warnings.warn("how in .resample() is deprecated\n"
"the new syntax is "
".resample(...).{method}".format(
method=method),
FutureWarning, stacklevel=3)
r = r.aggregate(how)
if fill_method is not None:
# show the prior function call
method = '.' + method if how is not None else ''
args = "limit={0}".format(limit) if limit is not None else ""
warnings.warn("fill_method is deprecated to .resample()\n"
"the new syntax is .resample(...){method}"
".{fill_method}({args})".format(
method=method,
fill_method=fill_method,
args=args),
FutureWarning, stacklevel=3)
if how is not None:
r = getattr(r, fill_method)(limit=limit)
else:
r = r.aggregate(fill_method, limit=limit)
return r
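# Rough old-to-new syntax mapping handled above (comment added for clarity;
# not in the original source):
#   .resample('D', how='sum')                        ->  .resample('D').sum()
#   .resample('D', fill_method='ffill')              ->  .resample('D').ffill()
#   .resample('D', how='sum', fill_method='ffill',
#             limit=2)                               ->  .resample('D').sum().ffill(limit=2)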
class _GroupByMixin(GroupByMixin):
""" provide the groupby facilities """
def __init__(self, obj, *args, **kwargs):
parent = kwargs.pop('parent', None)
groupby = kwargs.pop('groupby', None)
if parent is None:
parent = obj
# initialize our GroupByMixin object with
# the resampler attributes
for attr in self._attributes:
setattr(self, attr, kwargs.get(attr, getattr(parent, attr)))
super(_GroupByMixin, self).__init__(None)
self._groupby = groupby
self._groupby.mutated = True
self._groupby.grouper.mutated = True
self.groupby = copy.copy(parent.groupby)
def _apply(self, f, grouper=None, *args, **kwargs):
"""
dispatch to _upsample; we are stripping all of the _upsample kwargs and
performing the original function call on the grouped object
"""
def func(x):
x = self._shallow_copy(x, groupby=self.groupby)
if isinstance(f, compat.string_types):
return getattr(x, f)(**kwargs)
return x.apply(f, *args, **kwargs)
result = self._groupby.apply(func)
return self._wrap_result(result)
_upsample = _apply
_downsample = _apply
_groupby_and_aggregate = _apply
class DatetimeIndexResampler(Resampler):
@property
def _resampler_for_grouping(self):
return DatetimeIndexResamplerGroupby
def _get_binner_for_time(self):
# this is how we are actually creating the bins
if self.kind == 'period':
return self.groupby._get_time_period_bins(self.ax)
return self.groupby._get_time_bins(self.ax)
def _downsample(self, how, **kwargs):
"""
Downsample using the given function (which may map to a cython implementation)
Parameters
----------
how : string / cython mapped function
**kwargs : kw args passed to how function
"""
self._set_binner()
how = self._is_cython_func(how) or how
ax = self.ax
obj = self._selected_obj
if not len(ax):
# reset to the new freq
obj = obj.copy()
obj.index.freq = self.freq
return obj
# do we have a regular frequency
if ax.freq is not None or ax.inferred_freq is not None:
if len(self.grouper.binlabels) > len(ax) and how is None:
# let's do an asfreq
return self.asfreq()
# we are downsampling
# we want to call the actual grouper method here
result = obj.groupby(
self.grouper, axis=self.axis).aggregate(how, **kwargs)
result = self._apply_loffset(result)
return self._wrap_result(result)
def _adjust_binner_for_upsample(self, binner):
"""
Adjust our binner when upsampling.
The range of the new index should not be outside the specified range
"""
if self.closed == 'right':
binner = binner[1:]
else:
binner = binner[:-1]
return binner
def _upsample(self, method, limit=None, fill_value=None):
"""
method : string {'backfill', 'bfill', 'pad',
'ffill', 'asfreq'} method for upsampling
limit : int, default None
Maximum size gap to fill when reindexing
fill_value : scalar, default None
Value to use for missing values
See also
--------
.fillna
"""
self._set_binner()
if self.axis:
raise AssertionError('axis must be 0')
if self._from_selection:
raise ValueError("Upsampling from level= or on= selection"
" is not supported, use .set_index(...)"
" to explicitly set index to"
" datetime-like")
ax = self.ax
obj = self._selected_obj
binner = self.binner
res_index = self._adjust_binner_for_upsample(binner)
# if we have the same frequency as our axis, then we are equal sampling
if limit is None and to_offset(ax.inferred_freq) == self.freq:
result = obj.copy()
result.index = res_index
else:
result = obj.reindex(res_index, method=method,
limit=limit, fill_value=fill_value)
result = self._apply_loffset(result)
return self._wrap_result(result)
def _wrap_result(self, result):
result = super(DatetimeIndexResampler, self)._wrap_result(result)
# we may have a different kind than we were asked for originally;
# convert if needed
if self.kind == 'period' and not isinstance(result.index, PeriodIndex):
result.index = result.index.to_period(self.freq)
return result
class DatetimeIndexResamplerGroupby(_GroupByMixin, DatetimeIndexResampler):
"""
Provides resample functionality on top of a groupby implementation
.. versionadded:: 0.18.1
"""
@property
def _constructor(self):
return DatetimeIndexResampler
class PeriodIndexResampler(DatetimeIndexResampler):
@property
def _resampler_for_grouping(self):
return PeriodIndexResamplerGroupby
def _get_binner_for_time(self):
if self.kind == 'timestamp':
return super(PeriodIndexResampler, self)._get_binner_for_time()
return self.groupby._get_period_bins(self.ax)
def _convert_obj(self, obj):
obj = super(PeriodIndexResampler, self)._convert_obj(obj)
if self._from_selection:
# see GH 14008, GH 12871
msg = ("Resampling from level= or on= selection"
" with a PeriodIndex is not currently supported,"
" use .set_index(...) to explicitly set index")
raise NotImplementedError(msg)
if self.loffset is not None:
# Cannot apply loffset/timedelta to PeriodIndex -> convert to
# timestamps
self.kind = 'timestamp'
# convert to timestamp
if self.kind == 'timestamp':
obj = obj.to_timestamp(how=self.convention)
return obj
def _downsample(self, how, **kwargs):
"""
Downsample using the given function (which may map to a cython implementation)
Parameters
----------
how : string / cython mapped function
**kwargs : kw args passed to how function
"""
# we may need to actually resample as if we are timestamps
if self.kind == 'timestamp':
return super(PeriodIndexResampler, self)._downsample(how, **kwargs)
how = self._is_cython_func(how) or how
ax = self.ax
if is_subperiod(ax.freq, self.freq):
# Downsampling
return self._groupby_and_aggregate(how, grouper=self.grouper,
**kwargs)
elif is_superperiod(ax.freq, self.freq):
if how == 'ohlc':
# GH #13083
# upsampling to subperiods is handled as an asfreq, which works
# for pure aggregating/reducing methods
# OHLC reduces along the time dimension, but creates multiple
# values for each period -> handle by _groupby_and_aggregate()
return self._groupby_and_aggregate(how, grouper=self.grouper)
return self.asfreq()
elif ax.freq == self.freq:
return self.asfreq()
raise IncompatibleFrequency(
'Frequency {} cannot be resampled to {}, as they are not '
'sub or super periods'.format(ax.freq, self.freq))
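# Hedged illustration of the sub-/super-period branches above (comment added;
# not in the original source; toy data only):
# >>> s = pd.Series(range(4), index=pd.period_range('2000-01', periods=4, freq='M'))
# >>> s.resample('Q').sum()    # 'M' is a subperiod of 'Q': genuine downsample
# 2000Q1    3
# 2000Q2    3
# Freq: Q-DEC, dtype: int64
# >>> s.resample('W').sum()    # weeks are neither sub- nor super-periods of months
# IncompatibleFrequency: ...
# Superperiod targets (e.g. 'M' -> 'D') are routed through asfreq() instead.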
def _upsample(self, method, limit=None, fill_value=None):
"""
method : string {'backfill', 'bfill', 'pad', 'ffill'}
method for upsampling
limit : int, default None
Maximum size gap to fill when reindexing
fill_value : scalar, default None
Value to use for missing values
See also
--------
.fillna
"""
# we may need to actually resample as if we are timestamps
if self.kind == 'timestamp':
return super(PeriodIndexResampler, self)._upsample(
method, limit=limit, fill_value=fill_value)
self._set_binner()
ax = self.ax
obj = self.obj
new_index = self.binner
# Start vs. end of period
memb = ax.asfreq(self.freq, how=self.convention)
# Get the fill indexer
indexer = memb.get_indexer(new_index, method=method, limit=limit)
return self._wrap_result(_take_new_index(
obj, indexer, new_index, axis=self.axis))
class PeriodIndexResamplerGroupby(_GroupByMixin, PeriodIndexResampler):
"""
Provides resample functionality on top of a groupby implementation
.. versionadded:: 0.18.1
"""
@property
def _constructor(self):
return PeriodIndexResampler
class TimedeltaIndexResampler(DatetimeIndexResampler):
@property
def _resampler_for_grouping(self):
return TimedeltaIndexResamplerGroupby
def _get_binner_for_time(self):
return self.groupby._get_time_delta_bins(self.ax)
def _adjust_binner_for_upsample(self, binner):
"""
Adjust our binner when upsampling.
The range of the new index is allowed to be greater than the original range,
so we don't need to change the length of the binner (GH 13022)
"""
return binner
class TimedeltaIndexResamplerGroupby(_GroupByMixin, TimedeltaIndexResampler):
"""
Provides resample functionality on top of a groupby implementation
.. versionadded:: 0.18.1
"""
@property
def _constructor(self):
return TimedeltaIndexResampler
def resample(obj, kind=None, **kwds):
""" create a TimeGrouper and return our resampler """
tg = TimeGrouper(**kwds)
return tg._get_resampler(obj, kind=kind)
resample.__doc__ = Resampler.__doc__
def get_resampler_for_grouping(groupby, rule, how=None, fill_method=None,
limit=None, kind=None, **kwargs):
""" return our appropriate resampler when grouping as well """
# .resample uses 'on' similar to how .groupby uses 'key'
kwargs['key'] = kwargs.pop('on', None)
tg = TimeGrouper(freq=rule, **kwargs)
resampler = tg._get_resampler(groupby.obj, kind=kind)
r = resampler._get_resampler_for_grouping(groupby=groupby)
return _maybe_process_deprecations(r,
how=how,
fill_method=fill_method,
limit=limit)
class TimeGrouper(Grouper):
"""
Custom groupby class for time-interval grouping
Parameters
----------
freq : pandas date offset or offset alias for identifying bin edges
closed : closed end of interval; 'left' or 'right'
label : interval boundary to use for labeling; 'left' or 'right'
convention : {'start', 'end', 'e', 's'}
If axis is PeriodIndex
"""
_attributes = Grouper._attributes + ('closed', 'label', 'how',
'loffset', 'kind', 'convention',
'base')
def __init__(self, freq='Min', closed=None, label=None, how='mean',
axis=0, fill_method=None, limit=None, loffset=None,
kind=None, convention=None, base=0, **kwargs):
# Check for correctness of the keyword arguments which would
# otherwise silently use the default if misspelled
if label not in {None, 'left', 'right'}:
raise ValueError('Unsupported value {} for `label`'.format(label))
if closed not in {None, 'left', 'right'}:
raise ValueError('Unsupported value {} for `closed`'.format(
closed))
if convention not in {None, 'start', 'end', 'e', 's'}:
raise ValueError('Unsupported value {} for `convention`'
.format(convention))
freq = to_offset(freq)
end_types = {'M', 'A', 'Q', 'BM', 'BA', 'BQ', 'W'}
rule = freq.rule_code
if (rule in end_types or
('-' in rule and rule[:rule.find('-')] in end_types)):
if closed is None:
closed = 'right'
if label is None:
label = 'right'
else:
if closed is None:
closed = 'left'
if label is None:
label = 'left'
self.closed = closed
self.label = label
self.kind = kind
self.convention = convention or 'E'
self.convention = self.convention.lower()
if isinstance(loffset, compat.string_types):
loffset = to_offset(loffset)
self.loffset = loffset
self.how = how
self.fill_method = fill_method
self.limit = limit
self.base = base
# always sort time groupers
kwargs['sort'] = True
super(TimeGrouper, self).__init__(freq=freq, axis=axis, **kwargs)
def _get_resampler(self, obj, kind=None):
"""
return my resampler or raise if we have an invalid axis
Parameters
----------
obj : input object
kind : string, optional
'period','timestamp','timedelta' are valid
Returns
-------
a Resampler
Raises
------
TypeError if incompatible axis
"""
self._set_grouper(obj)
ax = self.ax
if isinstance(ax, DatetimeIndex):
return DatetimeIndexResampler(obj,
groupby=self,
kind=kind,
axis=self.axis)
elif isinstance(ax, PeriodIndex) or kind == 'period':
return PeriodIndexResampler(obj,
groupby=self,
kind=kind,
axis=self.axis)
elif isinstance(ax, TimedeltaIndex):
return TimedeltaIndexResampler(obj,
groupby=self,
axis=self.axis)
raise TypeError("Only valid with DatetimeIndex, "
"TimedeltaIndex or PeriodIndex, "
"but got an instance of %r" % type(ax).__name__)
def _get_grouper(self, obj, validate=True):
# create the resampler and return our binner
r = self._get_resampler(obj)
r._set_binner()
return r.binner, r.grouper, r.obj
def _get_time_bins(self, ax):
if not isinstance(ax, DatetimeIndex):
raise TypeError('axis must be a DatetimeIndex, but got '
'an instance of %r' % type(ax).__name__)
if len(ax) == 0:
binner = labels = DatetimeIndex(
data=[], freq=self.freq, name=ax.name)
return binner, [], labels
first, last = _get_range_edges(ax.min(), ax.max(), self.freq,
closed=self.closed,
base=self.base)
tz = ax.tz
# GH #12037
# use first/last directly instead of calling replace() on them,
# because replace() will swallow the nanosecond part;
# thus the last bin may end up slightly before the end if the end contains
# a nanosecond part, leading to a `Values falls after last bin` error
binner = labels = DatetimeIndex(freq=self.freq,
start=first,
end=last,
tz=tz,
name=ax.name)
# GH 15549
# In the edge case of tz-aware resampling, the last binner index can be
# less than the last value in the data object; this happens because of a
# DST time change
if len(binner) > 1 and binner[-1] < last:
extra_date_range = pd.date_range(binner[-1], last + self.freq,
freq=self.freq, tz=tz,
name=ax.name)
binner = labels = binner.append(extra_date_range[1:])
# a little hack
trimmed = False
if (len(binner) > 2 and binner[-2] == last and
self.closed == 'right'):
binner = binner[:-1]
trimmed = True
ax_values = ax.asi8
binner, bin_edges = self._adjust_bin_edges(binner, ax_values)
# general version, knowing nothing about relative frequencies
bins = lib.generate_bins_dt64(
ax_values, bin_edges, self.closed, hasnans=ax.hasnans)
if self.closed == 'right':
labels = binner
if self.label == 'right':
labels = labels[1:]
elif not trimmed:
labels = labels[:-1]
else:
if self.label == 'right':
labels = labels[1:]
elif not trimmed:
labels = labels[:-1]
if ax.hasnans:
binner = binner.insert(0, NaT)
labels = labels.insert(0, NaT)
# if we end up with more labels than bins
# adjust the labels
# GH4076
if len(bins) < len(labels):
labels = labels[:len(bins)]
return binner, bins, labels
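# Hedged illustration of how `label` (and `closed`) relate to the bins built
# above (comment added; not in the original source):
# >>> s = pd.Series(range(4), index=pd.date_range('2000-01-01', periods=4, freq='T'))
# >>> s.resample('2T', label='left').sum().index[0]    # -> 2000-01-01 00:00:00
# >>> s.resample('2T', label='right').sum().index[0]   # -> 2000-01-01 00:02:00
# `closed` analogously decides which bin edge the boundary values fall into.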
def _adjust_bin_edges(self, binner, ax_values):
# Some hacks for > daily data, see #1471, #1458, #1483
if self.freq != 'D' and is_superperiod(self.freq, 'D'):
if self.closed == 'right':
# GH 21459, GH 9119: Adjust the bins relative to the wall time
bin_edges = binner.tz_localize(None)
bin_edges = bin_edges + timedelta(1) - Nano(1)
bin_edges = bin_edges.tz_localize(binner.tz).asi8
else:
bin_edges = binner.asi8
# intraday values on last day
if bin_edges[-2] > ax_values.max():
bin_edges = bin_edges[:-1]
binner = binner[:-1]
else:
bin_edges = binner.asi8
return binner, bin_edges
def _get_time_delta_bins(self, ax):
if not isinstance(ax, TimedeltaIndex):
raise TypeError('axis must be a TimedeltaIndex, but got '
'an instance of %r' % type(ax).__name__)
if not len(ax):
binner = labels = TimedeltaIndex(
data=[], freq=self.freq, name=ax.name)
return binner, [], labels
start, end = ax.min(), ax.max()
labels = binner = TimedeltaIndex(start=start,
end=end,
freq=self.freq,
name=ax.name)
end_stamps = labels + self.freq
bins = ax.searchsorted(end_stamps, side='left')
# Addresses GH #10530
if self.base > 0:
labels += type(self.freq)(self.base)
return binner, bins, labels
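# Hedged example (comment added; not in the original source): for a
# TimedeltaIndex the bins simply start at the minimum timedelta.
# >>> s = pd.Series(range(4), index=pd.timedelta_range(start='0s', periods=4, freq='s'))
# >>> s.resample('2s').sum()    # bins [0s, 2s) and [2s, 4s) -> sums 1 and 5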
def _get_time_period_bins(self, ax):
if not isinstance(ax, DatetimeIndex):
raise TypeError('axis must be a DatetimeIndex, but got '
'an instance of %r' % type(ax).__name__)
freq = self.freq
if not len(ax):
binner = labels = PeriodIndex(data=[], freq=freq, name=ax.name)
return binner, [], labels
labels = binner = PeriodIndex(start=ax[0],
end=ax[-1],
freq=freq,
name=ax.name)
end_stamps = (labels + freq).asfreq(freq, 's').to_timestamp()
if ax.tzinfo:
end_stamps = end_stamps.tz_localize(ax.tzinfo)
bins = ax.searchsorted(end_stamps, side='left')
return binner, bins, labels
def _get_period_bins(self, ax):
if not isinstance(ax, PeriodIndex):
raise TypeError('axis must be a PeriodIndex, but got '
'an instance of %r' % type(ax).__name__)
memb = ax.asfreq(self.freq, how=self.convention)
# NaT handling as in pandas._libs.lib.generate_bins_dt64()
nat_count = 0
if memb.hasnans:
nat_count = np.sum(memb._isnan)
memb = memb[~memb._isnan]
# if index contains no valid (non-NaT) values, return empty index
if not len(memb):
binner = labels = PeriodIndex(
data=[], freq=self.freq, name=ax.name)
return binner, [], labels
start = ax.min().asfreq(self.freq, how=self.convention)
end = ax.max().asfreq(self.freq, how='end')
labels = binner = PeriodIndex(start=start, end=end,
freq=self.freq, name=ax.name)
i8 = memb.asi8
freq_mult = self.freq.n
# when upsampling to subperiods, we need to generate enough bins
expected_bins_count = len(binner) * freq_mult
i8_extend = expected_bins_count - (i8[-1] - i8[0])
rng = np.arange(i8[0], i8[-1] + i8_extend, freq_mult)
rng += freq_mult
bins = memb.searchsorted(rng, side='left')
if nat_count > 0:
# NaT handling as in pandas._libs.lib.generate_bins_dt64()
# shift bins by the number of NaT
bins += nat_count
bins = np.insert(bins, 0, nat_count)
binner = binner.insert(0, NaT)
labels = labels.insert(0, NaT)
return binner, bins, labels
def _take_new_index(obj, indexer, new_index, axis=0):
from pandas.core.api import Series, DataFrame
if isinstance(obj, Series):
new_values = algos.take_1d(obj.values, indexer)
return Series(new_values, index=new_index, name=obj.name)
elif isinstance(obj, DataFrame):
if axis == 1:
raise NotImplementedError("axis 1 is not supported")
return DataFrame(obj._data.reindex_indexer(
new_axis=new_index, indexer=indexer, axis=1))
else:
raise ValueError("'obj' should be either a Series or a DataFrame")
def _get_range_edges(first, last, offset, closed='left', base=0):
if isinstance(offset, Tick):
is_day = isinstance(offset, Day)
day_nanos = delta_to_nanoseconds(timedelta(1))
# #1165
if (is_day and day_nanos % offset.nanos == 0) or not is_day:
return _adjust_dates_anchored(first, last, offset,
closed=closed, base=base)
else:
first = first.normalize()
last = last.normalize()
if closed == 'left':
first = Timestamp(offset.rollback(first))
else:
first = Timestamp(first - offset)
last = Timestamp(last + offset)
return first, last
def _adjust_dates_anchored(first, last, offset, closed='right', base=0):
# First and last offsets should be calculated from the start day to fix an
# error caused by resampling across multiple days when a one-day period is
# not a multiple of the frequency.
#
# See https://github.com/pandas-dev/pandas/issues/8683
# GH 10117 & GH 19375. If first and last contain timezone information,
# perform the calculation in UTC in order to avoid localizing on an
# ambiguous or nonexistent time.
first_tzinfo = first.tzinfo
last_tzinfo = last.tzinfo
start_day_nanos = first.normalize().value
if first_tzinfo is not None:
first = first.tz_convert('UTC')
if last_tzinfo is not None:
last = last.tz_convert('UTC')
base_nanos = (base % offset.n) * offset.nanos // offset.n
start_day_nanos += base_nanos
foffset = (first.value - start_day_nanos) % offset.nanos
loffset = (last.value - start_day_nanos) % offset.nanos
if closed == 'right':
if foffset > 0:
# roll back
fresult = first.value - foffset
else:
fresult = first.value - offset.nanos
if loffset > 0:
# roll forward
lresult = last.value + (offset.nanos - loffset)
else:
# already the end of the road
lresult = last.value
else: # closed == 'left'
if foffset > 0:
fresult = first.value - foffset
else:
# start of the road
fresult = first.value
if loffset > 0:
# roll forward
lresult = last.value + (offset.nanos - loffset)
else:
lresult = last.value + offset.nanos
fresult = Timestamp(fresult)
lresult = Timestamp(lresult)
if first_tzinfo is not None:
fresult = fresult.tz_localize('UTC').tz_convert(first_tzinfo)
if last_tzinfo is not None:
lresult = lresult.tz_localize('UTC').tz_convert(last_tzinfo)
return fresult, lresult
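# Worked example of the anchoring above (comment added; not in the original
# source): for a 7-minute Tick with closed='left', a first timestamp at 00:10
# (600s into the day) gives foffset = 600 % 420 = 180, so the first edge rolls
# back by 180s to 00:07 -- bins are anchored to the start of the day rather
# than to the first observation:
# >>> s = pd.Series(1, index=pd.date_range('2000-01-01 00:10', periods=3, freq='7min'))
# >>> s.resample('7min').sum().index[0]    # -> 2000-01-01 00:07:00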
def asfreq(obj, freq, method=None, how=None, normalize=False, fill_value=None):
"""
Utility frequency conversion method for Series/DataFrame
"""
if isinstance(obj.index, PeriodIndex):
if method is not None:
raise NotImplementedError("'method' argument is not supported")
if how is None:
how = 'E'
new_obj = obj.copy()
new_obj.index = obj.index.asfreq(freq, how=how)
elif len(obj.index) == 0:
new_obj = obj.copy()
new_obj.index = obj.index._shallow_copy(freq=to_offset(freq))
else:
dti = date_range(obj.index[0], obj.index[-1], freq=freq)
dti.name = obj.index.name
new_obj = obj.reindex(dti, method=method, fill_value=fill_value)
if normalize:
new_obj.index = new_obj.index.normalize()
return new_obj
| bsd-3-clause |
ngvozdiev/ctr-base | python/plot_link_paths.py | 1 | 3124 | from collections import defaultdict
from scipy import interpolate
import numpy as np
import matplotlib.pylab as plt
import parser_wrapper
import glob
import itertools
import matplotlib.patches as mpatches
import argparse
import matplotlib
matplotlib.rcParams.update({'font.size': 14})
parser = argparse.ArgumentParser(description='Plots link occupancy')
parser.add_argument('--file', type=str, help='Metric file')
parser.add_argument('--sofile', type=str, help='Library file for parser', default='libmetrics_parser.dylib')
parser.add_argument('--metric', type=str, help='Metric id', default='path_bytes')
parser.add_argument('--x_min', type=float, default=0)
parser.add_argument('--x_max', type=float, default=2000)
args = parser.parse_args()
INTERESTING_LINKS = ['N0->N1', 'N4->N5', 'N8->N9', 'N12->N13']
p = parser_wrapper.MetricsParser(args.file, args.sofile)
data = p.Parse(args.metric, '.*', deltas=True)
print(data)
ax_f, axarr = plt.subplots(len(INTERESTING_LINKS), sharex=True, sharey=True)
def SrcDstLabel(src, dst):
s = str(src) + u'\u2192' + str(dst)
return s.replace('N', '')
def AggFromPath(path):
path = path.split('[')[1].split(']')[0]
pieces = path.split('->')
return SrcDstLabel(pieces[0], pieces[-1])
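# Worked example (comment added for illustration; the exact metric-name format
# is an assumption inferred from the parsing above):
# >>> AggFromPath('path_bytes[N0->N4->N5->N1]')
# u'0\u21921'    # i.e. "0->1" rendered with a unicode arrow, 'N' prefixes stripped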
cm = plt.get_cmap('hot')
NUM_COLORS=5
colors = itertools.cycle([cm(1.*i/NUM_COLORS) for i in range(NUM_COLORS)])
color_map = {}
def GetColor(label):
if label in color_map:
return color_map[label]
return color_map.setdefault(label, next(colors))
GetColor(SrcDstLabel(0, 1))
GetColor(SrcDstLabel(2, 3))
GetColor(SrcDstLabel(6, 7))
GetColor(SrcDstLabel(10, 11))
for i, link in enumerate(INTERESTING_LINKS):
ax = axarr[i]
xs = []
fs = []
labels = []
for key, value in data.items():
assert(key[0] == args.metric)
path = key[1]
if link in path:
x, y = value
x = np.array(x, dtype=np.float64) * 0.000000000001
y = np.array(y, dtype=np.float64) * (100.0 / 1000.0 / 1000.0 / 1000.0) * 8
x, y = parser_wrapper.Bin(x, y, 100)
xs.append(x)
fs.append(interpolate.interp1d(x,y, bounds_error=False, fill_value=0))
labels.append(AggFromPath(path))
if len(xs) == 0:
continue
max_x = max(len(i) for i in xs)
x = None
for xi in xs:
if len(xi) == max_x:
x = xi
ys = [f(x) for f in fs]
colors_list = [GetColor(i) for i in labels]
ngons = ax.stackplot(x, ys, labels=labels, colors=colors_list)
# ngons[0].set_hatch('//')
ax.set_ylabel('Gbps')
ax.legend(loc=1, prop={'size': 10})
#color_items = color_map.items()
#ax.legend([plt.Rectangle((0, 0), 1, 1, fc=v) for _, v in color_items],
# [k for k, _ in color_items], ncol=2, loc=2)
ax_f.subplots_adjust(hspace=0)
plt.setp([a.get_xticklabels() for a in ax_f.axes[:-1]], visible=False)
plt.xlim([args.x_min, args.x_max])
plt.ylim([0,0.999])
start, end = ax.get_xlim()
ax.xaxis.set_ticks(np.arange(start, end, 300))
plt.xlabel('seconds')
plt.savefig('link_paths_out.pdf', bbox_inches='tight')
plt.show()
| mit |
bartvm/pylearn2 | pylearn2/optimization/test_batch_gradient_descent.py | 44 | 6402 | from __future__ import print_function
from pylearn2.optimization.batch_gradient_descent import BatchGradientDescent
import theano.tensor as T
from pylearn2.utils import sharedX
import numpy as np
from theano.compat.six.moves import xrange
from theano import config
from theano.printing import min_informative_str
def test_batch_gradient_descent():
""" Verify that batch gradient descent works by checking that
it minimizes a quadratic function f(x) = x^T A x + b^T x + c
correctly for several sampled values of A, b, and c.
The ground truth minimizer is x = np.linalg.solve(A,-b)"""
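# Sanity sketch of the ground truth itself (comment added; numpy is already
# imported above): the gradient of 0.5 x^T A x + b^T x + c is A x + b for
# symmetric A, so the minimizer solves A x = -b, e.g.
# >>> A = np.array([[2., 0.], [0., 4.]]); b = np.array([-2., -8.])
# >>> np.linalg.solve(A, -b)    # -> [1., 2.]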
n = 3
A = T.matrix(name = 'A')
b = T.vector(name = 'b')
c = T.scalar(name = 'c')
x = sharedX( np.zeros((n,)) , name = 'x')
half = np.cast[config.floatX](0.5)
obj = half * T.dot(T.dot(x,A),x)+T.dot(b,x)+c
minimizer = BatchGradientDescent(
objective = obj,
params = [ x],
inputs = [ A, b, c])
num_samples = 3
rng = np.random.RandomState([1,2,3])
for i in xrange(num_samples):
A = np.cast[config.floatX](rng.randn(int(1.5 * n), n))
A = np.cast[config.floatX](np.dot(A.T,A))
A += np.cast[config.floatX](np.identity(n) * .02)
b = np.cast[config.floatX](rng.randn(n))
c = np.cast[config.floatX](rng.randn())
x.set_value(np.cast[config.floatX](rng.randn(n)))
analytical_x = np.linalg.solve(A,-b)
actual_obj = minimizer.minimize(A,b,c)
actual_x = x.get_value()
#Check that the value returned by the minimize method
#is the objective function value at the parameters
#chosen by the minimize method
cur_obj = minimizer.obj(A,b,c)
assert np.allclose(actual_obj, cur_obj)
x.set_value(analytical_x)
analytical_obj = minimizer.obj(A,b,c)
#make sure the objective function is accurate to first 4 digits
condition1 = not np.allclose(analytical_obj, actual_obj)
condition2 = np.abs(analytical_obj-actual_obj) >= 1e-4 * \
np.abs(analytical_obj)
if (config.floatX == 'float64' and condition1) \
or (config.floatX == 'float32' and condition2):
print('objective function value came out wrong on sample ',i)
print('analytical obj', analytical_obj)
print('actual obj',actual_obj)
"""
The following section of code was used to verify that numerical
error can make the objective function look non-convex
print('Checking for numerically induced non-convex behavior')
def f(x):
return 0.5 * np.dot(x,np.dot(A,x)) + np.dot(b,x) + c
x.set_value(actual_x)
minimizer._compute_grad(A,b,c)
minimizer._normalize_grad()
d = minimizer.param_to_grad_shared[x].get_value()
x = actual_x.copy()
prev = f(x)
print(prev)
step_size = 1e-4
x += step_size * d
cur = f(x)
print(cur)
cur_sgn = np.sign(cur-prev)
flip_cnt = 0
for i in xrange(10000):
x += step_size * d
prev = cur
cur = f(x)
print(cur)
prev_sgn = cur_sgn
cur_sgn = np.sign(cur-prev)
if cur_sgn != prev_sgn:
print('flip')
flip_cnt += 1
if flip_cnt > 1:
print("Non-convex!")
from matplotlib import pyplot as plt
y = []
x = actual_x.copy()
for j in xrange(10000):
y.append(f(x))
x += step_size * d
plt.plot(y)
plt.show()
assert False
print('None found')
"""
#print 'actual x',actual_x
#print 'A:'
#print A
#print 'b:'
#print b
#print 'c:'
#print c
x.set_value(actual_x)
minimizer._compute_grad(A,b,c)
x_grad = minimizer.param_to_grad_shared[x]
actual_grad = x_grad.get_value()
correct_grad = 0.5 * np.dot(A,x.get_value())+ 0.5 * \
np.dot(A.T, x.get_value()) +b
if not np.allclose(actual_grad, correct_grad):
print('gradient was wrong at convergence point')
print('actual grad: ')
print(actual_grad)
print('correct grad: ')
print(correct_grad)
print('max difference: ', np.abs(actual_grad - correct_grad).max())
assert False
minimizer._normalize_grad()
d = minimizer.param_to_grad_shared[x].get_value()
step_len = ( np.dot(b,d) + 0.5 * np.dot(d,np.dot(A,actual_x)) \
+ 0.5 * np.dot(actual_x,np.dot(A,d)) ) \
/ np.dot(d, np.dot(A,d))
g = np.dot(A,actual_x)+b
deriv = np.dot(g,d)
print('directional deriv at actual', deriv)
print('optimal step_len', step_len)
optimal_x = actual_x - d * step_len
g = np.dot(A,optimal_x) + b
deriv = np.dot(g,d)
print('directional deriv at optimal: ',deriv)
x.set_value(optimal_x)
print('obj at optimal: ',minimizer.obj(A,b,c))
print('eigenvalue range:')
val, vec = np.linalg.eig(A)
print((val.min(),val.max()))
print('condition number: ',(val.max()/val.min()))
assert False
if __name__ == '__main__':
test_batch_gradient_descent()
| bsd-3-clause |
maartenbreddels/vaex | packages/vaex-core/vaex/legacy.py | 1 | 60957 | # -*- coding: utf-8 -*-
from __future__ import division, print_function
import numpy as np
import vaex
from .tasks import Task, TaskMapReduce
from .utils import _parse_f
import six
import logging
logger = logging.getLogger(__name__)  # used by SubspaceGridded.cube_png below
def _asfloat(a):
if a.dtype.type == np.float64 and a.strides[0] == 8:
return a
else:
return a.astype(np.float64, copy=False)
class TaskMapReduceLegacy(TaskMapReduce):
def __init__(self, *args, **kwargs):
kwargs = kwargs.copy()
kwargs['ignore_filter'] = True
TaskMapReduce.__init__(self, *args, **kwargs)
class TaskHistogram(Task):
def __init__(self, df, subspace, expressions, size, limits, masked=False, weight=None):
self.size = size
self.limits = limits
Task.__init__(self, df, expressions, name="histogram")
self.subspace = subspace
self.dtype = np.float64
self.masked = masked
self.weight = weight
# self.grids = vaex.grids.Grids(self.df, self.df.executor.thread_pool, *expressions)
# self.grids.ranges = limits
# self.grids.grids["counts"] = vaex.grids.Grid(self.grids, size, self.dimension, None)
shape1 = (self.size,) * self.dimension
try:
self.size[0]
shape1 = tuple(self.size)
except:
pass
shape = (self.subspace.executor.thread_pool.nthreads,) + shape1
self.data = np.zeros(shape, dtype=self.dtype)
self.ranges_flat = []
self.minima = []
self.maxima = []
for limit in self.limits:
self.ranges_flat.extend(limit)
vmin, vmax = limit
self.minima.append(vmin)
self.maxima.append(vmax)
if self.weight is not None:
self.expressions_all.append(weight)
# print self.ranges_flat
def __repr__(self):
name = self.__class__.__module__ + "." + self.__class__.__name__
return "<%s(df=%r, expressions=%r, size=%r, limits=%r)> instance at 0x%x" % (name, self.df, self.expressions, self.size, self.limits, id(self))
def map(self, thread_index, i1, i2, filter_mask, *blocks):
class Info(object):
pass
info = Info()
info.i1 = i1
info.i2 = i2
info.first = i1 == 0
info.last = i2 == self.df.length_unfiltered()
info.size = i2 - i1
# print "bin", i1, i2, info.last
# self.grids["counts"].bin_block(info, *blocks)
# mask = self.df.mask
data = self.data[thread_index]
blocks = [_asfloat(block) for block in blocks]
if self.masked or self.df.filtered:
mask = self.df.evaluate_selection_mask("default" if self.masked else None, i1=i1, i2=i2, pre_filtered=False)
blocks = [block[mask] for block in blocks]
subblock_weight = None
if len(blocks) == len(self.expressions) + 1:
subblock_weight = blocks[-1]
blocks = list(blocks[:-1])
# print subblocks[0]
# print subblocks[1]
if self.dimension == 1:
vaex.vaexfast.histogram1d(blocks[0], subblock_weight, data, *self.ranges_flat)
elif self.dimension == 2:
# if subblock_weight is None:
# #print "speedup?"
# histogram_numba(blocks[0], blocks[1], subblock_weight, data, *self.ranges_flat)
# else:
vaex.vaexfast.histogram2d(blocks[0], blocks[1], subblock_weight, data, *self.ranges_flat)
# vaex.vaexfast.statisticNd([blocks[0], blocks[1]], subblock_weight, data, self.minima, self.maxima, 0)
elif self.dimension == 3:
vaex.vaexfast.histogram3d(blocks[0], blocks[1], blocks[2], subblock_weight, data, *self.ranges_flat)
else:
blocks = list(blocks) # histogramNd wants blocks to be a list
vaex.vaexfast.histogramNd(blocks, subblock_weight, data, self.minima, self.maxima)
return i1
# return map(self._map, blocks)#[self.map(block) for block in blocks]
def reduce(self, results):
for i in range(1, self.subspace.executor.thread_pool.nthreads):
self.data[0] += self.data[i]
return self.data[0]
# return self.data
class SubspaceGridded(object):
def __init__(self, subspace_bounded, grid, vx=None, vy=None, vcounts=None):
self.subspace_bounded = subspace_bounded
self.grid = grid
self.vx = vx
self.vy = vy
self.vcounts = vcounts
def vector(self, weightx, weighty, size=32):
counts = self.subspace_bounded.gridded_by_histogram(size=size)
vx = self.subspace_bounded.gridded_by_histogram(size=size, weight=weightx)
vy = self.subspace_bounded.gridded_by_histogram(size=size, weight=weighty)
return SubspaceGridded(self.subspace_bounded, self.grid, vx=vx, vy=vy, vcounts=counts)
def filter_gaussian(self, sigmas=1):
import scipy.ndimage
return SubspaceGridded(self.subspace_bounded, scipy.ndimage.filters.gaussian_filter(self.grid, sigmas))
def clip_relative(self, v1, v2):
vmin = self.grid.min()
vmax = self.grid.max()
width = vmax - vmin
return SubspaceGridded(self.subspace_bounded, np.clip(self.grid, vmin + v1 * width, vmin + v2 * width))
def volr(self, **kwargs):
import vaex.notebook
return vaex.notebook.volr(subspace_gridded=self, **kwargs)
def plot(self, axes=None, **kwargs):
self.subspace_bounded.subspace.plot(np.log1p(self.grid), limits=self.subspace_bounded.bounds, axes=axes, **kwargs)
def mean_line(self, axis=0, **kwargs):
from matplotlib import pylab
assert axis in [0, 1]
other_axis = 0 if axis == 1 else 1
xmin, xmax = self.subspace_bounded.bounds[axis]
ymin, ymax = self.subspace_bounded.bounds[other_axis]
x = vaex.utils.linspace_centers(xmin, xmax, self.grid.shape[axis])
y = vaex.utils.linspace_centers(ymin, ymax, self.grid.shape[other_axis])
print(y)
if axis == 0:
counts = np.sum(self.grid, axis=axis)
means = np.sum(self.grid * y[np.newaxis, :].T, axis=axis) / counts
else:
counts = np.sum(self.grid, axis=axis)
means = np.sum(self.grid * y[:, np.newaxis].T, axis=axis) / counts
if axis == 0:
result = pylab.plot(x, means, **kwargs)
else:
result = pylab.plot(means, x, **kwargs)
self.subspace_bounded.lim()
return result, x, means
def _repr_png_(self):
from matplotlib import pylab
fig, ax = pylab.subplots()
self.plot(axes=ax, f=np.log1p)
import vaex.utils
if all([k is not None for k in [self.vx, self.vy, self.vcounts]]):
N = self.vx.grid.shape[0]
bounds = self.subspace_bounded.bounds
print(bounds)
positions = [vaex.utils.linspace_centers(bounds[i][0], bounds[i][1], N) for i in range(self.subspace_bounded.subspace.dimension)]
print(positions)
mask = self.vcounts.grid > 0
vx = np.zeros_like(self.vx.grid)
vy = np.zeros_like(self.vy.grid)
vx[mask] = self.vx.grid[mask] / self.vcounts.grid[mask]
vy[mask] = self.vy.grid[mask] / self.vcounts.grid[mask]
# vx = self.vx.grid / self.vcounts.grid
# vy = self.vy.grid / self.vcounts.grid
x2d, y2d = np.meshgrid(positions[0], positions[1])
ax.quiver(x2d[mask], y2d[mask], vx[mask], vy[mask])
# print x2d
# print y2d
# print vx
# print vy
# ax.quiver(x2d, y2d, vx, vy)
ax.title.set_text(r"$\log(1+counts)$")
ax.set_xlabel(self.subspace_bounded.subspace.expressions[0])
ax.set_ylabel(self.subspace_bounded.subspace.expressions[1])
# pylab.savefig
# from .io import StringIO
from six import StringIO
file_object = StringIO()
fig.canvas.print_png(file_object)
pylab.close(fig)
return file_object.getvalue()
def cube_png(self, f=np.log1p, colormap="afmhot", file="cube.png"):
if self.grid.shape != ((128,) * 3):
logger.error("only 128**3 cubes are supported")
return None
colormap_name = "afmhot"
import matplotlib.cm
colormap = matplotlib.cm.get_cmap(colormap_name)
mapping = matplotlib.cm.ScalarMappable(cmap=colormap)
# pixmap = QtGui.QPixmap(32*2, 32)
data = np.zeros((128 * 8, 128 * 16, 4), dtype=np.uint8)
# mi, ma = 1*10**self.mod1, self.data3d.max()*10**self.mod2
grid = f(self.grid)
vmin, vmax = grid.min(), grid.max()
grid_normalized = (grid - vmin) / (vmax - vmin)
# intensity_normalized = (np.log(self.data3d + 1.) - np.log(mi)) / (np.log(ma) - np.log(mi));
import PIL.Image
for y2d in range(8):
for x2d in range(16):
zindex = x2d + y2d * 16
I = grid_normalized[zindex]
rgba = mapping.to_rgba(I, bytes=True) # .reshape(Nx, 4)
# print rgba.shape
subdata = data[y2d * 128:(y2d + 1) * 128, x2d * 128:(x2d + 1) * 128]
for i in range(3):
subdata[:, :, i] = rgba[:, :, i]
subdata[:, :, 3] = (grid_normalized[zindex] * 255).astype(np.uint8) # * 0 + 255
if 0:
filename = "cube%03d.png" % zindex
img = PIL.Image.frombuffer("RGB", (128, 128), subdata[:, :, 0:3] * 1)
print(("saving to", filename))
img.save(filename)
img = PIL.Image.frombuffer("RGBA", (128 * 16, 128 * 8), data, 'raw') # , "RGBA", 0, -1)
# filename = "cube.png"
# print "saving to", file
img.save(file, "png")
if 0:
filename = "colormap.png"
print(("saving to", filename))
height, width = self.colormap_data.shape[:2]
img = PIL.Image.frombuffer("RGB", (width, height), self.colormap_data)
img.save(filename)
class SubspaceBounded(object):
def __init__(self, subspace, bounds):
self.subspace = subspace
self.bounds = bounds
def histogram(self, size=256, weight=None):
return self.subspace.histogram(limits=self.bounds, size=size, weight=weight)
def gridded(self, size=256, weight=None):
return self.gridded_by_histogram(size=size, weight=weight)
def gridded_by_histogram(self, size=256, weight=None):
grid = self.histogram(size=size, weight=weight)
return SubspaceGridded(self, grid)
def lim(self):
from matplotlib import pylab
xmin, xmax = self.bounds[0]
ymin, ymax = self.bounds[1]
pylab.xlim(xmin, xmax)
pylab.ylim(ymin, ymax)
class Subspaces(object):
"""
:type: subspaces: list[Subspace]
"""
def __init__(self, subspaces):
self.subspaces = subspaces
self.expressions = set()
first_subspace = self.subspaces[0]
self.delay = first_subspace.delay
self.dimension = first_subspace.dimension
self.df = self.subspaces[0].df
for subspace in self.subspaces:
assert subspace.df == self.subspaces[0].df
assert subspace.delay == self.subspaces[0].delay
assert subspace.dimension == self.subspaces[0].dimension, "subspace is of dimension %s, while the first subspace is of dimension %s" % (subspace.dimension, self.subspaces[0].dimension)
# assert subspace.sele== self.subspaces[0].delay
self.expressions.update(subspace.expressions)
self.expressions = list(self.expressions)
self.subspace = self.df(*list(self.expressions), delay=self.delay, executor=first_subspace.executor)
# def _repr_html_(self):
def __len__(self):
return len(self.subspaces)
def names(self, seperator=" "):
return [seperator.join(subspace.expressions) for subspace in self.subspaces]
def expressions_list(self):
return [subspace.expressions for subspace in self.subspaces]
def selected(self):
return Subspaces([subspace.selected() for subspace in self.subspaces])
def _unpack(self, values):
value_map = dict(zip(self.expressions, values))
return [[value_map[ex] for ex in subspace.expressions] for subspace in self.subspaces]
def _pack(self, values):
value_map = {}
for subspace_values, subspace in zip(values, self.subspaces):
for value, expression in zip(subspace_values, subspace.expressions):
if expression in value_map:
if isinstance(value, np.ndarray):
assert np.all(value_map[expression] == value), "inconsistency in subspaces, value for expression %r is %r in one case, and %r in the other" % (expression, value, value_map[expression])
else:
assert value_map[expression] == value, "inconsistency in subspaces, value for expression %r is %r in one case, and %r in the other" % (expression, value, value_map[expression])
else:
value_map[expression] = value
return [value_map[expression] for expression in self.expressions]
def minmax(self):
if self.delay:
return self.subspace.minmax().then(self._unpack)
else:
return self._unpack(self.subspace.minmax())
def limits_sigma(self, sigmas=3, square=False):
if self.delay:
return self.subspace.limits_sigma(sigmas=sigmas, square=square).then(self._unpack)
else:
return self._unpack(self.subspace.limits_sigma(sigmas=sigmas, square=square))
def mutual_information(self, limits=None, size=256):
if limits is not None:
limits = self._pack(limits)
def mutual_information(limits):
return vaex.promise.listPromise([vaex.promise.Promise.fulfilled(subspace.mutual_information(subspace_limits, size=size)) for subspace_limits, subspace in zip(limits, self.subspaces)])
# return histograms
if limits is None:
limits_promise = vaex.promise.Promise.fulfilled(self.subspace.minmax())
else:
limits_promise = vaex.promise.Promise.fulfilled(limits)
limits_promise = limits_promise.then(self._unpack)
promise = limits_promise.then(mutual_information)
return promise if self.delay else promise.get()
def mean(self):
if self.delay:
return self.subspace.mean().then(self._unpack)
else:
means = self.subspace.mean()
return self._unpack(means)
def var(self, means=None):
# 'pack' means, and check if it makes sense
if means is not None:
means = self._pack(means)
def var(means):
return self.subspace.var(means=means)
if self.delay:
# if means is None:
# return self.subspace.mean().then(var).then(self._unpack)
# else:
return var(means).then(self._unpack)
else:
# if means is None:
# means = self.subspace.mean()
# logger.debug("means: %r", means)
return self._unpack(var(means=means))
def correlation(self, means=None, vars=None):
def var(means):
return self.subspace.var(means=means)
def correlation(means_and_vars):
means, vars = means_and_vars
means, vars = self._unpack(means), self._unpack(vars)
# return self.subspace.correlation(means=means, vars=vars)
return vaex.promise.listPromise([subspace.correlation(means=subspace_mean, vars=subspace_var) for subspace_mean, subspace_var, subspace in zip(means, vars, self.subspaces)])
if means is not None:
means = self._pack(means)
if vars is not None:
vars = self._pack(vars)
if self.delay:
if means is None:
mean_promise = self.subspace.mean()
else:
mean_promise = vaex.promise.Promise.fulfilled(means)
if vars is None:
var_promise = mean_promise.then(var)
else:
var_promise = vaex.promise.Promise.fulfilled(vars)
mean_and_var_calculated = vaex.promise.listPromise(mean_promise, var_promise)
return mean_and_var_calculated.then(correlation)
else:
if means is None:
means = self.subspace.mean()
if vars is None:
vars = self.subspace.var(means=means)
means = self._unpack(means)
vars = self._unpack(vars)
return [subspace.correlation(means=subspace_mean, vars=subspace_var) for subspace_mean, subspace_var, subspace in zip(means, vars, self.subspaces)]
# return correlation((means, vars))
# def bounded_by(self, limits_list):
# return SubspacesBounded(SubspaceBounded(subspace, limits) for subspace, limit in zip(self.subspaces, limits_list))
class Subspace(object):
"""A Subspace represent a subset of columns or expressions from a df.
Subspaces are not instantiated directly, but by 'calling' the df like this:
>>> subspace_xy = some_df("x", "y")
>>> subspace_r = some_df("sqrt(x**2+y**2)")
See `vaex.df.Dataset` for more documentation.
"""
def __init__(self, df, expressions, executor, delay, masked=False):
"""
:param Dataset df: the df the subspace refers to
:param list[str] expressions: list of expressions that forms the subspace
:param Executor executor: responsible for executing the tasks
:param bool delay: return answers directly, or as a promise
:param bool masked: work on the selection or not
:return:
"""
self.df = df
self.expressions = expressions
self.executor = executor
self.delay = delay
self.is_masked = masked
def __repr__(self):
name = self.__class__.__module__ + "." + self.__class__.__name__
return "<%s(df=%r, expressions=%r, delay=%r, is_masked=%r)> instance at 0x%x" % (name, self.df, self.expressions, self.delay, self.is_masked, id(self))
@property
def dimension(self):
return len(self.expressions)
def get_selection(self):
return self.df.get_selection("default") if self.is_masked else None
def is_selected(self):
return self.is_masked
def selected(self):
return self.__class__(self.df, expressions=self.expressions, executor=self.executor, delay=self.delay, masked=True)
def delayhronous(self):
return self.__class__(self.df, expressions=self.expressions, executor=self.executor, delay=True, masked=self.is_masked)
def image_rgba_save(self, filename, data=None, rgba8=None, **kwargs):
if rgba8 is not None:
data = self.image_rgba_data(rgba8=rgba8, **kwargs)
if data is None:
data = self.image_rgba_data(**kwargs)
with open(filename, "wb") as f:
f.write(data)
def image_rgba_notebook(self, data=None, rgba8=None, **kwargs):
if rgba8 is not None:
data = self.image_rgba_data(rgba8=rgba8, **kwargs)
if data is None:
data = self.image_rgba_data(**kwargs)
from IPython.display import display, Image
return Image(data=data)
def image_rgba_data(self, rgba8=None, format="png", pil_draw=False, **kwargs):
import PIL.Image
import PIL.ImageDraw
from six import StringIO
if rgba8 is None:
rgba8 = self.image_rgba(**kwargs)
img = PIL.Image.frombuffer("RGBA", rgba8.shape[:2], rgba8, 'raw') # , "RGBA", 0, -1)
if pil_draw:
draw = PIL.ImageDraw.Draw(img)
pil_draw(draw)
f = StringIO()
img.save(f, format)
return f.getvalue()
def image_rgba_url(self, rgba8=None, **kwargs):
if rgba8 is None:
rgba8 = self.image_rgba(**kwargs)
import PIL.Image
img = PIL.Image.frombuffer("RGBA", rgba8.shape[:2], rgba8, 'raw') # , "RGBA", 0, -1)
from six import StringIO
f = StringIO()
img.save(f, "png")
from base64 import b64encode
imgurl = "data:image/png;base64," + b64encode(f.getvalue()) + ""
return imgurl
def normalize_grid(self, grid):
grid = grid * 1 # copy
mask = (grid > 0) & np.isfinite(grid)
if grid.sum():
grid -= grid[mask].min()
grid /= grid[mask].max()
else:
grid[:] = 0
return grid
def limits(self, value, square=False):
"""TODO: doc + server side implementation"""
if isinstance(value, six.string_types):
import re
match = re.match(r"(\d*)(\D*)", value)
if match is None:
raise ValueError("do not understand limit specifier %r, examples are 90%, 3sigma")
else:
value, type = match.groups()
import ast
value = ast.literal_eval(value)
type = type.strip()
if type in ["s", "sigma"]:
return self.limits_sigma(value)
elif type in ["ss", "sigmasquare"]:
return self.limits_sigma(value, square=True)
elif type in ["%", "percent"]:
return self.limits_percentage(value)
elif type in ["%s", "%square", "percentsquare"]:
return self.limits_percentage(value, square=True)
if value is None:
return self.limits_percentage(square=square)
else:
return value
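# Hedged examples of the limit specifiers parsed above (values are illustrative):
# >>> subspace.limits("3sigma")          # same as subspace.limits_sigma(3)
# >>> subspace.limits("90%")             # same as subspace.limits_percentage(90)
# >>> subspace.limits("95%square")       # percentage limits, forced square
# >>> subspace.limits(None)              # falls back to limits_percentage()
# >>> subspace.limits([[0, 1], [0, 1]])  # explicit limits are passed through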
def image_rgba(self, grid=None, size=256, limits=None, square=False, center=None, weight=None, weight_stat="mean", figsize=None,
aspect="auto", f=lambda x: x, axes=None, xlabel=None, ylabel=None,
group_by=None, group_limits=None, group_colors='jet', group_labels=None, group_count=10, cmap="afmhot",
vmin=None, vmax=None,
pre_blend=False, background_color="white", background_alpha=1., normalize=True, color=None):
f = _parse_f(f)
if grid is None:
limits = self.limits(limits)
if limits is None:
limits = self.limits_sigma()
if group_limits is None and group_by:
group_limits = tuple(self.df(group_by).minmax()[0]) + (group_count,)
if weight_stat == "mean" and weight is not None:
grid = self.bin_mean(weight, limits=limits, size=size, group_limits=group_limits, group_by=group_by)
else:
grid = self.histogram(limits=limits, size=size, weight=weight, group_limits=group_limits, group_by=group_by)
if grid is None:  # cancel occurred
return
import matplotlib.cm
background_color = np.array(matplotlib.colors.colorConverter.to_rgb(background_color))
if group_by:
gmin, gmax, group_count = group_limits
if isinstance(group_colors, six.string_types):
group_colors = matplotlib.cm.get_cmap(group_colors)
if isinstance(group_colors, matplotlib.colors.Colormap):
group_count = group_limits[2]
colors = [group_colors(k / float(group_count - 1.)) for k in range(group_count)]
else:
colors = [matplotlib.colors.colorConverter.to_rgba(k) for k in group_colors]
total = np.sum(grid, axis=0).T
# grid /= total
mask = total > 0
alpha = total - total[mask].min()
alpha[~mask] = 0
alpha = total / alpha.max()
rgba = grid.T.dot(colors)
def _norm(data):
mask = np.isfinite(data)
data = data - data[mask].min()
data /= data[mask].max()
return data
rgba[..., 3] = (f(alpha))
# rgba[...,3] = 1
rgba[total == 0, 3] = 0.
mask = alpha > 0
if 1:
for i in range(3):
rgba[..., i] /= total
# rgba[...,i] /= rgba[...,0:3].max()
rgba[~mask, i] = background_color[i]
rgba = (np.swapaxes(rgba, 0, 1))
else:
if color:
color = np.array(matplotlib.colors.colorConverter.to_rgba(color))
rgba = np.zeros(grid.shape + (4,))
rgba[..., 0:4] = color
data = f(grid)
mask = (grid > 0) & np.isfinite(data)
if vmin is None:
vmin = data[mask].min()
if vmax is None:
vmax = data[mask].max()
if mask.sum():
data -= vmin
data /= vmax
data[~mask] = 0
else:
data[:] = 0
rgba[..., 3] = data
else:
cmap = matplotlib.cm.get_cmap(cmap)
data = f(grid)
if normalize:
mask = (data > 0) & np.isfinite(data)
if vmin is None:
vmin = data[mask].min()
if vmax is None:
vmax = data[mask].max()
if mask.sum():
data -= vmin
data /= vmax
else:
data[:] = 0
data[~mask] = 0
data = np.clip(data, 0, 1)
rgba = cmap(data)
if normalize:
rgba[~mask, 3] = 0
rgba[..., 3] = 1 # data
# rgba8 = np.swapaxes(rgba8, 0, 1)
# white = np.ones_like(rgba[...,0:3])
if pre_blend:
# rgba[...,3] = background_alpha
rgb = rgba[..., :3].T
alpha = rgba[..., 3].T
rgb[:] = rgb * alpha + background_color[:3].reshape(3, 1, 1) * (1 - alpha)
alpha[:] = alpha + background_alpha * (1 - alpha)
rgba = np.clip(rgba, 0, 1)
rgba8 = (rgba * 255).astype(np.uint8)
return rgba8
def plot_vectors(self, expression_x, expression_y, limits, wx=None, wy=None, counts=None, size=32, axes=None, **kwargs):
import pylab
# refactor: should go to bin_means_xy
if counts is None:
counts = self.histogram(size=size, limits=limits)
if wx is None:
wx = self.histogram(size=size, weight=expression_x, limits=limits)
if wy is None:
wy = self.histogram(size=size, weight=expression_y, limits=limits)
N = size
positions = [vaex.utils.linspace_centers(limits[i][0], limits[i][1], N) for i in range(self.dimension)]
# print(positions)
mask = counts > 0
vx = wx / counts
vy = wy / counts
vx[counts == 0] = 0
vy[counts == 0] = 0
# vx = self.vx.grid / self.vcounts.grid
# vy = self.vy.grid / self.vcounts.grid
x2d, y2d = np.meshgrid(positions[0], positions[1])
if axes is None:
axes = pylab.gca()
axes.quiver(x2d[mask], y2d[mask], vx[mask], vy[mask], **kwargs)
def plot(self, grid=None, size=256, limits=None, square=False, center=None, weight=None, weight_stat="mean", figsize=None,
aspect="auto", f="identity", axes=None, xlabel=None, ylabel=None,
group_by=None, group_limits=None, group_colors='jet', group_labels=None, group_count=None,
vmin=None, vmax=None,
cmap="afmhot",
**kwargs):
"""Plot the subspace using sane defaults to get a quick look at the data.
:param grid: A 2d numpy array with the counts, if None it will be calculated using limits provided and Subspace.histogram
:param size: Passed to Subspace.histogram
:param limits: Limits for the subspace in the form [[xmin, xmax], [ymin, ymax]], if None it will be calculated using Subspace.limits_sigma
:param square: argument passed to Subspace.limits_sigma
:param axes: matplotlib axes to draw on, defaults to the current axes
:param figsize: (x, y) tuple passed to pylab.figure for setting the figure size
:param aspect: Passed to matplotlib's axes.set_aspect
:param xlabel: String for label on x axis (may contain latex)
:param ylabel: Same for y axis
:param kwargs: extra argument passed to axes.imshow, useful for setting the colormap for instance, e.g. cmap='afmhot'
:return: matplotlib.image.AxesImage
"""
import pylab
f = _parse_f(f)
limits = self.limits(limits)
if limits is None:
limits = self.limits_sigma()
# if grid is None:
if group_limits is None and group_by:
group_limits = tuple(self.df(group_by).minmax()[0]) + (group_count,)
# grid = self.histogram(limits=limits, size=size, weight=weight, group_limits=group_limits, group_by=group_by)
if figsize is not None:
pylab.figure(num=None, figsize=figsize, dpi=80, facecolor='w', edgecolor='k')
if axes is None:
axes = pylab.gca()
fig = pylab.gcf()
# if xlabel:
pylab.xlabel(xlabel or self.expressions[0])
# if ylabel:
pylab.ylabel(ylabel or self.expressions[1])
# axes.set_aspect(aspect)
rgba8 = self.image_rgba(grid=grid, size=size, limits=limits, square=square, center=center, weight=weight, weight_stat=weight_stat,
f=f, axes=axes,
group_by=group_by, group_limits=group_limits, group_colors=group_colors, group_count=group_count,
vmin=vmin, vmax=vmax,
cmap=cmap)
import matplotlib
if group_by:
if isinstance(group_colors, six.string_types):
group_colors = matplotlib.cm.get_cmap(group_colors)
if isinstance(group_colors, matplotlib.colors.Colormap):
group_count = group_limits[2]
colors = [group_colors(k / float(group_count - 1.)) for k in range(group_count)]
else:
colors = [matplotlib.colors.colorConverter.to_rgba(k) for k in group_colors]
colormap = matplotlib.colors.ListedColormap(colors)
gmin, gmax, group_count = group_limits # [:2]
delta = (gmax - gmin) / (group_count - 1.)
norm = matplotlib.colors.Normalize(gmin - delta / 2, gmax + delta / 2)
sm = matplotlib.cm.ScalarMappable(norm, colormap)
sm.set_array(1)  # make matplotlib happy (strange behaviour)
colorbar = fig.colorbar(sm)
if group_labels:
colorbar.set_ticks(np.arange(gmin, gmax + delta / 2, delta))
colorbar.set_ticklabels(group_labels)
else:
colorbar.set_ticks(np.arange(gmin, gmax + delta / 2, delta))
colorbar.set_ticklabels(map(lambda x: "%f" % x, np.arange(gmin, gmax + delta / 2, delta)))
colorbar.ax.set_ylabel(group_by)
# matplotlib.colorbar.ColorbarBase(axes, norm=norm, cmap=colormap)
im = axes.imshow(rgba8, extent=np.array(limits).flatten(), origin="lower", aspect=aspect, **kwargs)
else:
norm = matplotlib.colors.Normalize(0, 23)
sm = matplotlib.cm.ScalarMappable(norm, cmap)
sm.set_array(1)  # make matplotlib happy (strange behaviour)
colorbar = fig.colorbar(sm)
im = axes.imshow(rgba8, extent=np.array(limits).flatten(), origin="lower", aspect=aspect, **kwargs)
colorbar = None
return im, colorbar
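# Hedged usage sketch for plot() (names are illustrative, not defined here):
# >>> im, colorbar = df("x", "y").plot(size=256, limits="3sigma", cmap="afmhot")
# The image is drawn with imshow on the current (or given) axes; when group_by is
# used the returned colorbar maps the group levels, otherwise colorbar is None.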
def plot1d(self, grid=None, size=64, limits=None, weight=None, figsize=None, f="identity", axes=None, xlabel=None, ylabel=None, **kwargs):
"""Plot the subspace using sane defaults to get a quick look at the data.
:param grid: A 1d numpy array with the counts, if None it will be calculated using limits provided and Subspace.histogram
:param size: Passed to Subspace.histogram
:param limits: Limits for the subspace in the form [[xmin, xmax]], if None it will be calculated using Subspace.limits_sigma
:param figsize: (x, y) tuple passed to pylab.figure for setting the figure size
:param xlabel: String for label on x axis (may contain latex)
:param ylabel: Same for y axis
:param kwargs: extra arguments passed to pylab.plot
"""
import pylab
f = _parse_f(f)
limits = self.limits(limits)
assert self.dimension == 1, "can only plot 1d, not %s" % self.dimension
if limits is None:
limits = self.limits_sigma()
if grid is None:
grid = self.histogram(limits=limits, size=size, weight=weight)
if figsize is not None:
pylab.figure(num=None, figsize=figsize, dpi=80, facecolor='w', edgecolor='k')
if axes is None:
axes = pylab.gca()
# if xlabel:
pylab.xlabel(xlabel or self.expressions[0])
# if ylabel:
# pylab.ylabel(ylabel or self.expressions[1])
pylab.ylabel("counts" or ylabel)
# axes.set_aspect(aspect)
N = len(grid)
xmin, xmax = limits[0]
return pylab.plot(np.arange(N) / (N - 1.0) * (xmax - xmin) + xmin, f(grid,), drawstyle="steps", **kwargs)
# pylab.ylim(-1, 6)
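# Hedged 1d counterpart (illustrative):
# >>> df("x").plot1d(size=64, limits="3sigma")   # steps-style line of the counts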
def plot_histogram_bq(self, f="identity", size=64, limits=None, color="red", bq_cleanup=True):
import vaex.ext.bqplot
limits = self.limits(limits)
plot = vaex.ext.bqplot.BqplotHistogram(self, color, size, limits)
if not hasattr(self, "_bqplot"):
self._bqplot = {}
self._bqplot["cleanups"] = []
else:
if bq_cleanup:
for cleanup in self._bqplot["cleanups"]:
cleanup()
self._bqplot["cleanups"] = []
def cleanup(callback=plot.callback):
self.df.signal_selection_changed.disconnect(callback=callback)
self._bqplot["cleanups"].append(cleanup)
return plot
def plot_bq(self, grid=None, size=256, limits=None, square=False, center=None, weight=None, figsize=None,
aspect="auto", f="identity", fig=None, axes=None, xlabel=None, ylabel=None, title=None,
group_by=None, group_limits=None, group_colors='jet', group_labels=None, group_count=None,
cmap="afmhot", scales=None, tool_select=False, bq_cleanup=True,
**kwargs):
import vaex.ext.bqplot
import bqplot.interacts
import bqplot.pyplot as p
import ipywidgets as widgets
import bqplot as bq
f = _parse_f(f)
limits = self.limits(limits)
import vaex.ext.bqplot
vaex.ext.bqplot.patch()
if not hasattr(self, "_bqplot"):
self._bqplot = {}
self._bqplot["cleanups"] = []
else:
if bq_cleanup:
for cleanup in self._bqplot["cleanups"]:
cleanup()
self._bqplot["cleanups"] = []
if limits is None:
limits = self.limits_sigma()
# if fig is None:
if scales is None:
x_scale = bq.LinearScale(min=limits[0][0], max=limits[0][1])
y_scale = bq.LinearScale(min=limits[1][0], max=limits[1][1])
scales = {'x': x_scale, 'y': y_scale}
else:
x_scale = scales["x"]
y_scale = scales["y"]
if 1:
fig = p.figure() # actually, bqplot doesn't return it
fig = p.current_figure()
fig.fig_color = "black" # TODO, take the color from the colormap
fig.padding_y = 0
# if we don't do this, bqplot may flip some axes... report this bug
x = np.arange(10)
y = x**2
p.plot(x, y, scales=scales)
# p.xlim(*limits[0])
# p.ylim(*limits[1])
# if grid is None:
if group_limits is None and group_by:
group_limits = tuple(self.df(group_by).minmax()[0]) + (group_count,)
# fig = p.
# if xlabel:
fig.axes[0].label = xlabel or self.expressions[0]
# if ylabel:
fig.axes[1].label = ylabel or self.expressions[1]
if title:
fig.title = title
# axes.set_aspect(aspect)
rgba8 = self.image_rgba(grid=grid, size=size, limits=limits, square=square, center=center, weight=weight,
f=f, axes=axes,
group_by=group_by, group_limits=group_limits, group_colors=group_colors, group_count=group_count,
cmap=cmap)
# x_scale = p._context["scales"]["x"]
# y_scale = p._context["scales"]["y"]
src = "http://localhost:8888/kernelspecs/python2/logo-64x64.png"
import bqplot.marks
im = vaex.ext.bqplot.Image(src=src, scales=scales, x=0, y=0, width=1, height=1)
if 0:
size = 20
x_data = np.arange(size)
line = bq.Lines(x=x_data, y=np.random.randn(size), scales={'x': x_scale, 'y': y_scale},
stroke_width=3, colors=['red'])
ax_x = bq.Axis(scale=x_scale, tick_format='0.2f', grid_lines='solid')
ax_y = bq.Axis(scale=y_scale, orientation='vertical', tick_format='0.2f', grid_lines='solid')
panzoom = bq.PanZoom(scales={'x': [x_scale], 'y': [y_scale]})
lasso = bqplot.interacts.LassoSelector()
brush = bqplot.interacts.BrushSelector(x_scale=x_scale, y_scale=y_scale, color="green")
fig = bq.Figure(marks=[line, im], axes=[ax_x, ax_y], min_width=100, min_height=100, interaction=panzoom)
else:
fig.marks = list(fig.marks) + [im]
def make_image(executor, limits):
# print "make image" * 100
self.executor = executor
if self.df.has_selection():
sub = self.selected()
else:
sub = self
return sub.image_rgba(limits=limits, size=size, f=f)
progress = widgets.FloatProgress(value=0.0, min=0.0, max=1.0, step=0.01)
updater = vaex.ext.bqplot.DebouncedThreadedUpdater(self, size, im, make_image, progress_widget=progress)
def update_image():
limits = [x_scale.min, x_scale.max], [y_scale.min, y_scale.max]
# print limits
# print "update...", limits
# vxbq.debounced_threaded_update(self.df, im, make_image2, limits=limits)
updater.update(limits)
def update(*args):
update_image()
y_scale.observe(update, "min")
y_scale.observe(update, "max")
x_scale.observe(update, "min")
x_scale.observe(update, "max")
update_image()
# fig = kwargs.pop('figure', p.current_figure())
tools = []
tool_actions = []
panzoom = bq.PanZoom(scales={'x': [x_scale], 'y': [y_scale]})
tool_actions_map = {u"m": panzoom}
tool_actions.append(u"m")
fig.interaction = panzoom
if tool_select:
brush = bqplot.interacts.BrushSelector(x_scale=x_scale, y_scale=y_scale, color="green")
tool_actions_map["b"] = brush
tool_actions.append("b")
def update_selection(*args):
def f():
if brush.selected:
(x1, y1), (x2, y2) = brush.selected
ex1, ex2 = self.expressions
mode = modes_names[modes_labels.index(button_selection_mode.value)]
self.df.select_rectangle(ex1, ex2, limits=[[x1, x2], [y1, y2]], mode=mode)
else:
self.df.select_nothing()
updater.update_select(f)
brush.observe(update_selection, "selected")
# fig.interaction = brush
# callback = self.df.signal_selection_changed.connect(lambda df: update_image())
callback = self.df.signal_selection_changed.connect(lambda df: updater.update_direct_safe())
def cleanup(callback=callback):
self.df.signal_selection_changed.disconnect(callback=callback)
self._bqplot["cleanups"].append(cleanup)
button_select_nothing = widgets.Button(icon="fa-trash-o")
def select_nothing(button):
self.df.select_nothing()
button_select_nothing.on_click(select_nothing)
tools.append(button_select_nothing)
modes_names = "replace and or xor subtract".split()
modes_labels = "= & | ^ -".split()
button_selection_mode = widgets.ToggleButtons(description='', options=modes_labels)
tools.append(button_selection_mode)
def change_interact(*args):
# print "change", args
fig.interaction = tool_actions_map[button_action.value]
# tool_actions = ["m", "b"]
# tool_actions = [("m", "m"), ("b", "b")]
button_action = widgets.ToggleButtons(description='', options=tool_actions, icons=["fa-arrows", "fa-pencil-square-o"])
button_action.observe(change_interact, "value")
tools.insert(0, button_action)
button_action.value = "m" # tool_actions[-1]
if len(tools) == 1:
tools = []
tools = widgets.HBox(tools)
box_layout = widgets.Layout(display='flex',
flex_flow='column',
# border='solid',
width='100%', height="100%")
fig.fig_margin = {'bottom': 40, 'left': 60, 'right': 10, 'top': 40}
# fig.min_height = 700
# fig.min_width = 400
fig.layout = box_layout
return widgets.VBox([fig, progress, tools])
def figlarge(self, size=(10, 10)):
import pylab
pylab.figure(num=None, figsize=size, dpi=80, facecolor='w', edgecolor='k')
# def bounded(self):
# return self.bounded_by_minmax()
def bounded_by(self, limits):
"""Returns a bounded subspace (SubspaceBounded) with limits as given by limits
:param limits: sequence of [(min, max), ..., (min, max)] values
:rtype: SubspaceBounded
"""
return SubspaceBounded(self, np.array(limits))
def bounded_by_minmax(self):
"""Returns a bounded subspace (SubspaceBounded) with limits given by Subspace.minmax()
:rtype: SubspaceBounded
"""
bounds = self.minmax()
return SubspaceBounded(self, bounds)
bounded = bounded_by_minmax
def bounded_by_sigmas(self, sigmas=3, square=False):
"""Returns a bounded subspace (SubspaceBounded) with limits given by Subspace.limits_sigma()
:rtype: SubspaceBounded
"""
bounds = self.limits_sigma(sigmas=sigmas, square=square)
return SubspaceBounded(self, bounds)
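# Hedged sketch relating the bounded_by* helpers (illustrative names):
# >>> sub = df("x", "y")
# >>> sub.bounded_by([[0, 1], [-1, 1]])   # explicit limits
# >>> sub.bounded_by_minmax()             # limits from Subspace.minmax()
# >>> sub.bounded_by_sigmas(sigmas=3)     # limits from Subspace.limits_sigma()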
def minmax(self):
"""Return a sequence of [(min, max), ..., (min, max)] corresponding to each expression in this subspace ignoring NaN.
"""
raise NotImplementedError
def mean(self):
"""Return a sequence of [mean, ... , mean] corresponding to the mean of each expression in this subspace ignoring NaN.
"""
raise NotImplementedError
def var(self, means=None):
"""Return a sequence of [var, ... , var] corresponding to the variance of each expression in this subspace ignoring NaN.
"""
raise NotImplementedError
def sum(self):
"""Return a sequence of [sum, ... , sum] corresponding to the sum of values of each expression in this subspace ignoring NaN."""
raise NotImplementedError
def histogram(self, limits, size=256, weight=None):
"""Return a grid of shape (size, ..., size) corresponding to the dimensionality of this subspace containing the counts in each element
The type of the grid is np.float64
"""
raise NotImplementedError
def limits_sigma(self, sigmas=3, square=False):
raise NotImplementedError
def row(self, index):
return np.array([self.df.evaluate(expression, i1=index, i2=index + 1)[0] for expression in self.expressions])
class SubspaceLocal(Subspace):
"""Subclass of subspace which implemented methods that can be run locally.
"""
def _toarray(self, list):
return np.array(list)
@property
def pre(self):
return self.executor.pre
@property
def post(self):
return self.executor.post
def _task(self, task, progressbar=False):
"""Helper function for returning tasks results, result when immediate is True, otherwise the task itself, which is a promise"""
if self.delay:
# should return a task or a promise nesting it
return self.executor.schedule(task)
else:
import vaex.utils
callback = None
try:
if progressbar == True:
def update(fraction):
bar.update(fraction)
return True
bar = vaex.utils.progressbar(task.name)
callback = self.executor.signal_progress.connect(update)
elif progressbar:
callback = self.executor.signal_progress.connect(progressbar)
self.executor.schedule(task)
self.df.execute()
result = task.get()
if progressbar == True:
bar.finish()
sys.stdout.write('\n')
return result
finally:
if callback:
self.executor.signal_progress.disconnect(callback)
def minmax(self, progressbar=False):
def min_max_reduce(minmax1, minmax2):
if minmax1 is None:
return minmax2
if minmax2 is None:
return minmax1
result = []
for d in range(self.dimension):
min1, max1 = minmax1[d]
min2, max2 = minmax2[d]
result.append((min(min1, min2), max(max1, max2)))
return result
def min_max_map(thread_index, i1, i2, *blocks):
if self.is_masked or self.df.filtered:
mask = self.df.evaluate_selection_mask("default" if self.is_masked else None, i1=i1, i2=i2, pre_filtered=False)
blocks = [block[mask] for block in blocks]
is_empty = all(~mask)
if is_empty:
return None
# with lock:
# print blocks
# with lock:
# print thread_index, i1, i2, blocks
blocks = [_asfloat(block) for block in blocks]
return [vaex.vaexfast.find_nan_min_max(block) for block in blocks]
if 0: # TODO: implement using statisticNd and benchmark
minmaxes = np.zeros((len(blocks), 2), dtype=float)
minmaxes[:, 0] = np.inf
minmaxes[:, 1] = -np.inf
for i, block in enumerate(blocks):
vaex.vaexfast.statisticNd([], block, minmaxes[i, :], [], [], 2)
# minmaxes[~np.isfinite(minmaxes)] = np.nan
return minmaxes
task = TaskMapReduceLegacy(self.df, self.expressions, min_max_map, min_max_reduce, self._toarray, info=True, name="minmax")
return self._task(task, progressbar=progressbar)
def mean(self):
return self._moment(1)
def _moment(self, moment=1):
def mean_reduce(means_and_counts1, means_and_counts2):
means_and_counts = []
for (mean1, count1), (mean2, count2) in zip(means_and_counts1, means_and_counts2):
means_and_counts.append([np.nansum([mean1 * count1, mean2 * count2]) / (count1 + count2), count1 + count2])
return means_and_counts
def remove_counts(means_and_counts):
return self._toarray(means_and_counts)[:, 0]
def mean_map(thread_index, i1, i2, *blocks):
if self.is_masked or self.df.filtered:
mask = self.df.evaluate_selection_mask("default" if self.is_masked else None, i1=i1, i2=i2, pre_filtered=False)
return [(np.nanmean(block[mask]**moment), np.count_nonzero(~np.isnan(block[mask]))) for block in blocks]
else:
return [(np.nanmean(block**moment), np.count_nonzero(~np.isnan(block))) for block in blocks]
task = TaskMapReduceLegacy(self.df, self.expressions, mean_map, mean_reduce, remove_counts, info=True)
return self._task(task)
def var(self, means=None):
# mean squared deviations pool linearly when weighted by counts, so reduce with a count-weighted mean (see the note after this method)
def vars_reduce(vars_and_counts1, vars_and_counts2):
vars_and_counts = []
for (var1, count1), (var2, count2) in zip(vars_and_counts1, vars_and_counts2):
vars_and_counts.append([np.nansum([var1 * count1, var2 * count2]) / (count1 + count2), count1 + count2])
return vars_and_counts
def remove_counts(vars_and_counts):
return self._toarray(vars_and_counts)[:, 0]
if self.is_masked or self.df.filtered:
def var_map(thread_index, i1, i2, *blocks):
mask = self.df.evaluate_selection_mask("default" if self.is_masked else None, i1=i1, i2=i2, pre_filtered=False)
if means is not None:
return [(np.nanmean((block[mask] - mean)**2), np.count_nonzero(~np.isnan(block[mask]))) for block, mean in zip(blocks, means)]
else:
return [(np.nanmean(block[mask]**2), np.count_nonzero(~np.isnan(block[mask]))) for block in blocks]
task = TaskMapReduceLegacy(self.df, self.expressions, var_map, vars_reduce, remove_counts, info=True)
else:
def var_map(*blocks):
if means is not None:
return [(np.nanmean((block - mean)**2), np.count_nonzero(~np.isnan(block))) for block, mean in zip(blocks, means)]
else:
return [(np.nanmean(block**2), np.count_nonzero(~np.isnan(block))) for block in blocks]
task = TaskMapReduceLegacy(self.df, self.expressions, var_map, vars_reduce, remove_counts)
return self._task(task)
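# Pooling note (a sketch of why vars_reduce above is a count-weighted mean):
# if one chunk has mean squared deviation v1 over n1 rows and another has v2 over
# n2 rows, both measured about the same (global) means, then
#   (n1 * v1 + n2 * v2) / (n1 + n2)
# is the mean squared deviation over the combined n1 + n2 rows; for example
# v1=2.0, n1=10 and v2=4.0, n2=30 pool to (20 + 120) / 40 = 3.5.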
def correlation(self, means=None, vars=None):
if self.dimension != 2:
raise ValueError("correlation is only defined for 2d subspaces, not %dd" % self.dimension)
def do_correlation(means, vars):
meanx, meany = means
sigmax, sigmay = vars[0]**0.5, vars[1]**0.5
def remove_counts_and_normalize(covar_and_count):
covar, counts = covar_and_count
return covar / counts / (sigmax * sigmay)
def covars_reduce(covar_and_count1, covar_and_count2):
if covar_and_count1 is None:
return covar_and_count2
if covar_and_count2 is None:
return covar_and_count1
else:
covar1, count1 = covar_and_count1
covar2, count2 = covar_and_count2
return [np.nansum([covar1, covar2]), count1 + count2]
mask = self.df.mask
def covar_map(thread_index, i1, i2, *blocks):
# return [(np.nanmean((block[mask[i1:i2]]-mean)**2), np.count_nonzero(~np.isnan(block[mask[i1:i2]]))) for block, mean in zip(blocks, means)]
blockx, blocky = blocks
if self.is_masked:
blockx, blocky = blockx[mask[i1:i2]], blocky[mask[i1:i2]]
counts = np.count_nonzero(~(np.isnan(blockx) | np.isnan(blocky)))
if counts == 0:
return None
else:
return np.nansum((blockx - meanx) * (blocky - meany)), counts
task = TaskMapReduceLegacy(self.df, self.expressions, covar_map, covars_reduce, remove_counts_and_normalize, info=True)
return self._task(task)
if means is None:
if self.delay:
means_wrapper = [None]
def do_vars(means):
means_wrapper[0] = means
return self.var(means)
def do_correlation_wrapper(vars):
return do_correlation(means_wrapper[0], vars)
return self.mean().then(do_vars).then(do_correlation_wrapper)
else:
means = self.mean()
vars = self.var(means=means)
return do_correlation(means, vars)
else:
if vars is None:
if self.delay:
def do_correlation_wrapper(vars):
return do_correlation(means, vars)
return self.var(means=means).then(do_correlation_wrapper)
else:
vars = self.var(means)
return do_correlation(means, vars)
else:
if means is None:
means = self.mean()
if vars is None:
vars = self.var(means=means)
return do_correlation(means, vars)
def sum(self):
def nansum(x): return np.nansum(x, dtype=np.float64)
# TODO: we can speed up significantly using our own nansum, probably the same for var and mean
nansum = vaex.vaexfast.nansum
if self.is_masked or self.df.filtered:
task = TaskMapReduceLegacy(self.df,
self.expressions, lambda thread_index, i1, i2, *blocks: [nansum(block[self.df.evaluate_selection_mask("default" if self.is_masked else None, i1=i1, i2=i2, pre_filtered=False)])
for block in blocks],
lambda a, b: np.array(a) + np.array(b), self._toarray, info=True)
else:
task = TaskMapReduceLegacy(self.df, self.expressions, lambda *blocks: [nansum(block) for block in blocks], lambda a, b: np.array(a) + np.array(b), self._toarray)
return self._task(task)
def histogram(self, limits, size=256, weight=None, progressbar=False, group_by=None, group_limits=None):
expressions = self.expressions
if group_by:
expressions = list(expressions) + [group_by]
limits = list(limits) + [group_limits[:2]] # [[group_limits[0] - 0,5, group_limits[1]+0.5]]
# assert group_limits[2] == 1
size = (group_limits[2],) + (size,) * (len(expressions) - 1)
task = TaskHistogram(self.df, self, expressions, size, limits, masked=self.is_masked, weight=weight)
return self._task(task, progressbar=progressbar)
def bin_mean(self, expression, limits, size=256, progressbar=False, group_by=None, group_limits=None):
# todo, fix progressbar into two...
counts = self.histogram(limits=limits, size=size, progressbar=progressbar, group_by=group_by, group_limits=group_limits)
weighted = self.histogram(limits=limits, size=size, progressbar=progressbar, group_by=group_by, group_limits=group_limits,
weight=expression)
mean = weighted / counts
mean[counts == 0] = np.nan
return mean
def bin_mean_cyclic(self, expression, max_value, limits, size=256, progressbar=False, group_by=None, group_limits=None):
# todo, fix progressbar into two...
meanx = self.bin_mean(limits=limits, size=size, progressbar=progressbar, group_by=group_by, group_limits=group_limits,
expression="cos((%s)/%r*2*pi)" % (expression, max_value))
meany = self.bin_mean(limits=limits, size=size, progressbar=progressbar, group_by=group_by, group_limits=group_limits,
expression="sin((%s)/%r*2*pi)" % (expression, max_value))
angles = np.arctan2(meany, meanx)
values = ((angles + 2 * np.pi) % (2 * np.pi)) / (2 * np.pi) * max_value
length = np.sqrt(meanx**2 + meany**2)
length[~np.isfinite(meanx)] = np.nan
return values, length
def mutual_information(self, limits=None, grid=None, size=256):
if limits is None:
limits_done = Task.fulfilled(self.minmax())
else:
limits_done = Task.fulfilled(limits)
if grid is None:
if limits is None:
histogram_done = limits_done.then(lambda limits: self.histogram(limits, size=size))
else:
histogram_done = Task.fulfilled(self.histogram(limits, size=size))
else:
histogram_done = Task.fulfilled(grid)
mutual_information_promise = histogram_done.then(vaex.kld.mutual_information)
return mutual_information_promise if self.delay else mutual_information_promise.get()
def limits_percentage(self, percentage=99.73, square=False):
import scipy.ndimage
limits = []
for expr in self.expressions:
subspace = self.df(expr)
if self.is_selected():
subspace = subspace.selected()
limits_minmax = subspace.minmax()
vmin, vmax = limits_minmax[0]
size = 1024 * 16
counts = subspace.histogram(size=size, limits=limits_minmax)
cumcounts = np.concatenate([[0], np.cumsum(counts)])
cumcounts /= cumcounts.max()
# TODO: this is crude.. see the details!
f = (1 - percentage / 100.) / 2
x = np.linspace(vmin, vmax, size + 1)
l = np.interp([f, 1 - f], cumcounts, x)
limits.append(l)
return limits
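# Worked note for limits_percentage (a sketch, sizes are illustrative): with
# percentage=99.73 the tail fraction is f = (1 - 0.9973) / 2 = 0.00135, so the
# returned limits are the x positions where the normalized cumulative histogram
# crosses 0.00135 and 0.99865, found by linear interpolation on the 16384-bin grid.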
def limits_sigma(self, sigmas=3, square=False):
if self.delay:
means_wrapper = [None]
def do_vars(means):
means_wrapper[0] = means
return self.var(means)
def do_limits(vars):
stds = vars**0.5
means = means_wrapper[0]
if square:
stds = np.repeat(stds.mean(), len(stds))
return np.array(list(zip(means - sigmas * stds, means + sigmas * stds)))
return self.mean().then(do_vars).then(do_limits)
else:
means = self.mean()
stds = self.var(means=means)**0.5
if square:
stds = np.repeat(stds.mean(), len(stds))
return np.array(list(zip(means - sigmas * stds, means + sigmas * stds)))
def _not_needed_current(self):
index = self.df.get_current_row()
def find(thread_index, i1, i2, *blocks):
if (index >= i1) and (index < i2):
return [block[index - i1] for block in blocks]
else:
return None
task = TaskMapReduceLegacy(self.df, self.expressions, find, lambda a, b: a if b is None else b, info=True)
return self._task(task)
def nearest(self, point, metric=None):
metric = metric or [1.] * len(point)
def nearest_in_block(thread_index, i1, i2, *blocks):
if self.is_masked:
mask = self.df.evaluate_selection_mask("default", i1=i1, i2=i2, pre_filtered=False)
if mask.sum() == 0:
return None
blocks = [block[mask] for block in blocks]
distance_squared = np.sum([(blocks[i] - point[i])**2. * metric[i] for i in range(self.dimension)], axis=0)
min_index_global = min_index = np.argmin(distance_squared)
if self.is_masked: # we skipped some indices, so correct for that
min_index_global = np.argmin((np.cumsum(mask) - 1 - min_index)**2)
# with lock:
# print i1, i2, min_index, distance_squared, [block[min_index] for block in blocks]
return min_index_global.item() + i1, distance_squared[min_index].item()**0.5, [block[min_index].item() for block in blocks]
def nearest_reduce(a, b):
if a is None:
return b
if b is None:
return a
if a[1] < b[1]:
return a
else:
return b
if self.is_masked:
pass
task = TaskMapReduceLegacy(self.df,
self.expressions,
nearest_in_block,
nearest_reduce, info=True)
return self._task(task)
| mit |
bsipocz/seaborn | seaborn/categorical.py | 19 | 102299 | from __future__ import division
from textwrap import dedent
import colorsys
import numpy as np
from scipy import stats
import pandas as pd
from pandas.core.series import remove_na
import matplotlib as mpl
import matplotlib.pyplot as plt
import warnings
from .external.six import string_types
from .external.six.moves import range
from . import utils
from .utils import desaturate, iqr, categorical_order
from .algorithms import bootstrap
from .palettes import color_palette, husl_palette, light_palette
from .axisgrid import FacetGrid, _facet_docs
class _CategoricalPlotter(object):
width = .8
def establish_variables(self, x=None, y=None, hue=None, data=None,
orient=None, order=None, hue_order=None,
units=None):
"""Convert input specification into a common representation."""
# Option 1:
# We are plotting a wide-form dataset
# -----------------------------------
if x is None and y is None:
# Do a sanity check on the inputs
if hue is not None:
error = "Cannot use `hue` without `x` or `y`"
raise ValueError(error)
# No hue grouping with wide inputs
plot_hues = None
hue_title = None
hue_names = None
# No statistical units with wide inputs
plot_units = None
# We also won't get axes labels here
value_label = None
group_label = None
# Option 1a:
# The input data is a Pandas DataFrame
# ------------------------------------
if isinstance(data, pd.DataFrame):
# Order the data correctly
if order is None:
order = []
# Reduce to just numeric columns
for col in data:
try:
data[col].astype(np.float)
order.append(col)
except ValueError:
pass
plot_data = data[order]
group_names = order
group_label = data.columns.name
# Convert to a list of arrays, the common representation
iter_data = plot_data.iteritems()
plot_data = [np.asarray(s, np.float) for k, s in iter_data]
# Option 1b:
# The input data is an array or list
# ----------------------------------
else:
# We can't reorder the data
if order is not None:
error = "Input data must be a pandas object to reorder"
raise ValueError(error)
# The input data is an array
if hasattr(data, "shape"):
if len(data.shape) == 1:
if np.isscalar(data[0]):
plot_data = [data]
else:
plot_data = list(data)
elif len(data.shape) == 2:
nr, nc = data.shape
if nr == 1 or nc == 1:
plot_data = [data.ravel()]
else:
plot_data = [data[:, i] for i in range(nc)]
else:
error = ("Input `data` can have no "
"more than 2 dimensions")
raise ValueError(error)
# Check if `data` is None to let us bail out here (for testing)
elif data is None:
plot_data = [[]]
# The input data is a flat list
elif np.isscalar(data[0]):
plot_data = [data]
# The input data is a nested list
# This will catch some things that might fail later
# but exhaustive checks are hard
else:
plot_data = data
# Convert to a list of arrays, the common representation
plot_data = [np.asarray(d, np.float) for d in plot_data]
# The group names will just be numeric indices
group_names = list(range((len(plot_data))))
# Figure out the plotting orientation
orient = "h" if str(orient).startswith("h") else "v"
# Option 2:
# We are plotting a long-form dataset
# -----------------------------------
else:
# See if we need to get variables from `data`
if data is not None:
x = data.get(x, x)
y = data.get(y, y)
hue = data.get(hue, hue)
units = data.get(units, units)
# Validate the inputs
for input in [x, y, hue, units]:
if isinstance(input, string_types):
err = "Could not interpret input '{}'".format(input)
raise ValueError(err)
# Figure out the plotting orientation
orient = self.infer_orient(x, y, orient)
# Option 2a:
# We are plotting a single set of data
# ------------------------------------
if x is None or y is None:
# Determine where the data are
vals = y if x is None else x
# Put them into the common representation
plot_data = [np.asarray(vals)]
# Get a label for the value axis
if hasattr(vals, "name"):
value_label = vals.name
else:
value_label = None
# This plot will not have group labels or hue nesting
groups = None
group_label = None
group_names = []
plot_hues = None
hue_names = None
hue_title = None
plot_units = None
# Option 2b:
# We are grouping the data values by another variable
# ---------------------------------------------------
else:
# Determine which role each variable will play
if orient == "v":
vals, groups = y, x
else:
vals, groups = x, y
# Get the categorical axis label
group_label = None
if hasattr(groups, "name"):
group_label = groups.name
# Get the order on the categorical axis
group_names = categorical_order(groups, order)
# Group the numeric data
plot_data, value_label = self._group_longform(vals, groups,
group_names)
# Now handle the hue levels for nested ordering
if hue is None:
plot_hues = None
hue_title = None
hue_names = None
else:
# Get the order of the hue levels
hue_names = categorical_order(hue, hue_order)
# Group the hue data
plot_hues, hue_title = self._group_longform(hue, groups,
group_names)
# Now handle the units for nested observations
if units is None:
plot_units = None
else:
plot_units, _ = self._group_longform(units, groups,
group_names)
# Assign object attributes
# ------------------------
self.orient = orient
self.plot_data = plot_data
self.group_label = group_label
self.value_label = value_label
self.group_names = group_names
self.plot_hues = plot_hues
self.hue_title = hue_title
self.hue_names = hue_names
self.plot_units = plot_units
def _group_longform(self, vals, grouper, order):
"""Group a long-form variable by another with correct order."""
# Ensure that the groupby will work
if not isinstance(vals, pd.Series):
vals = pd.Series(vals)
# Group the val data
grouped_vals = vals.groupby(grouper)
out_data = []
for g in order:
try:
g_vals = np.asarray(grouped_vals.get_group(g))
except KeyError:
g_vals = np.array([])
out_data.append(g_vals)
# Get the vals axis label
label = vals.name
return out_data, label
def establish_colors(self, color, palette, saturation):
"""Get a list of colors for the main component of the plots."""
if self.hue_names is None:
n_colors = len(self.plot_data)
else:
n_colors = len(self.hue_names)
# Determine the main colors
if color is None and palette is None:
# Determine whether the current palette will have enough values
# If not, we'll default to the husl palette so each is distinct
current_palette = mpl.rcParams["axes.color_cycle"]
if n_colors <= len(current_palette):
colors = color_palette(n_colors=n_colors)
else:
colors = husl_palette(n_colors, l=.7)
elif palette is None:
# When passing a specific color, the interpretation depends
# on whether there is a hue variable or not.
# If so, we will make a blend palette so that the different
# levels have some amount of variation.
if self.hue_names is None:
colors = [color] * n_colors
else:
colors = light_palette(color, n_colors)
else:
# Let `palette` be a dict mapping level to color
if isinstance(palette, dict):
if self.hue_names is None:
levels = self.group_names
else:
levels = self.hue_names
palette = [palette[l] for l in levels]
colors = color_palette(palette, n_colors)
# Convert the colors to a common rgb representation
colors = [mpl.colors.colorConverter.to_rgb(c) for c in colors]
# Desaturate a bit because these are patches
if saturation < 1:
colors = [desaturate(c, saturation) for c in colors]
# Determine the gray color to use for the lines framing the plot
light_vals = [colorsys.rgb_to_hls(*c)[1] for c in colors]
l = min(light_vals) * .6
gray = (l, l, l)
# Assign object attributes
self.colors = colors
self.gray = gray
def infer_orient(self, x, y, orient=None):
"""Determine how the plot should be oriented based on the data."""
orient = str(orient)
def is_categorical(s):
try:
# Correct way, but doesn't exist in older Pandas
return pd.core.common.is_categorical_dtype(s)
except AttributeError:
# Also works, but feels hackier
return str(s.dtype) == "categorical"
def is_not_numeric(s):
try:
np.asarray(s, dtype=np.float)
except ValueError:
return True
return False
no_numeric = "Neither the `x` nor `y` variable appears to be numeric."
if orient.startswith("v"):
return "v"
elif orient.startswith("h"):
return "h"
elif x is None:
return "v"
elif y is None:
return "h"
elif is_categorical(y):
if is_categorical(x):
raise ValueError(no_numeric)
else:
return "h"
elif is_not_numeric(y):
if is_not_numeric(x):
raise ValueError(no_numeric)
else:
return "h"
else:
return "v"
@property
def hue_offsets(self):
"""A list of center positions for plots when hue nesting is used."""
n_levels = len(self.hue_names)
each_width = self.width / n_levels
offsets = np.linspace(0, self.width - each_width, n_levels)
offsets -= offsets.mean()
return offsets
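# Worked example (sketch): with the default width of .8 and two hue levels,
# each_width = .4 and np.linspace(0, .4, 2) gives [0, .4]; subtracting the mean
# centers them at [-.2, +.2] around each group position.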
@property
def nested_width(self):
"""A float with the width of plot elements when hue nesting is used."""
return self.width / len(self.hue_names) * .98
def annotate_axes(self, ax):
"""Add descriptive labels to an Axes object."""
if self.orient == "v":
xlabel, ylabel = self.group_label, self.value_label
else:
xlabel, ylabel = self.value_label, self.group_label
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
if self.orient == "v":
ax.set_xticks(np.arange(len(self.plot_data)))
ax.set_xticklabels(self.group_names)
else:
ax.set_yticks(np.arange(len(self.plot_data)))
ax.set_yticklabels(self.group_names)
if self.orient == "v":
ax.xaxis.grid(False)
ax.set_xlim(-.5, len(self.plot_data) - .5)
else:
ax.yaxis.grid(False)
ax.set_ylim(-.5, len(self.plot_data) - .5)
if self.hue_names is not None:
leg = ax.legend(loc="best")
if self.hue_title is not None:
leg.set_title(self.hue_title)
# Set the title size a roundabout way to maintain
# compatibility with matplotlib 1.1
try:
title_size = mpl.rcParams["axes.labelsize"] * .85
except TypeError: # labelsize is something like "large"
title_size = mpl.rcParams["axes.labelsize"]
prop = mpl.font_manager.FontProperties(size=title_size)
leg._legend_title_box._text.set_font_properties(prop)
def add_legend_data(self, ax, color, label):
"""Add a dummy patch object so we can get legend data."""
rect = plt.Rectangle([0, 0], 0, 0,
linewidth=self.linewidth / 2,
edgecolor=self.gray,
facecolor=color,
label=label)
ax.add_patch(rect)
class _BoxPlotter(_CategoricalPlotter):
def __init__(self, x, y, hue, data, order, hue_order,
orient, color, palette, saturation,
width, fliersize, linewidth):
self.establish_variables(x, y, hue, data, orient, order, hue_order)
self.establish_colors(color, palette, saturation)
self.width = width
self.fliersize = fliersize
if linewidth is None:
linewidth = mpl.rcParams["lines.linewidth"]
self.linewidth = linewidth
def draw_boxplot(self, ax, kws):
"""Use matplotlib to draw a boxplot on an Axes."""
vert = self.orient == "v"
for i, group_data in enumerate(self.plot_data):
if self.plot_hues is None:
# Handle case where there is no data at this level
if group_data.size == 0:
continue
# Draw a single box or a set of boxes
# with a single level of grouping
box_data = remove_na(group_data)
# Handle case where there is no non-null data
if box_data.size == 0:
continue
artist_dict = ax.boxplot(box_data,
vert=vert,
patch_artist=True,
positions=[i],
widths=self.width,
**kws)
color = self.colors[i]
self.restyle_boxplot(artist_dict, color, kws)
else:
# Draw nested groups of boxes
offsets = self.hue_offsets
for j, hue_level in enumerate(self.hue_names):
hue_mask = self.plot_hues[i] == hue_level
# Add a legend for this hue level
if not i:
self.add_legend_data(ax, self.colors[j], hue_level)
# Handle case where there is no data at this level
if group_data.size == 0:
continue
box_data = remove_na(group_data[hue_mask])
# Handle case where there is no non-null data
if box_data.size == 0:
continue
center = i + offsets[j]
artist_dict = ax.boxplot(box_data,
vert=vert,
patch_artist=True,
positions=[center],
widths=self.nested_width,
**kws)
self.restyle_boxplot(artist_dict, self.colors[j], kws)
# Add legend data, but just for one set of boxes
def restyle_boxplot(self, artist_dict, color, kws):
"""Take a drawn matplotlib boxplot and make it look nice."""
for box in artist_dict["boxes"]:
box.update(dict(color=color,
zorder=.9,
edgecolor=self.gray,
linewidth=self.linewidth))
box.update(kws.get("boxprops", {}))
for whisk in artist_dict["whiskers"]:
whisk.update(dict(color=self.gray,
linewidth=self.linewidth,
linestyle="-"))
whisk.update(kws.get("whiskerprops", {}))
for cap in artist_dict["caps"]:
cap.update(dict(color=self.gray,
linewidth=self.linewidth))
cap.update(kws.get("capprops", {}))
for med in artist_dict["medians"]:
med.update(dict(color=self.gray,
linewidth=self.linewidth))
med.update(kws.get("medianprops", {}))
for fly in artist_dict["fliers"]:
fly.update(dict(color=self.gray,
marker="d",
markeredgecolor=self.gray,
markersize=self.fliersize))
fly.update(kws.get("flierprops", {}))
def plot(self, ax, boxplot_kws):
"""Make the plot."""
self.draw_boxplot(ax, boxplot_kws)
self.annotate_axes(ax)
if self.orient == "h":
ax.invert_yaxis()
class _ViolinPlotter(_CategoricalPlotter):
def __init__(self, x, y, hue, data, order, hue_order,
bw, cut, scale, scale_hue, gridsize,
width, inner, split, orient, linewidth,
color, palette, saturation):
self.establish_variables(x, y, hue, data, orient, order, hue_order)
self.establish_colors(color, palette, saturation)
self.estimate_densities(bw, cut, scale, scale_hue, gridsize)
self.gridsize = gridsize
self.width = width
if inner is not None:
if not any([inner.startswith("quart"),
inner.startswith("box"),
inner.startswith("stick"),
inner.startswith("point")]):
err = "Inner style '{}' not recognized".format(inner)
raise ValueError(err)
self.inner = inner
if split and self.hue_names is not None and len(self.hue_names) != 2:
raise ValueError("Cannot use `split` with more than 2 hue levels.")
self.split = split
if linewidth is None:
linewidth = mpl.rcParams["lines.linewidth"]
self.linewidth = linewidth
def estimate_densities(self, bw, cut, scale, scale_hue, gridsize):
"""Find the support and density for all of the data."""
# Initialize data structures to keep track of plotting data
if self.hue_names is None:
support = []
density = []
counts = np.zeros(len(self.plot_data))
max_density = np.zeros(len(self.plot_data))
else:
support = [[] for _ in self.plot_data]
density = [[] for _ in self.plot_data]
size = len(self.group_names), len(self.hue_names)
counts = np.zeros(size)
max_density = np.zeros(size)
for i, group_data in enumerate(self.plot_data):
# Option 1: we have a single level of grouping
# --------------------------------------------
if self.plot_hues is None:
# Strip missing datapoints
kde_data = remove_na(group_data)
# Handle special case of no data at this level
if kde_data.size == 0:
support.append(np.array([]))
density.append(np.array([1.]))
counts[i] = 0
max_density[i] = 0
continue
# Handle special case of a single unique datapoint
elif np.unique(kde_data).size == 1:
support.append(np.unique(kde_data))
density.append(np.array([1.]))
counts[i] = 1
max_density[i] = 0
continue
# Fit the KDE and get the used bandwidth size
kde, bw_used = self.fit_kde(kde_data, bw)
# Determine the support grid and get the density over it
support_i = self.kde_support(kde_data, bw_used, cut, gridsize)
density_i = kde.evaluate(support_i)
# Update the data structures with these results
support.append(support_i)
density.append(density_i)
counts[i] = kde_data.size
max_density[i] = density_i.max()
# Option 2: we have nested grouping by a hue variable
# ---------------------------------------------------
else:
for j, hue_level in enumerate(self.hue_names):
# Handle special case of no data at this category level
if not group_data.size:
support[i].append(np.array([]))
density[i].append(np.array([1.]))
counts[i, j] = 0
max_density[i, j] = 0
continue
# Select out the observations for this hue level
hue_mask = self.plot_hues[i] == hue_level
# Strip missing datapoints
kde_data = remove_na(group_data[hue_mask])
# Handle special case of no data at this level
if kde_data.size == 0:
support[i].append(np.array([]))
density[i].append(np.array([1.]))
counts[i, j] = 0
max_density[i, j] = 0
continue
# Handle special case of a single unique datapoint
elif np.unique(kde_data).size == 1:
support[i].append(np.unique(kde_data))
density[i].append(np.array([1.]))
counts[i, j] = 1
max_density[i, j] = 0
continue
# Fit the KDE and get the used bandwidth size
kde, bw_used = self.fit_kde(kde_data, bw)
# Determine the support grid and get the density over it
support_ij = self.kde_support(kde_data, bw_used,
cut, gridsize)
density_ij = kde.evaluate(support_ij)
# Update the data structures with these results
support[i].append(support_ij)
density[i].append(density_ij)
counts[i, j] = kde_data.size
max_density[i, j] = density_ij.max()
# Scale the height of the density curve.
# For a violinplot the density is non-quantitative.
# The objective here is to scale the curves relative to 1 so that
# they can be multiplied by the width parameter during plotting.
if scale == "area":
self.scale_area(density, max_density, scale_hue)
elif scale == "width":
self.scale_width(density)
elif scale == "count":
self.scale_count(density, counts, scale_hue)
else:
raise ValueError("scale method '{}' not recognized".format(scale))
# Set object attributes that will be used while plotting
self.support = support
self.density = density
def fit_kde(self, x, bw):
"""Estimate a KDE for a vector of data with flexible bandwidth."""
# Allow for the use of old scipy where `bw` is fixed
try:
kde = stats.gaussian_kde(x, bw)
except TypeError:
kde = stats.gaussian_kde(x)
if bw != "scott": # scipy default
msg = ("Ignoring bandwidth choice, "
"please upgrade scipy to use a different bandwidth.")
warnings.warn(msg, UserWarning)
# Extract the numeric bandwidth from the KDE object
bw_used = kde.factor
# At this point, bw will be a numeric scale factor.
# To get the actual bandwidth of the kernel, we multiply by the
# unbiased standard deviation of the data, which we will use
# elsewhere to compute the range of the support.
bw_used = bw_used * x.std(ddof=1)
return kde, bw_used
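# Numeric sketch: scipy's Scott factor for 1d data is n ** (-1 / 5.), so for
# 1000 points kde.factor is roughly 0.251; with a sample standard deviation of
# 2.0 the bandwidth used for the support range below is roughly 0.50.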
def kde_support(self, x, bw, cut, gridsize):
"""Define a grid of support for the violin."""
support_min = x.min() - bw * cut
support_max = x.max() + bw * cut
return np.linspace(support_min, support_max, gridsize)
def scale_area(self, density, max_density, scale_hue):
"""Scale the relative area under the KDE curve.
This essentially preserves the "standard" KDE scaling, but the
resulting maximum density will be 1 so that the curve can be
properly multiplied by the violin width.
"""
if self.hue_names is None:
for d in density:
if d.size > 1:
d /= max_density.max()
else:
for i, group in enumerate(density):
for d in group:
if scale_hue:
max = max_density[i].max()
else:
max = max_density.max()
if d.size > 1:
d /= max
def scale_width(self, density):
"""Scale each density curve to the same height."""
if self.hue_names is None:
for d in density:
d /= d.max()
else:
for group in density:
for d in group:
d /= d.max()
def scale_count(self, density, counts, scale_hue):
"""Scale each density curve by the number of observations."""
if self.hue_names is None:
for count, d in zip(counts, density):
d /= d.max()
d *= count / counts.max()
else:
for i, group in enumerate(density):
for j, d in enumerate(group):
count = counts[i, j]
if scale_hue:
scaler = count / counts[i].max()
else:
scaler = count / counts.max()
d /= d.max()
d *= scaler
@property
def dwidth(self):
if self.hue_names is None:
return self.width / 2
elif self.split:
return self.width / 2
else:
return self.width / (2 * len(self.hue_names))
def draw_violins(self, ax):
"""Draw the violins onto `ax`."""
fill_func = ax.fill_betweenx if self.orient == "v" else ax.fill_between
for i, group_data in enumerate(self.plot_data):
kws = dict(edgecolor=self.gray, linewidth=self.linewidth)
# Option 1: we have a single level of grouping
# --------------------------------------------
if self.plot_hues is None:
support, density = self.support[i], self.density[i]
# Handle special case of no observations in this bin
if support.size == 0:
continue
# Handle special case of a single observation
elif support.size == 1:
val = np.asscalar(support)
d = np.asscalar(density)
self.draw_single_observation(ax, i, val, d)
continue
# Draw the violin for this group
grid = np.ones(self.gridsize) * i
fill_func(support,
grid - density * self.dwidth,
grid + density * self.dwidth,
color=self.colors[i],
**kws)
# Draw the interior representation of the data
if self.inner is None:
continue
# Get a nan-free vector of datapoints
violin_data = remove_na(group_data)
# Draw box and whisker information
if self.inner.startswith("box"):
self.draw_box_lines(ax, violin_data, support, density, i)
# Draw quartile lines
elif self.inner.startswith("quart"):
self.draw_quartiles(ax, violin_data, support, density, i)
# Draw stick observations
elif self.inner.startswith("stick"):
self.draw_stick_lines(ax, violin_data, support, density, i)
# Draw point observations
elif self.inner.startswith("point"):
self.draw_points(ax, violin_data, i)
# Option 2: we have nested grouping by a hue variable
# ---------------------------------------------------
else:
offsets = self.hue_offsets
for j, hue_level in enumerate(self.hue_names):
support, density = self.support[i][j], self.density[i][j]
kws["color"] = self.colors[j]
# Add legend data, but just for one set of violins
if not i:
self.add_legend_data(ax, self.colors[j], hue_level)
# Handle the special case where we have no observations
if support.size == 0:
continue
# Handle the special case where we have one observation
elif support.size == 1:
val = np.asscalar(support)
d = np.asscalar(density)
if self.split:
d = d / 2
at_group = i + offsets[j]
self.draw_single_observation(ax, at_group, val, d)
continue
# Option 2a: we are drawing a single split violin
# -----------------------------------------------
if self.split:
grid = np.ones(self.gridsize) * i
if j:
fill_func(support,
grid,
grid + density * self.dwidth,
**kws)
else:
fill_func(support,
grid - density * self.dwidth,
grid,
**kws)
# Draw the interior representation of the data
if self.inner is None:
continue
# Get a nan-free vector of datapoints
hue_mask = self.plot_hues[i] == hue_level
violin_data = remove_na(group_data[hue_mask])
# Draw quartile lines
if self.inner.startswith("quart"):
self.draw_quartiles(ax, violin_data,
support, density, i,
["left", "right"][j])
# Draw stick observations
elif self.inner.startswith("stick"):
self.draw_stick_lines(ax, violin_data,
support, density, i,
["left", "right"][j])
# The box and point interior plots are drawn for
# all data at the group level, so we just do that once
if not j:
continue
# Get the whole vector for this group level
violin_data = remove_na(group_data)
# Draw box and whisker information
if self.inner.startswith("box"):
self.draw_box_lines(ax, violin_data,
support, density, i)
# Draw point observations
elif self.inner.startswith("point"):
self.draw_points(ax, violin_data, i)
# Option 2b: we are drawing full nested violins
# -----------------------------------------------
else:
grid = np.ones(self.gridsize) * (i + offsets[j])
fill_func(support,
grid - density * self.dwidth,
grid + density * self.dwidth,
**kws)
# Draw the interior representation
if self.inner is None:
continue
# Get a nan-free vector of datapoints
hue_mask = self.plot_hues[i] == hue_level
violin_data = remove_na(group_data[hue_mask])
# Draw box and whisker information
if self.inner.startswith("box"):
self.draw_box_lines(ax, violin_data,
support, density,
i + offsets[j])
# Draw quartile lines
elif self.inner.startswith("quart"):
self.draw_quartiles(ax, violin_data,
support, density,
i + offsets[j])
# Draw stick observations
elif self.inner.startswith("stick"):
self.draw_stick_lines(ax, violin_data,
support, density,
i + offsets[j])
# Draw point observations
elif self.inner.startswith("point"):
self.draw_points(ax, violin_data, i + offsets[j])
def draw_single_observation(self, ax, at_group, at_quant, density):
"""Draw a line to mark a single observation."""
d_width = density * self.dwidth
if self.orient == "v":
ax.plot([at_group - d_width, at_group + d_width],
[at_quant, at_quant],
color=self.gray,
linewidth=self.linewidth)
else:
ax.plot([at_quant, at_quant],
[at_group - d_width, at_group + d_width],
color=self.gray,
linewidth=self.linewidth)
def draw_box_lines(self, ax, data, support, density, center):
"""Draw boxplot information at center of the density."""
# Compute the boxplot statistics
q25, q50, q75 = np.percentile(data, [25, 50, 75])
whisker_lim = 1.5 * iqr(data)
h1 = np.min(data[data >= (q25 - whisker_lim)])
h2 = np.max(data[data <= (q75 + whisker_lim)])
# Draw a boxplot using lines and a point
if self.orient == "v":
ax.plot([center, center], [h1, h2],
linewidth=self.linewidth,
color=self.gray)
ax.plot([center, center], [q25, q75],
linewidth=self.linewidth * 3,
color=self.gray)
ax.scatter(center, q50,
zorder=3,
color="white",
edgecolor=self.gray,
s=np.square(self.linewidth * 2))
else:
ax.plot([h1, h2], [center, center],
linewidth=self.linewidth,
color=self.gray)
ax.plot([q25, q75], [center, center],
linewidth=self.linewidth * 3,
color=self.gray)
ax.scatter(q50, center,
zorder=3,
color="white",
edgecolor=self.gray,
s=np.square(self.linewidth * 2))
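    # Worked sketch of the whisker rule used above (Tukey-style), assuming the
    # ``iqr`` helper returns ``q75 - q25``; the whiskers reach the most extreme
    # observations that are still within 1.5 * IQR of the quartiles:
    #
    #     import numpy as np
    #     data = np.array([1, 2, 3, 4, 5, 6, 7, 100.])
    #     q25, q75 = np.percentile(data, [25, 75])   # 2.75, 6.25
    #     lim = 1.5 * (q75 - q25)                    # 5.25
    #     h1 = data[data >= q25 - lim].min()         # 1.0
    #     h2 = data[data <= q75 + lim].max()         # 7.0 (100 is an outlier)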
def draw_quartiles(self, ax, data, support, density, center, split=False):
"""Draw the quartiles as lines at width of density."""
q25, q50, q75 = np.percentile(data, [25, 50, 75])
self.draw_to_density(ax, center, q25, support, density, split,
linewidth=self.linewidth,
dashes=[self.linewidth * 1.5] * 2)
self.draw_to_density(ax, center, q50, support, density, split,
linewidth=self.linewidth,
dashes=[self.linewidth * 3] * 2)
self.draw_to_density(ax, center, q75, support, density, split,
linewidth=self.linewidth,
dashes=[self.linewidth * 1.5] * 2)
def draw_points(self, ax, data, center):
"""Draw individual observations as points at middle of the violin."""
kws = dict(s=np.square(self.linewidth * 2),
c=self.gray,
edgecolor=self.gray)
grid = np.ones(len(data)) * center
if self.orient == "v":
ax.scatter(grid, data, **kws)
else:
ax.scatter(data, grid, **kws)
def draw_stick_lines(self, ax, data, support, density,
center, split=False):
"""Draw individual observations as sticks at width of density."""
for val in data:
self.draw_to_density(ax, center, val, support, density, split,
linewidth=self.linewidth * .5)
def draw_to_density(self, ax, center, val, support, density, split, **kws):
"""Draw a line orthogonal to the value axis at width of density."""
idx = np.argmin(np.abs(support - val))
width = self.dwidth * density[idx] * .99
kws["color"] = self.gray
if self.orient == "v":
if split == "left":
ax.plot([center - width, center], [val, val], **kws)
elif split == "right":
ax.plot([center, center + width], [val, val], **kws)
else:
ax.plot([center - width, center + width], [val, val], **kws)
else:
if split == "left":
ax.plot([val, val], [center - width, center], **kws)
elif split == "right":
ax.plot([val, val], [center, center + width], **kws)
else:
ax.plot([val, val], [center - width, center + width], **kws)
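    # Illustrative sketch of the lookup above: the value is snapped to the
    # nearest point of the support grid and the density there sets the
    # half-width of the drawn line (the numbers below are made up):
    #
    #     import numpy as np
    #     support = np.linspace(0, 10, 5)            # [0, 2.5, 5, 7.5, 10]
    #     density = np.array([.1, .4, .9, .4, .1])
    #     val = 6.0
    #     idx = np.argmin(np.abs(support - val))     # -> 2 (grid point 5.0)
    #     half_width = 0.4 * density[idx] * .99      # with dwidth == 0.4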
def plot(self, ax):
"""Make the violin plot."""
self.draw_violins(ax)
self.annotate_axes(ax)
if self.orient == "h":
ax.invert_yaxis()
class _StripPlotter(_CategoricalPlotter):
"""1-d scatterplot with categorical organization."""
def __init__(self, x, y, hue, data, order, hue_order,
jitter, split, orient, color, palette):
"""Initialize the plotter."""
self.establish_variables(x, y, hue, data, orient, order, hue_order)
self.establish_colors(color, palette, 1)
# Set object attributes
self.split = split
self.width = .8
if jitter == 1: # Use a good default for `jitter = True`
jlim = 0.1
else:
jlim = float(jitter)
if self.hue_names is not None and split:
jlim /= len(self.hue_names)
self.jitterer = stats.uniform(-jlim, jlim * 2).rvs
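    # Rough sketch of the jitter draw configured above: ``stats.uniform(loc,
    # scale)`` is supported on [loc, loc + scale], so ``uniform(-jlim, jlim * 2)``
    # shifts each point by at most ``jlim`` along the categorical axis, e.g.:
    #
    #     from scipy import stats
    #     jitterer = stats.uniform(-0.1, 0.2).rvs
    #     offsets = jitterer(5)                      # five draws in [-0.1, 0.1]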
def draw_stripplot(self, ax, kws):
"""Draw the points onto `ax`."""
# Set the default zorder to 2.1, so that the points
# will be drawn on top of line elements (like in a boxplot)
kws.setdefault("zorder", 2.1)
for i, group_data in enumerate(self.plot_data):
if self.plot_hues is None:
# Determine the positions of the points
strip_data = remove_na(group_data)
jitter = self.jitterer(len(strip_data))
kws["color"] = self.colors[i]
# Draw the plot
if self.orient == "v":
ax.scatter(i + jitter, strip_data, **kws)
else:
ax.scatter(strip_data, i + jitter, **kws)
else:
offsets = self.hue_offsets
for j, hue_level in enumerate(self.hue_names):
hue_mask = self.plot_hues[i] == hue_level
if not hue_mask.any():
continue
# Determine the positions of the points
strip_data = remove_na(group_data[hue_mask])
pos = i + offsets[j] if self.split else i
jitter = self.jitterer(len(strip_data))
kws["color"] = self.colors[j]
# Only label one set of plots
if i:
kws.pop("label", None)
else:
kws["label"] = hue_level
# Draw the plot
if self.orient == "v":
ax.scatter(pos + jitter, strip_data, **kws)
else:
ax.scatter(strip_data, pos + jitter, **kws)
def plot(self, ax, kws):
"""Make the plot."""
self.draw_stripplot(ax, kws)
self.annotate_axes(ax)
if self.orient == "h":
ax.invert_yaxis()
class _SwarmPlotter(_BoxPlotter):
def __init__(self):
pass
def plot(self, ax):
pass
class _CategoricalStatPlotter(_CategoricalPlotter):
@property
def nested_width(self):
"""A float with the width of plot elements when hue nesting is used."""
return self.width / len(self.hue_names)
def estimate_statistic(self, estimator, ci, n_boot):
if self.hue_names is None:
statistic = []
confint = []
else:
statistic = [[] for _ in self.plot_data]
confint = [[] for _ in self.plot_data]
for i, group_data in enumerate(self.plot_data):
# Option 1: we have a single layer of grouping
# --------------------------------------------
if self.plot_hues is None:
if self.plot_units is None:
stat_data = remove_na(group_data)
unit_data = None
else:
unit_data = self.plot_units[i]
have = pd.notnull(np.c_[group_data, unit_data]).all(axis=1)
stat_data = group_data[have]
unit_data = unit_data[have]
# Estimate a statistic from the vector of data
if not stat_data.size:
statistic.append(np.nan)
else:
statistic.append(estimator(stat_data))
# Get a confidence interval for this estimate
if ci is not None:
if stat_data.size < 2:
confint.append([np.nan, np.nan])
continue
boots = bootstrap(stat_data, func=estimator,
n_boot=n_boot,
units=unit_data)
confint.append(utils.ci(boots, ci))
# Option 2: we are grouping by a hue layer
# ----------------------------------------
else:
for j, hue_level in enumerate(self.hue_names):
if not self.plot_hues[i].size:
statistic[i].append(np.nan)
if ci is not None:
confint[i].append((np.nan, np.nan))
continue
hue_mask = self.plot_hues[i] == hue_level
if self.plot_units is None:
stat_data = remove_na(group_data[hue_mask])
unit_data = None
else:
group_units = self.plot_units[i]
have = pd.notnull(
np.c_[group_data, group_units]
).all(axis=1)
stat_data = group_data[hue_mask & have]
unit_data = group_units[hue_mask & have]
# Estimate a statistic from the vector of data
if not stat_data.size:
statistic[i].append(np.nan)
else:
statistic[i].append(estimator(stat_data))
# Get a confidence interval for this estimate
if ci is not None:
if stat_data.size < 2:
confint[i].append([np.nan, np.nan])
continue
boots = bootstrap(stat_data, func=estimator,
n_boot=n_boot,
units=unit_data)
confint[i].append(utils.ci(boots, ci))
# Save the resulting values for plotting
self.statistic = np.array(statistic)
self.confint = np.array(confint)
# Rename the value label to reflect the estimation
if self.value_label is not None:
self.value_label = "{}({})".format(estimator.__name__,
self.value_label)
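    # A numpy-only sketch of the confidence-interval step above, standing in
    # for the internal ``bootstrap`` and ``utils.ci`` helpers (a plain
    # percentile bootstrap of the estimator; 95% interval shown):
    #
    #     import numpy as np
    #     data = np.random.randn(50)
    #     boots = np.array([np.mean(np.random.choice(data, data.size))
    #                       for _ in range(1000)])
    #     ci_low, ci_high = np.percentile(boots, [2.5, 97.5])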
def draw_confints(self, ax, at_group, confint, colors, **kws):
kws.setdefault("lw", mpl.rcParams["lines.linewidth"] * 1.8)
for at, (ci_low, ci_high), color in zip(at_group,
confint,
colors):
if self.orient == "v":
ax.plot([at, at], [ci_low, ci_high], color=color, **kws)
else:
ax.plot([ci_low, ci_high], [at, at], color=color, **kws)
class _BarPlotter(_CategoricalStatPlotter):
"""Show point estimates and confidence intervals with bars."""
def __init__(self, x, y, hue, data, order, hue_order,
estimator, ci, n_boot, units,
orient, color, palette, saturation, errcolor):
"""Initialize the plotter."""
self.establish_variables(x, y, hue, data, orient,
order, hue_order, units)
self.establish_colors(color, palette, saturation)
self.estimate_statistic(estimator, ci, n_boot)
self.errcolor = errcolor
def draw_bars(self, ax, kws):
"""Draw the bars onto `ax`."""
# Get the right matplotlib function depending on the orientation
barfunc = ax.bar if self.orient == "v" else ax.barh
barpos = np.arange(len(self.statistic))
if self.plot_hues is None:
# Draw the bars
barfunc(barpos, self.statistic, self.width,
color=self.colors, align="center", **kws)
# Draw the confidence intervals
errcolors = [self.errcolor] * len(barpos)
self.draw_confints(ax, barpos, self.confint, errcolors)
else:
for j, hue_level in enumerate(self.hue_names):
# Draw the bars
offpos = barpos + self.hue_offsets[j]
barfunc(offpos, self.statistic[:, j], self.nested_width,
color=self.colors[j], align="center",
label=hue_level, **kws)
# Draw the confidence intervals
if self.confint.size:
confint = self.confint[:, j]
errcolors = [self.errcolor] * len(offpos)
self.draw_confints(ax, offpos, confint, errcolors)
def plot(self, ax, bar_kws):
"""Make the plot."""
self.draw_bars(ax, bar_kws)
self.annotate_axes(ax)
if self.orient == "h":
ax.invert_yaxis()
class _PointPlotter(_CategoricalStatPlotter):
"""Show point estimates and confidence intervals with (joined) points."""
def __init__(self, x, y, hue, data, order, hue_order,
estimator, ci, n_boot, units,
markers, linestyles, dodge, join, scale,
orient, color, palette):
"""Initialize the plotter."""
self.establish_variables(x, y, hue, data, orient,
order, hue_order, units)
self.establish_colors(color, palette, 1)
self.estimate_statistic(estimator, ci, n_boot)
# Override the default palette for single-color plots
if hue is None and color is None and palette is None:
self.colors = [color_palette()[0]] * len(self.colors)
# Don't join single-layer plots with different colors
if hue is None and palette is not None:
join = False
# Use a good default for `dodge=True`
if dodge is True and self.hue_names is not None:
dodge = .025 * len(self.hue_names)
# Make sure we have a marker for each hue level
if isinstance(markers, string_types):
markers = [markers] * len(self.colors)
self.markers = markers
# Make sure we have a line style for each hue level
if isinstance(linestyles, string_types):
linestyles = [linestyles] * len(self.colors)
self.linestyles = linestyles
# Set the other plot components
self.dodge = dodge
self.join = join
self.scale = scale
@property
def hue_offsets(self):
"""Offsets relative to the center position for each hue level."""
offset = np.linspace(0, self.dodge, len(self.hue_names))
offset -= offset.mean()
return offset
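    # Worked example of the offsets above with three hue levels and
    # ``dodge = 0.3`` (illustrative numbers):
    #
    #     offset = np.linspace(0, .3, 3)             # [0.0, 0.15, 0.3]
    #     offset -= offset.mean()                    # [-0.15, 0.0, 0.15]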
def draw_points(self, ax):
"""Draw the main data components of the plot."""
# Get the center positions on the categorical axis
pointpos = np.arange(len(self.statistic))
# Get the size of the plot elements
lw = mpl.rcParams["lines.linewidth"] * 1.8 * self.scale
mew = lw * .75
markersize = np.pi * np.square(lw) * 2
if self.plot_hues is None:
# Draw lines joining each estimate point
if self.join:
color = self.colors[0]
ls = self.linestyles[0]
if self.orient == "h":
ax.plot(self.statistic, pointpos,
color=color, ls=ls, lw=lw)
else:
ax.plot(pointpos, self.statistic,
color=color, ls=ls, lw=lw)
# Draw the confidence intervals
self.draw_confints(ax, pointpos, self.confint, self.colors, lw=lw)
# Draw the estimate points
marker = self.markers[0]
if self.orient == "h":
ax.scatter(self.statistic, pointpos,
linewidth=mew, marker=marker, s=markersize,
c=self.colors, edgecolor=self.colors)
else:
ax.scatter(pointpos, self.statistic,
linewidth=mew, marker=marker, s=markersize,
c=self.colors, edgecolor=self.colors)
else:
offsets = self.hue_offsets
for j, hue_level in enumerate(self.hue_names):
# Determine the values to plot for this level
statistic = self.statistic[:, j]
# Determine the position on the categorical and z axes
offpos = pointpos + offsets[j]
z = j + 1
# Draw lines joining each estimate point
if self.join:
color = self.colors[j]
ls = self.linestyles[j]
if self.orient == "h":
ax.plot(statistic, offpos, color=color,
zorder=z, ls=ls, lw=lw)
else:
ax.plot(offpos, statistic, color=color,
zorder=z, ls=ls, lw=lw)
# Draw the confidence intervals
if self.confint.size:
confint = self.confint[:, j]
errcolors = [self.colors[j]] * len(offpos)
self.draw_confints(ax, offpos, confint, errcolors,
zorder=z, lw=lw)
# Draw the estimate points
marker = self.markers[j]
if self.orient == "h":
ax.scatter(statistic, offpos, label=hue_level,
c=[self.colors[j]] * len(offpos),
linewidth=mew, marker=marker, s=markersize,
edgecolor=self.colors[j], zorder=z)
else:
ax.scatter(offpos, statistic, label=hue_level,
c=[self.colors[j]] * len(offpos),
linewidth=mew, marker=marker, s=markersize,
edgecolor=self.colors[j], zorder=z)
def plot(self, ax):
"""Make the plot."""
self.draw_points(ax)
self.annotate_axes(ax)
if self.orient == "h":
ax.invert_yaxis()
_categorical_docs = dict(
# Shared narrative docs
main_api_narrative=dedent("""\
Input data can be passed in a variety of formats, including:
- Vectors of data represented as lists, numpy arrays, or pandas Series
objects passed directly to the ``x``, ``y``, and/or ``hue`` parameters.
- A "long-form" DataFrame, in which case the ``x``, ``y``, and ``hue``
variables will determine how the data are plotted.
- A "wide-form" DataFrame, such that each numeric column will be plotted.
- Anything accepted by ``plt.boxplot`` (e.g. a 2d array or list of vectors)
In most cases, it is possible to use numpy or Python objects, but pandas
objects are preferable because the associated names will be used to
annotate the axes. Additionally, you can use Categorical types for the
grouping variables to control the order of plot elements.\
"""),
# Shared function parameters
input_params=dedent("""\
x, y, hue : names of variables in ``data`` or vector data, optional
Inputs for plotting long-form data. See examples for interpretation.\
"""),
string_input_params=dedent("""\
x, y, hue : names of variables in ``data``
Inputs for plotting long-form data. See examples for interpretation.\
"""),
categorical_data=dedent("""\
data : DataFrame, array, or list of arrays, optional
Dataset for plotting. If ``x`` and ``y`` are absent, this is
interpreted as wide-form. Otherwise it is expected to be long-form.\
"""),
long_form_data=dedent("""\
data : DataFrame
Long-form (tidy) dataset for plotting. Each column should correspond
to a variable, and each row should correspond to an observation.\
"""),
order_vars=dedent("""\
order, hue_order : lists of strings, optional
Order to plot the categorical levels in, otherwise the levels are
inferred from the data objects.\
"""),
stat_api_params=dedent("""\
estimator : callable that maps vector -> scalar, optional
Statistical function to estimate within each categorical bin.
ci : float or None, optional
Size of confidence intervals to draw around estimated values. If
``None``, no bootstrapping will be performed, and error bars will
not be drawn.
n_boot : int, optional
Number of bootstrap iterations to use when computing confidence
intervals.
units : name of variable in ``data`` or vector data, optional
Identifier of sampling units, which will be used to perform a
multilevel bootstrap and account for repeated measures design.\
"""),
orient=dedent("""\
orient : "v" | "h", optional
Orientation of the plot (vertical or horizontal). This is usually
inferred from the dtype of the input variables, but can be used to
        specify when the "categorical" variable is numeric or when plotting
wide-form data.\
"""),
color=dedent("""\
color : matplotlib color, optional
Color for all of the elements, or seed for :func:`light_palette` when
using hue nesting.\
"""),
palette=dedent("""\
palette : palette name, list, or dict, optional
Color palette that maps either the grouping variable or the hue
variable. If the palette is a dictionary, keys should be names of
levels and values should be matplotlib colors.\
"""),
saturation=dedent("""\
saturation : float, optional
Proportion of the original saturation to draw colors at. Large patches
often look better with slightly desaturated colors, but set this to
``1`` if you want the plot colors to perfectly match the input color
spec.\
"""),
width=dedent("""\
width : float, optional
Width of a full element when not using hue nesting, or width of all the
elements for one level of the major grouping variable.\
"""),
linewidth=dedent("""\
linewidth : float, optional
Width of the gray lines that frame the plot elements.\
"""),
ax_in=dedent("""\
ax : matplotlib Axes, optional
Axes object to draw the plot onto, otherwise uses the current Axes.\
"""),
ax_out=dedent("""\
ax : matplotlib Axes
Returns the Axes object with the boxplot drawn onto it.\
"""),
# Shared see also
boxplot=dedent("""\
boxplot : A traditional box-and-whisker plot with a similar API.\
"""),
violinplot=dedent("""\
violinplot : A combination of boxplot and kernel density estimation.\
"""),
stripplot=dedent("""\
stripplot : A scatterplot where one variable is categorical. Can be used
in conjunction with a other plots to show each observation.\
"""),
barplot=dedent("""\
barplot : Show point estimates and confidence intervals using bars.\
"""),
countplot=dedent("""\
countplot : Show the counts of observations in each categorical bin.\
"""),
pointplot=dedent("""\
pointplot : Show point estimates and confidence intervals using scatterplot
glyphs.\
"""),
factorplot=dedent("""\
    factorplot : Combine categorical plots and a :class:`FacetGrid`.\
"""),
)
_categorical_docs.update(_facet_docs)
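# Illustrative note: each public function below splices these shared snippets
# into its docstring with ``str.format``; a hypothetical minimal version of
# the same pattern (names here are placeholders, not part of this module):
#
#     docs = dict(ax_in="ax : matplotlib Axes, optional")
#     template = dedent("""\
#         Parameters
#         ----------
#         {ax_in}
#         """)
#     doc = template.format(**docs)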
def boxplot(x=None, y=None, hue=None, data=None, order=None, hue_order=None,
orient=None, color=None, palette=None, saturation=.75,
width=.8, fliersize=5, linewidth=None, whis=1.5, notch=False,
ax=None, **kwargs):
    # Try to handle broken backwards-compatibility
# This should help with the lack of a smooth deprecation,
# but won't catch everything
warn = False
if isinstance(x, pd.DataFrame):
data = x
x = None
warn = True
if "vals" in kwargs:
x = kwargs.pop("vals")
warn = True
if "groupby" in kwargs:
y = x
x = kwargs.pop("groupby")
warn = True
if "vert" in kwargs:
vert = kwargs.pop("vert", True)
if not vert:
x, y = y, x
orient = "v" if vert else "h"
warn = True
if "names" in kwargs:
kwargs.pop("names")
warn = True
if "join_rm" in kwargs:
kwargs.pop("join_rm")
warn = True
msg = ("The boxplot API has been changed. Attempting to adjust your "
"arguments for the new API (which might not work). Please update "
"your code. See the version 0.6 release notes for more info.")
if warn:
warnings.warn(msg, UserWarning)
plotter = _BoxPlotter(x, y, hue, data, order, hue_order,
orient, color, palette, saturation,
width, fliersize, linewidth)
if ax is None:
ax = plt.gca()
kwargs.update(dict(whis=whis, notch=notch))
plotter.plot(ax, kwargs)
return ax
boxplot.__doc__ = dedent("""\
Draw a box plot to show distributions with respect to categories.
A box plot (or box-and-whisker plot) shows the distribution of quantitative
data in a way that facilitates comparisons between variables or across
levels of a categorical variable. The box shows the quartiles of the
dataset while the whiskers extend to show the rest of the distribution,
except for points that are determined to be "outliers" using a method
that is a function of the inter-quartile range.
{main_api_narrative}
Parameters
----------
{input_params}
{categorical_data}
{order_vars}
{orient}
{color}
{palette}
{saturation}
{width}
fliersize : float, optional
Size of the markers used to indicate outlier observations.
{linewidth}
whis : float, optional
Proportion of the IQR past the low and high quartiles to extend the
plot whiskers. Points outside this range will be identified as
outliers.
notch : boolean, optional
Whether to "notch" the box to indicate a confidence interval for the
median. There are several other parameters that can control how the
notches are drawn; see the ``plt.boxplot`` help for more information
on them.
{ax_in}
kwargs : key, value mappings
Other keyword arguments are passed through to ``plt.boxplot`` at draw
time.
Returns
-------
{ax_out}
See Also
--------
{violinplot}
{stripplot}
Examples
--------
Draw a single horizontal boxplot:
.. plot::
:context: close-figs
>>> import seaborn as sns
>>> sns.set_style("whitegrid")
>>> tips = sns.load_dataset("tips")
>>> ax = sns.boxplot(x=tips["total_bill"])
Draw a vertical boxplot grouped by a categorical variable:
.. plot::
:context: close-figs
>>> ax = sns.boxplot(x="day", y="total_bill", data=tips)
Draw a boxplot with nested grouping by two categorical variables:
.. plot::
:context: close-figs
>>> ax = sns.boxplot(x="day", y="total_bill", hue="smoker",
... data=tips, palette="Set3")
Draw a boxplot with nested grouping when some bins are empty:
.. plot::
:context: close-figs
>>> ax = sns.boxplot(x="day", y="total_bill", hue="time",
... data=tips, linewidth=2.5)
Control box order by sorting the input data:
.. plot::
:context: close-figs
>>> ax = sns.boxplot(x="size", y="tip", data=tips.sort("size"))
Control box order by passing an explicit order:
.. plot::
:context: close-figs
>>> ax = sns.boxplot(x="size", y="tip", data=tips,
... order=np.arange(1, 7), palette="Blues_d")
Draw a boxplot for each numeric variable in a DataFrame:
.. plot::
:context: close-figs
>>> iris = sns.load_dataset("iris")
>>> ax = sns.boxplot(data=iris, orient="h", palette="Set2")
Use :func:`stripplot` to show the datapoints on top of the boxes:
.. plot::
:context: close-figs
>>> ax = sns.boxplot(x="day", y="total_bill", data=tips)
>>> ax = sns.stripplot(x="day", y="total_bill", data=tips,
... size=4, jitter=True, edgecolor="gray")
Draw a box plot on to a :class:`FacetGrid` to group within an additional
categorical variable:
.. plot::
:context: close-figs
>>> g = sns.FacetGrid(tips, col="time", size=4, aspect=.7)
>>> (g.map(sns.boxplot, "sex", "total_bill", "smoker")
... .despine(left=True)
... .add_legend(title="smoker")) #doctest: +ELLIPSIS
<seaborn.axisgrid.FacetGrid object at 0x...>
""").format(**_categorical_docs)
def violinplot(x=None, y=None, hue=None, data=None, order=None, hue_order=None,
bw="scott", cut=2, scale="area", scale_hue=True, gridsize=100,
width=.8, inner="box", split=False, orient=None, linewidth=None,
color=None, palette=None, saturation=.75, ax=None, **kwargs):
    # Try to handle broken backwards-compatibility
# This should help with the lack of a smooth deprecation,
# but won't catch everything
warn = False
if isinstance(x, pd.DataFrame):
data = x
x = None
warn = True
if "vals" in kwargs:
x = kwargs.pop("vals")
warn = True
if "groupby" in kwargs:
y = x
x = kwargs.pop("groupby")
warn = True
if "vert" in kwargs:
vert = kwargs.pop("vert", True)
if not vert:
x, y = y, x
orient = "v" if vert else "h"
warn = True
msg = ("The violinplot API has been changed. Attempting to adjust your "
"arguments for the new API (which might not work). Please update "
"your code. See the version 0.6 release notes for more info.")
if warn:
warnings.warn(msg, UserWarning)
plotter = _ViolinPlotter(x, y, hue, data, order, hue_order,
bw, cut, scale, scale_hue, gridsize,
width, inner, split, orient, linewidth,
color, palette, saturation)
if ax is None:
ax = plt.gca()
plotter.plot(ax)
return ax
violinplot.__doc__ = dedent("""\
Draw a combination of boxplot and kernel density estimate.
A violin plot plays a similar role as a box and whisker plot. It shows the
distribution of quantitative data across several levels of one (or more)
categorical variables such that those distributions can be compared. Unlike
a box plot, in which all of the plot components correspond to actual
datapoints, the violin plot features a kernel density estimation of the
underlying distribution.
This can be an effective and attractive way to show multiple distributions
of data at once, but keep in mind that the estimation procedure is
influenced by the sample size, and violins for relatively small samples
might look misleadingly smooth.
{main_api_narrative}
Parameters
----------
{input_params}
{categorical_data}
{order_vars}
bw : {{'scott', 'silverman', float}}, optional
Either the name of a reference rule or the scale factor to use when
computing the kernel bandwidth. The actual kernel size will be
determined by multiplying the scale factor by the standard deviation of
the data within each bin.
cut : float, optional
Distance, in units of bandwidth size, to extend the density past the
extreme datapoints. Set to 0 to limit the violin range within the range
of the observed data (i.e., to have the same effect as ``trim=True`` in
        ``ggplot``).
scale : {{"area", "count", "width"}}, optional
The method used to scale the width of each violin. If ``area``, each
violin will have the same area. If ``count``, the width of the violins
will be scaled by the number of observations in that bin. If ``width``,
each violin will have the same width.
scale_hue : bool, optional
When nesting violins using a ``hue`` variable, this parameter
determines whether the scaling is computed within each level of the
major grouping variable (``scale_hue=True``) or across all the violins
on the plot (``scale_hue=False``).
gridsize : int, optional
Number of points in the discrete grid used to compute the kernel
density estimate.
{width}
inner : {{"box", "quartile", "point", "stick", None}}, optional
Representation of the datapoints in the violin interior. If ``box``,
        draw a miniature boxplot. If ``quartile``, draw the quartiles of the
distribution. If ``point`` or ``stick``, show each underlying
datapoint. Using ``None`` will draw unadorned violins.
split : bool, optional
When using hue nesting with a variable that takes two levels, setting
``split`` to True will draw half of a violin for each level. This can
make it easier to directly compare the distributions.
{orient}
{linewidth}
{color}
{palette}
{saturation}
{ax_in}
Returns
-------
{ax_out}
See Also
--------
{boxplot}
{stripplot}
Examples
--------
Draw a single horizontal violinplot:
.. plot::
:context: close-figs
>>> import seaborn as sns
>>> sns.set_style("whitegrid")
>>> tips = sns.load_dataset("tips")
>>> ax = sns.violinplot(x=tips["total_bill"])
Draw a vertical violinplot grouped by a categorical variable:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", data=tips)
Draw a violinplot with nested grouping by two categorical variables:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", hue="smoker",
... data=tips, palette="muted")
    Draw split violins to compare across the hue variable:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", hue="smoker",
... data=tips, palette="muted", split=True)
Control violin order by sorting the input data:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="size", y="tip", data=tips.sort("size"))
Control violin order by passing an explicit order:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="size", y="tip", data=tips,
... order=np.arange(1, 7), palette="Blues_d")
Scale the violin width by the number of observations in each bin:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", hue="sex",
... data=tips, palette="Set2", split=True,
... scale="count")
Draw the quartiles as horizontal lines instead of a mini-box:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", hue="sex",
... data=tips, palette="Set2", split=True,
... scale="count", inner="quartile")
Show each observation with a stick inside the violin:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", hue="sex",
... data=tips, palette="Set2", split=True,
... scale="count", inner="stick")
Scale the density relative to the counts across all bins:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", hue="sex",
... data=tips, palette="Set2", split=True,
... scale="count", inner="stick", scale_hue=False)
Use a narrow bandwidth to reduce the amount of smoothing:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", hue="sex",
... data=tips, palette="Set2", split=True,
... scale="count", inner="stick",
... scale_hue=False, bw=.2)
Draw horizontal violins:
.. plot::
:context: close-figs
>>> planets = sns.load_dataset("planets")
>>> ax = sns.violinplot(x="orbital_period", y="method",
... data=planets[planets.orbital_period < 1000],
... scale="width", palette="Set3")
Draw a violin plot on to a :class:`FacetGrid` to group within an additional
categorical variable:
.. plot::
:context: close-figs
>>> g = sns.FacetGrid(tips, col="time", size=4, aspect=.7)
>>> (g.map(sns.violinplot, "sex", "total_bill", "smoker", split=True)
... .despine(left=True)
... .add_legend(title="smoker")) # doctest: +ELLIPSIS
<seaborn.axisgrid.FacetGrid object at 0x...>
""").format(**_categorical_docs)
def stripplot(x=None, y=None, hue=None, data=None, order=None, hue_order=None,
jitter=False, split=True, orient=None, color=None, palette=None,
size=7, edgecolor="w", linewidth=1, ax=None, **kwargs):
plotter = _StripPlotter(x, y, hue, data, order, hue_order,
jitter, split, orient, color, palette)
if ax is None:
ax = plt.gca()
kwargs.update(dict(s=size ** 2, edgecolor=edgecolor, linewidth=linewidth))
if edgecolor == "gray":
kwargs["edgecolor"] = plotter.gray
plotter.plot(ax, kwargs)
return ax
stripplot.__doc__ = dedent("""\
Draw a scatterplot where one variable is categorical.
A strip plot can be drawn on its own, but it is also a good complement
to a box or violin plot in cases where you want to show all observations
along with some representation of the underlying distribution.
{main_api_narrative}
Parameters
----------
{input_params}
{categorical_data}
{order_vars}
jitter : float, ``True``/``1`` is special-cased, optional
Amount of jitter (only along the categorical axis) to apply. This
can be useful when you have many points and they overlap, so that
it is easier to see the distribution. You can specify the amount
of jitter (half the width of the uniform random variable support),
or just use ``True`` for a good default.
split : bool, optional
When using ``hue`` nesting, setting this to ``True`` will separate
the strips for different hue levels along the categorical axis.
Otherwise, the points for each level will be plotted on top of
each other.
{orient}
{color}
{palette}
size : float, optional
Diameter of the markers, in points. (Although ``plt.scatter`` is used
to draw the points, the ``size`` argument here takes a "normal"
        markersize and not size^2 like ``plt.scatter``.)
edgecolor : matplotlib color, "gray" is special-cased, optional
Color of the lines around each point. If you pass ``"gray"``, the
brightness is determined by the color palette used for the body
of the points.
{linewidth}
{ax_in}
Returns
-------
{ax_out}
See Also
--------
{boxplot}
{violinplot}
Examples
--------
Draw a single horizontal strip plot:
.. plot::
:context: close-figs
>>> import seaborn as sns
>>> sns.set_style("whitegrid")
>>> tips = sns.load_dataset("tips")
>>> ax = sns.stripplot(x=tips["total_bill"])
Group the strips by a categorical variable:
.. plot::
:context: close-figs
>>> ax = sns.stripplot(x="day", y="total_bill", data=tips)
Add jitter to bring out the distribution of values:
.. plot::
:context: close-figs
>>> ax = sns.stripplot(x="day", y="total_bill", data=tips, jitter=True)
Use a smaller amount of jitter:
.. plot::
:context: close-figs
>>> ax = sns.stripplot(x="day", y="total_bill", data=tips, jitter=0.05)
Draw horizontal strips:
.. plot::
:context: close-figs
>>> ax = sns.stripplot(x="total_bill", y="day", data=tips,
... jitter=True)
Nest the strips within a second categorical variable:
.. plot::
:context: close-figs
>>> ax = sns.stripplot(x="sex", y="total_bill", hue="day",
... data=tips, jitter=True)
Draw each level of the ``hue`` variable at the same location on the
major categorical axis:
.. plot::
:context: close-figs
>>> ax = sns.stripplot(x="day", y="total_bill", hue="smoker",
... data=tips, jitter=True,
... palette="Set2", split=False)
Control strip order by sorting the input data:
.. plot::
:context: close-figs
>>> ax = sns.stripplot(x="size", y="tip", data=tips.sort("size"))
Control strip order by passing an explicit order:
.. plot::
:context: close-figs
>>> ax = sns.stripplot(x="size", y="tip", data=tips,
... order=np.arange(1, 7), palette="Blues_d")
Draw strips with large points and different aesthetics:
.. plot::
:context: close-figs
>>> ax = sns.stripplot("day", "total_bill", "smoker", data=tips,
... palette="Set2", size=20, marker="D",
... edgecolor="gray", alpha=.25)
Draw strips of observations on top of a box plot:
.. plot::
:context: close-figs
>>> ax = sns.boxplot(x="tip", y="day", data=tips, whis=np.inf)
>>> ax = sns.stripplot(x="tip", y="day", data=tips, jitter=True)
Draw strips of observations on top of a violin plot:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", data=tips, inner=None)
>>> ax = sns.stripplot(x="day", y="total_bill", data=tips,
... jitter=True, color="white", edgecolor="gray")
""").format(**_categorical_docs)
def barplot(x=None, y=None, hue=None, data=None, order=None, hue_order=None,
estimator=np.mean, ci=95, n_boot=1000, units=None,
orient=None, color=None, palette=None, saturation=.75,
errcolor=".26", ax=None, **kwargs):
# Handle some deprecated arguments
if "hline" in kwargs:
kwargs.pop("hline")
warnings.warn("The `hline` parameter has been removed", UserWarning)
if "dropna" in kwargs:
kwargs.pop("dropna")
warnings.warn("The `dropna` parameter has been removed", UserWarning)
if "x_order" in kwargs:
order = kwargs.pop("x_order")
warnings.warn("The `x_order` parameter has been renamed `order`",
UserWarning)
plotter = _BarPlotter(x, y, hue, data, order, hue_order,
estimator, ci, n_boot, units,
orient, color, palette, saturation,
errcolor)
if ax is None:
ax = plt.gca()
plotter.plot(ax, kwargs)
return ax
barplot.__doc__ = dedent("""\
Show point estimates and confidence intervals as rectangular bars.
A bar plot represents an estimate of central tendency for a numeric
variable with the height of each rectangle and provides some indication of
the uncertainty around that estimate using error bars. Bar plots include 0
in the quantitative axis range, and they are a good choice when 0 is a
meaningful value for the quantitative variable, and you want to make
comparisons against it.
For datasets where 0 is not a meaningful value, a point plot will allow you
to focus on differences between levels of one or more categorical
variables.
It is also important to keep in mind that a bar plot shows only the mean
(or other estimator) value, but in many cases it may be more informative to
show the distribution of values at each level of the categorical variables.
In that case, other approaches such as a box or violin plot may be more
appropriate.
{main_api_narrative}
Parameters
----------
{input_params}
{categorical_data}
{order_vars}
{stat_api_params}
{orient}
{color}
{palette}
{saturation}
errcolor : matplotlib color
Color for the lines that represent the confidence interval.
{ax_in}
kwargs : key, value mappings
Other keyword arguments are passed through to ``plt.bar`` at draw
time.
Returns
-------
{ax_out}
See Also
--------
{countplot}
{pointplot}
{factorplot}
Examples
--------
Draw a set of vertical bar plots grouped by a categorical variable:
.. plot::
:context: close-figs
>>> import seaborn as sns
>>> sns.set_style("whitegrid")
>>> tips = sns.load_dataset("tips")
>>> ax = sns.barplot(x="day", y="total_bill", data=tips)
    Draw a set of vertical bars with nested grouping by two variables:
.. plot::
:context: close-figs
>>> ax = sns.barplot(x="day", y="total_bill", hue="sex", data=tips)
Draw a set of horizontal bars:
.. plot::
:context: close-figs
>>> ax = sns.barplot(x="tip", y="day", data=tips)
Control bar order by sorting the input data:
.. plot::
:context: close-figs
>>> ax = sns.barplot(x="size", y="tip", data=tips.sort("size"))
Control bar order by passing an explicit order:
.. plot::
:context: close-figs
>>> ax = sns.barplot(x="size", y="tip", data=tips,
... order=np.arange(1, 7), palette="Blues_d")
Use median as the estimate of central tendency:
.. plot::
:context: close-figs
>>> from numpy import median
>>> ax = sns.barplot(x="day", y="tip", data=tips, estimator=median)
Show the standard error of the mean with the error bars:
.. plot::
:context: close-figs
>>> ax = sns.barplot(x="day", y="tip", data=tips, ci=68)
Use a different color palette for the bars:
.. plot::
:context: close-figs
>>> ax = sns.barplot("size", y="total_bill", data=tips.sort("size"),
... palette="Blues_d")
Plot all bars in a single color:
.. plot::
:context: close-figs
>>> ax = sns.barplot("size", y="total_bill", data=tips.sort("size"),
... color="salmon", saturation=.5)
Use ``plt.bar`` keyword arguments to further change the aesthetic:
.. plot::
:context: close-figs
>>> ax = sns.barplot("day", "total_bill", data=tips,
... linewidth=2.5, facecolor=(1, 1, 1, 0),
... errcolor=".2", edgecolor=".2")
""").format(**_categorical_docs)
def pointplot(x=None, y=None, hue=None, data=None, order=None, hue_order=None,
estimator=np.mean, ci=95, n_boot=1000, units=None,
markers="o", linestyles="-", dodge=False, join=True, scale=1,
orient=None, color=None, palette=None, ax=None, **kwargs):
# Handle some deprecated arguments
if "hline" in kwargs:
kwargs.pop("hline")
warnings.warn("The `hline` parameter has been removed", UserWarning)
if "dropna" in kwargs:
kwargs.pop("dropna")
warnings.warn("The `dropna` parameter has been removed", UserWarning)
if "x_order" in kwargs:
order = kwargs.pop("x_order")
warnings.warn("The `x_order` parameter has been renamed `order`",
UserWarning)
plotter = _PointPlotter(x, y, hue, data, order, hue_order,
estimator, ci, n_boot, units,
markers, linestyles, dodge, join, scale,
orient, color, palette)
if ax is None:
ax = plt.gca()
plotter.plot(ax)
return ax
pointplot.__doc__ = dedent("""\
Show point estimates and confidence intervals using scatter plot glyphs.
A point plot represents an estimate of central tendency for a numeric
variable by the position of scatter plot points and provides some
indication of the uncertainty around that estimate using error bars.
Point plots can be more useful than bar plots for focusing comparisons
between different levels of one or more categorical variables. They are
particularly adept at showing interactions: how the relationship between
levels of one categorical variable changes across levels of a second
categorical variable. The lines that join each point from the same ``hue``
level allow interactions to be judged by differences in slope, which is
easier for the eyes than comparing the heights of several groups of points
or bars.
It is important to keep in mind that a point plot shows only the mean (or
other estimator) value, but in many cases it may be more informative to
show the distribution of values at each level of the categorical variables.
In that case, other approaches such as a box or violin plot may be more
appropriate.
{main_api_narrative}
Parameters
----------
{input_params}
{categorical_data}
{order_vars}
{stat_api_params}
markers : string or list of strings, optional
Markers to use for each of the ``hue`` levels.
linestyles : string or list of strings, optional
Line styles to use for each of the ``hue`` levels.
dodge : bool or float, optional
Amount to separate the points for each level of the ``hue`` variable
along the categorical axis.
join : bool, optional
If ``True``, lines will be drawn between point estimates at the same
``hue`` level.
scale : float, optional
Scale factor for the plot elements.
{orient}
{color}
{palette}
{ax_in}
Returns
-------
{ax_out}
See Also
--------
{barplot}
{factorplot}
Examples
--------
Draw a set of vertical point plots grouped by a categorical variable:
.. plot::
:context: close-figs
>>> import seaborn as sns
>>> sns.set_style("darkgrid")
>>> tips = sns.load_dataset("tips")
>>> ax = sns.pointplot(x="time", y="total_bill", data=tips)
    Draw a set of vertical points with nested grouping by two variables:
.. plot::
:context: close-figs
>>> ax = sns.pointplot(x="time", y="total_bill", hue="smoker",
... data=tips)
Separate the points for different hue levels along the categorical axis:
.. plot::
:context: close-figs
>>> ax = sns.pointplot(x="time", y="total_bill", hue="smoker",
... data=tips, dodge=True)
Use a different marker and line style for the hue levels:
.. plot::
:context: close-figs
>>> ax = sns.pointplot(x="time", y="total_bill", hue="smoker",
... data=tips,
... markers=["o", "x"],
... linestyles=["-", "--"])
Draw a set of horizontal points:
.. plot::
:context: close-figs
>>> ax = sns.pointplot(x="tip", y="day", data=tips)
Don't draw a line connecting each point:
.. plot::
:context: close-figs
>>> ax = sns.pointplot(x="tip", y="day", data=tips, join=False)
Use a different color for a single-layer plot:
.. plot::
:context: close-figs
>>> ax = sns.pointplot("time", y="total_bill", data=tips,
... color="#bb3f3f")
Use a different color palette for the points:
.. plot::
:context: close-figs
>>> ax = sns.pointplot(x="time", y="total_bill", hue="smoker",
... data=tips, palette="Set2")
Control point order by sorting the input data:
.. plot::
:context: close-figs
>>> ax = sns.pointplot(x="size", y="tip", data=tips.sort("size"))
Control point order by passing an explicit order:
.. plot::
:context: close-figs
>>> ax = sns.pointplot(x="size", y="tip", data=tips,
... order=np.arange(1, 7), palette="Blues_d")
Use median as the estimate of central tendency:
.. plot::
:context: close-figs
>>> from numpy import median
>>> ax = sns.pointplot(x="day", y="tip", data=tips, estimator=median)
Show the standard error of the mean with the error bars:
.. plot::
:context: close-figs
>>> ax = sns.pointplot(x="day", y="tip", data=tips, ci=68)
""").format(**_categorical_docs)
def countplot(x=None, y=None, hue=None, data=None, order=None, hue_order=None,
orient=None, color=None, palette=None, saturation=.75,
ax=None, **kwargs):
estimator = len
ci = None
n_boot = 0
units = None
errcolor = None
if x is None and y is not None:
orient = "h"
x = y
elif y is None and x is not None:
orient = "v"
y = x
elif x is not None and y is not None:
raise TypeError("Cannot pass values for both `x` and `y`")
else:
raise TypeError("Must pass valus for either `x` or `y`")
plotter = _BarPlotter(x, y, hue, data, order, hue_order,
estimator, ci, n_boot, units,
orient, color, palette, saturation,
errcolor)
plotter.value_label = "count"
if ax is None:
ax = plt.gca()
plotter.plot(ax, kwargs)
return ax
countplot.__doc__ = dedent("""\
Show the counts of observations in each categorical bin using bars.
A count plot can be thought of as a histogram across a categorical, instead
of quantitative, variable. The basic API and options are identical to those
for :func:`barplot`, so you can compare counts across nested variables.
{main_api_narrative}
Parameters
----------
{input_params}
{categorical_data}
{order_vars}
{orient}
{color}
{palette}
{saturation}
{ax_in}
kwargs : key, value mappings
Other keyword arguments are passed to ``plt.bar``.
Returns
-------
{ax_out}
See Also
--------
{barplot}
{factorplot}
Examples
--------
Show value counts for a single categorical variable:
.. plot::
:context: close-figs
>>> import seaborn as sns
>>> sns.set(style="darkgrid")
>>> titanic = sns.load_dataset("titanic")
>>> ax = sns.countplot(x="class", data=titanic)
Show value counts for two categorical variables:
.. plot::
:context: close-figs
>>> ax = sns.countplot(x="class", hue="who", data=titanic)
Plot the bars horizontally:
.. plot::
:context: close-figs
>>> ax = sns.countplot(y="class", hue="who", data=titanic)
Use a different color palette:
.. plot::
:context: close-figs
>>> ax = sns.countplot(x="who", data=titanic, palette="Set3")
Use ``plt.bar`` keyword arguments for a different look:
.. plot::
:context: close-figs
>>> ax = sns.countplot(x="who", data=titanic,
... facecolor=(0, 0, 0, 0),
... linewidth=5,
... edgecolor=sns.color_palette("dark", 3))
""").format(**_categorical_docs)
def factorplot(x=None, y=None, hue=None, data=None, row=None, col=None,
col_wrap=None, estimator=np.mean, ci=95, n_boot=1000,
units=None, order=None, hue_order=None, row_order=None,
col_order=None, kind="point", size=4, aspect=1,
orient=None, color=None, palette=None,
legend=True, legend_out=True, sharex=True, sharey=True,
margin_titles=False, facet_kws=None, **kwargs):
# Handle some deprecated arguments
if "hline" in kwargs:
kwargs.pop("hline")
warnings.warn("The `hline` parameter has been removed", UserWarning)
if "dropna" in kwargs:
kwargs.pop("dropna")
warnings.warn("The `dropna` parameter has been removed", UserWarning)
if "x_order" in kwargs:
order = kwargs.pop("x_order")
warnings.warn("The `x_order` parameter has been renamed `order`",
UserWarning)
# Determine the plotting function
try:
plot_func = globals()[kind + "plot"]
except KeyError:
err = "Plot kind '{}' is not recognized".format(kind)
raise ValueError(err)
# Alias the input variables to determine categorical order and palette
# correctly in the case of a count plot
if kind == "count":
if x is None and y is not None:
x_, y_, orient = y, y, "h"
elif y is None and x is not None:
x_, y_, orient = x, x, "v"
else:
raise ValueError("Either `x` or `y` must be None for count plots")
else:
x_, y_ = x, y
# Determine the order for the whole dataset, which will be used in all
# facets to ensure representation of all data in the final plot
p = _CategoricalPlotter()
p.establish_variables(x_, y_, hue, data, orient, order, hue_order)
order = p.group_names
hue_order = p.hue_names
# Determine the palette to use
# (FacetGrid will pass a value for ``color`` to the plotting function
# so we need to define ``palette`` to get default behavior for the
    # categorical functions)
p.establish_colors(color, palette, 1)
if kind != "point" or hue is not None:
palette = p.colors
# Determine keyword arguments for the facets
facet_kws = {} if facet_kws is None else facet_kws
facet_kws.update(
data=data, row=row, col=col,
row_order=row_order, col_order=col_order,
col_wrap=col_wrap, size=size, aspect=aspect,
sharex=sharex, sharey=sharey,
legend_out=legend_out, margin_titles=margin_titles,
dropna=False,
)
# Determine keyword arguments for the plotting function
plot_kws = dict(
order=order, hue_order=hue_order,
orient=orient, color=color, palette=palette,
)
plot_kws.update(kwargs)
if kind in ["bar", "point"]:
plot_kws.update(
estimator=estimator, ci=ci, n_boot=n_boot, units=units,
)
# Initialize the facets
g = FacetGrid(**facet_kws)
# Draw the plot onto the facets
g.map_dataframe(plot_func, x, y, hue, **plot_kws)
# Special case axis labels for a count type plot
if kind == "count":
if x is None:
g.set_axis_labels(x_var="count")
if y is None:
g.set_axis_labels(y_var="count")
if legend and (hue is not None) and (hue not in [x, row, col]):
hue_order = list(map(str, hue_order))
g.add_legend(title=hue, label_order=hue_order)
return g
factorplot.__doc__ = dedent("""\
Draw a categorical plot onto a FacetGrid.
The default plot that is shown is a point plot, but other seaborn
categorical plots can be chosen with the ``kind`` parameter, including
box plots, violin plots, bar plots, or strip plots.
It is important to choose how variables get mapped to the plot structure
such that the most important comparisons are easiest to make. As a general
rule, it is easier to compare positions that are closer together, so the
``hue`` variable should be used for the most important comparisons. For
secondary comparisons, try to share the quantitative axis (so, use ``col``
for vertical plots and ``row`` for horizontal plots). Note that, although
it is possible to make rather complex plots using this function, in many
    cases you may be better served by creating several smaller and more focused
plots than by trying to stuff many comparisons into one figure.
After plotting, the :class:`FacetGrid` with the plot is returned and can
be used directly to tweak supporting plot details or add other layers.
Note that, unlike when using the underlying plotting functions directly,
data must be passed in a long-form DataFrame with variables specified by
passing strings to ``x``, ``y``, ``hue``, and other parameters.
As in the case with the underlying plot functions, if variables have a
``categorical`` data type, the correct orientation of the plot elements,
the levels of the categorical variables, and their order will be inferred
from the objects. Otherwise you may have to use the function parameters
(``orient``, ``order``, ``hue_order``, etc.) to set up the plot correctly.
Parameters
----------
{string_input_params}
{long_form_data}
row, col : names of variables in ``data``, optional
Categorical variables that will determine the faceting of the grid.
{col_wrap}
{stat_api_params}
{order_vars}
row_order, col_order : lists of strings, optional
Order to organize the rows and/or columns of the grid in, otherwise the
orders are inferred from the data objects.
kind : {{``point``, ``bar``, ``count``, ``box``, ``violin``, ``strip``}}
The kind of plot to draw.
{size}
{aspect}
{orient}
{color}
{palette}
legend : bool, optional
If ``True`` and there is a ``hue`` variable, draw a legend on the plot.
{legend_out}
{share_xy}
{margin_titles}
facet_kws : dict, optional
Dictionary of other keyword arguments to pass to :class:`FacetGrid`.
kwargs : key, value pairings
Other keyword arguments are passed through to the underlying plotting
function.
Returns
-------
g : :class:`FacetGrid`
Returns the :class:`FacetGrid` object with the plot on it for further
tweaking.
Examples
--------
Draw a single facet to use the :class:`FacetGrid` legend placement:
.. plot::
:context: close-figs
>>> import seaborn as sns
>>> sns.set(style="ticks")
>>> exercise = sns.load_dataset("exercise")
>>> g = sns.factorplot(x="time", y="pulse", hue="kind", data=exercise)
Use a different plot kind to visualize the same data:
.. plot::
:context: close-figs
>>> g = sns.factorplot(x="time", y="pulse", hue="kind",
... data=exercise, kind="violin")
Facet along the columns to show a third categorical variable:
.. plot::
:context: close-figs
>>> g = sns.factorplot(x="time", y="pulse", hue="kind",
... col="diet", data=exercise)
Use a different size and aspect ratio for the facets:
.. plot::
:context: close-figs
>>> g = sns.factorplot(x="time", y="pulse", hue="kind",
... col="diet", data=exercise,
... size=5, aspect=.8)
Make many column facets and wrap them into the rows of the grid:
.. plot::
:context: close-figs
>>> titanic = sns.load_dataset("titanic")
>>> g = sns.factorplot("alive", col="deck", col_wrap=4,
... data=titanic[titanic.deck.notnull()],
... kind="count", size=2.5, aspect=.8)
Plot horizontally and pass other keyword arguments to the plot function:
.. plot::
:context: close-figs
>>> g = sns.factorplot(x="age", y="embark_town",
... hue="sex", row="class",
... data=titanic[titanic.embark_town.notnull()],
... orient="h", size=2, aspect=3.5, palette="Set3",
... kind="violin", split=True, cut=0, bw=.2)
Use methods on the returned :class:`FacetGrid` to tweak the presentation:
.. plot::
:context: close-figs
>>> g = sns.factorplot(x="who", y="survived", col="class",
... data=titanic, saturation=.5,
... kind="bar", ci=None, aspect=.6)
>>> (g.set_axis_labels("", "Survival Rate")
... .set_xticklabels(["Men", "Women", "Children"])
... .set_titles("{{col_name}} {{col_var}}")
... .set(ylim=(0, 1))
... .despine(left=True)) #doctest: +ELLIPSIS
<seaborn.axisgrid.FacetGrid object at 0x...>
""").format(**_categorical_docs)
| bsd-3-clause |
brev/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/numerix/__init__.py | 69 | 5473 | """
numerix imports Numeric, numarray, or numpy based on various selectors.
0. If the value "--numpy","--numarray" or "--Numeric" is specified on the
command line, then numerix imports the specified
array package.
1. The value of numerix in matplotlibrc: Numeric, numarray, or numpy
2. If none of the above is done, the default array package is Numeric.
Because the matplotlibrc always provides *some* value for numerix
(it has its own system of default values), this default is most
likely never used.
To summarize: the commandline is examined first, the rc file second,
and the default array package is Numeric.
"""
import sys, os, struct
from matplotlib import rcParams, verbose
which = None, None
use_maskedarray = None
# First, see if --numarray or --Numeric was specified on the command
# line:
for a in sys.argv:
if a in ["--Numeric", "--numeric", "--NUMERIC",
"--Numarray", "--numarray", "--NUMARRAY",
"--NumPy", "--numpy", "--NUMPY", "--Numpy",
]:
which = a[2:], "command line"
if a == "--maskedarray":
use_maskedarray = True
if a == "--ma":
use_maskedarray = False
try: del a
except NameError: pass
if which[0] is None:
try: # In theory, rcParams always has *some* value for numerix.
which = rcParams['numerix'], "rc"
except KeyError:
pass
if use_maskedarray is None:
try:
use_maskedarray = rcParams['maskedarray']
except KeyError:
use_maskedarray = False
# If all the above fail, default to Numeric. Most likely not used.
if which[0] is None:
which = "numeric", "defaulted"
which = which[0].strip().lower(), which[1]
if which[0] not in ["numeric", "numarray", "numpy"]:
raise ValueError("numerix selector must be either 'Numeric', 'numarray', or 'numpy' but the value obtained from the %s was '%s'." % (which[1], which[0]))
if which[0] == "numarray":
import warnings
warnings.warn("numarray use as a numerix backed for matplotlib is deprecated",
DeprecationWarning, stacklevel=1)
#from na_imports import *
from numarray import *
from _na_imports import nx, inf, infinity, Infinity, Matrix, isnan, all
from numarray.numeric import nonzero
from numarray.convolve import cross_correlate, convolve
import numarray
version = 'numarray %s'%numarray.__version__
nan = struct.unpack('d', struct.pack('Q', 0x7ff8000000000000))[0]
elif which[0] == "numeric":
import warnings
warnings.warn("Numeric use as a numerix backed for matplotlib is deprecated",
DeprecationWarning, stacklevel=1)
#from nc_imports import *
from Numeric import *
from _nc_imports import nx, inf, infinity, Infinity, isnan, all, any
from Matrix import Matrix
import Numeric
version = 'Numeric %s'%Numeric.__version__
nan = struct.unpack('d', struct.pack('Q', 0x7ff8000000000000))[0]
elif which[0] == "numpy":
try:
import numpy.oldnumeric as numpy
from numpy.oldnumeric import *
except ImportError:
import numpy
from numpy import *
print 'except asarray', asarray
from _sp_imports import nx, infinity, rand, randn, isnan, all, any
from _sp_imports import UInt8, UInt16, UInt32, Infinity
try:
from numpy.oldnumeric.matrix import Matrix
except ImportError:
Matrix = matrix
version = 'numpy %s' % numpy.__version__
from numpy import nan
else:
raise RuntimeError("invalid numerix selector")
# Some changes are only applicable to the new numpy:
if (which[0] == 'numarray' or
which[0] == 'numeric'):
from mlab import amin, amax
newaxis = NewAxis
def typecode(a):
return a.typecode()
def iscontiguous(a):
return a.iscontiguous()
def byteswapped(a):
return a.byteswapped()
def itemsize(a):
return a.itemsize()
def angle(a):
return arctan2(a.imag, a.real)
else:
# We've already checked for a valid numerix selector,
# so assume numpy.
from mlab import amin, amax
newaxis = NewAxis
from numpy import angle
def typecode(a):
return a.dtype.char
def iscontiguous(a):
return a.flags.contiguous
def byteswapped(a):
return a.byteswap()
def itemsize(a):
return a.itemsize
verbose.report('numerix %s'%version)
# a bug fix for blas numeric suggested by Fernando Perez
matrixmultiply=dot
asum = sum
def _import_fail_message(module, version):
"""Prints a message when the array package specific version of an extension
fails to import correctly.
"""
_dict = { "which" : which[0],
"module" : module,
"specific" : version + module
}
print """
The import of the %(which)s version of the %(module)s module,
%(specific)s, failed. This is either because %(which)s was
unavailable when matplotlib was compiled, because a dependency of
%(specific)s could not be satisfied, or because the build flag for
this module was turned off in setup.py. If it appears that
%(specific)s was not built, make sure you have a working copy of
%(which)s and then re-install matplotlib. Otherwise, the following
traceback gives more details:\n""" % _dict
g = globals()
l = locals()
__import__('ma', g, l)
__import__('fft', g, l)
__import__('linear_algebra', g, l)
__import__('random_array', g, l)
__import__('mlab', g, l)
la = linear_algebra
ra = random_array
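# A minimal smoke-test sketch (an illustrative addition, not part of the
# original module): when this file is run directly it reports which array
# backend was selected above and exercises the small wrapper functions
# (typecode, iscontiguous, itemsize) defined for that backend. The demo
# array shape and typecode are arbitrary.
if __name__ == '__main__':
    _demo = zeros((2, 3), 'd')
    print 'numerix backend: %s (selected via %s)' % (which[0], which[1])
    print 'version:', version
    print 'typecode(_demo):', typecode(_demo)
    print 'iscontiguous(_demo):', iscontiguous(_demo)
    print 'itemsize(_demo):', itemsize(_demo)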
| agpl-3.0 |
CorySimon/CorySimon.github.io | codes/overboarding.py | 1 | 3038 | import numpy as np
import math
import matplotlib.pyplot as plt
from scipy.stats import norm
plt.style.use('bmh')
import matplotlib
matplotlib.rc('lines',linewidth=3)
matplotlib.rc('font',size=16)
# revenue we make from each ticket sold ($)
revenue_per_ticket = 250
# cost of a voucher ($)
cost_per_voucher = 800
# probability any given passenger who bought a ticket will show up for his/her flight
p = 0.9
# total number of seats on the airplane.
nb_total_seats = 100
# Goal: find expected net revenue per flight as a function of `x`, the number of tickets sold beyond capacity.
# i.e. we are selling `nb_total_seats` + `x` tickets.
# net revenue = (revenue from tickets) - (cost of voucher payoffs to overbook customers)
# We will find net revenue for `x` = 0, 1, 2, ..., N_x
# (Note we only consider `x` >= 0 b/c we at least sell a ticket for each seat!)
N_x = 55
# pre-allocate here. net_revenue[i] := net revenue for x = i.
expected_net_revenue = np.zeros((N_x, ))
## expected net revenue as a function of x
for x in range(N_x):
# mean and variance in binomial distribution for this $x$.
# e.g. mean is referring to the # of customers we expect to show up given we sold (nb_total_seats+x) tickets
mean = (nb_total_seats + x) * p
sig2 = (nb_total_seats + x) * p * (1 - p)
# pre-allocate expected voucher payoffs and ticket revenue we expect for this `x`
expected_voucher_payoffs = 0.0
expected_ticket_revenue = 0.0
# consider the probability that $k$ customers show up to the flight
# anywhere from 0, 1, 2, ..., nb_total_seats+x customers could show up
# ... since we sold nb_total_seats+x tickets!
for k in range(nb_total_seats + x + 1):
# to calculate Pr(N=k| x), governed by binomial dist'n, use normal approximation to binomial
# let Z ~ Normal(0, 1)
# Pr(N=k|x) ~ Prob(l < Z < h)
# subtract cumulative distribution (cdf) functions for this
h = (k + 0.5 - mean) / math.sqrt(sig2)  # +/- 0.5 is the continuity correction
l = (k - 0.5 - mean) / math.sqrt(sig2)
prob_k_show_up = norm.cdf(h) - norm.cdf(l)
# calculate ticket revenue given `k` customers show up
ticket_revenue = revenue_per_ticket * np.min([nb_total_seats, k])
expected_ticket_revenue += prob_k_show_up * ticket_revenue
# calculate voucher payoffs
voucher_payoffs = cost_per_voucher * np.max([0, k - nb_total_seats])
expected_voucher_payoffs += prob_k_show_up * voucher_payoffs
expected_net_revenue[x] = expected_ticket_revenue - expected_voucher_payoffs
# plot expected net revenue as a function of `x`
fig = plt.figure()
plt.plot(range(N_x), expected_net_revenue, linewidth=3)
plt.xlim([0, N_x - 1])
plt.axhline(y=0, linestyle='--', color='k')
plt.axhline(y=nb_total_seats * revenue_per_ticket, linestyle='--', color='r')
plt.xlabel('# tickets beyond capacity ($x$)')
plt.ylabel('Expected revenue (\$)')
plt.tight_layout()
plt.savefig('overbook.png',format='png')
plt.show()
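# Optional sanity check (an illustrative addition, not part of the original
# analysis): compare the normal approximation with continuity correction used
# in the loop above against the exact binomial PMF from scipy, for a single
# arbitrarily chosen value of x. The difference should be small for
# nb_total_seats + x trials with p = 0.9.
from scipy.stats import binom
x_check = 10  # arbitrary example value of x
n_tickets_check = nb_total_seats + x_check
mean_check = n_tickets_check * p
sig2_check = n_tickets_check * p * (1 - p)
max_abs_diff = 0.0
for k in range(n_tickets_check + 1):
    exact = binom.pmf(k, n_tickets_check, p)
    approx = norm.cdf((k + 0.5 - mean_check) / math.sqrt(sig2_check)) - \
             norm.cdf((k - 0.5 - mean_check) / math.sqrt(sig2_check))
    max_abs_diff = max(max_abs_diff, abs(exact - approx))
print("max |exact - normal approx| for x = %d: %.5f" % (x_check, max_abs_diff))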
| mit |
EPAENERGYSTAR/epathermostat | thermostat/core.py | 1 | 75403 | from datetime import datetime, timedelta
from collections import namedtuple
from itertools import repeat
import inspect
from warnings import warn
import logging
import pandas as pd
import numpy as np
from scipy.optimize import leastsq
from pkg_resources import resource_stream
from thermostat.regression import runtime_regression
from thermostat import get_version
from thermostat.climate_zone import retrieve_climate_zone
try:
if "0.21." in pd.__version__:
warn(
"WARNING: Pandas version 0.21.x has known issues and is not supported. "
"Please either downgrade to Pandas 0.20.3 or upgrade to the latest Pandas version.")
except TypeError:
pass # Documentation mocks out pd, so ignore if not present.
# Ignore divide-by-zero errors
np.seterr(divide='ignore', invalid='ignore')
CoreDaySet = namedtuple("CoreDaySet", ["name", "daily", "hourly", "start_date", "end_date"])
logger = logging.getLogger('epathermostat')
VAR_MIN_RHU_RUNTIME = 30 * 60 # Unit is in minutes (30 hours * 60 minutes)
RESISTANCE_HEAT_USE_BINS_MIN_TEMP = 0 # Unit is 1 degree F.
RESISTANCE_HEAT_USE_BINS_MAX_TEMP = 60 # Unit is 1 degree F.
RESISTANCE_HEAT_USE_BIN_TEMP_WIDTH = 5 # Unit is 1 degree F.
RESISTANCE_HEAT_USE_BIN_FIRST = list(t for t in range(
RESISTANCE_HEAT_USE_BINS_MIN_TEMP,
RESISTANCE_HEAT_USE_BINS_MAX_TEMP + RESISTANCE_HEAT_USE_BIN_TEMP_WIDTH,
RESISTANCE_HEAT_USE_BIN_TEMP_WIDTH))
RESISTANCE_HEAT_USE_BIN_FIRST_TUPLE = [(RESISTANCE_HEAT_USE_BIN_FIRST[i], RESISTANCE_HEAT_USE_BIN_FIRST[i+1])
for i in range(0, len(RESISTANCE_HEAT_USE_BIN_FIRST) - 1)]
RESISTANCE_HEAT_USE_BIN_SECOND = [-np.inf, 10, 20, 30, 40, 50, 60]
RESISTANCE_HEAT_USE_BIN_SECOND_TUPLE = [(RESISTANCE_HEAT_USE_BIN_SECOND[i], RESISTANCE_HEAT_USE_BIN_SECOND[i+1])
for i in range(0, len(RESISTANCE_HEAT_USE_BIN_SECOND) - 1)]
# FIXME: Turning off these warnings for now
pd.set_option('mode.chained_assignment', None)
class Thermostat(object):
""" Main thermostat data container. Each parameter which contains
timeseries data should be a pandas.Series with a datetimeIndex, and that
each index should be equivalent.
Parameters
----------
thermostat_id : object
An identifier for the thermostat. Can be anything, but should be
identifying (e.g., an ID provided by the manufacturer).
equipment_type : { 0, 1, 2, 3, 4, 5 }
- :code:`0`: Other - e.g. multi-zone multi-stage, modulating. Note: module will
not output savings data for this type.
- :code:`1`: Single stage heat pump with aux and/or emergency heat
- :code:`2`: Single stage heat pump without aux or emergency heat
- :code:`3`: Single stage non heat pump with single-stage central air conditioning
- :code:`4`: Single stage non heat pump without central air conditioning
- :code:`5`: Single stage central air conditioning without central heating
zipcode : str
Installation ZIP code for the thermostat.
station : str
USAF identifier for weather station used to pull outdoor temperature data.
temperature_in : pandas.Series
Contains internal temperature data in degrees Fahrenheit (F),
with resolution of at least 0.5F.
Should be indexed by a pandas.DatetimeIndex with hourly frequency (i.e.
:code:`freq='H'`).
temperature_out : pandas.Series
Contains outdoor temperature data as observed by a relevant
weather station in degrees Fahrenheit (F), with resolution of at least
0.5F.
Should be indexed by a pandas.DatetimeIndex with hourly frequency (i.e.
:code:`freq='H'`).
cooling_setpoint : pandas.Series
Contains target temperature (setpoint) data in degrees Fahrenheit (F),
with resolution of at least 0.5F used to control cooling equipment.
Should be indexed by a pandas.DatetimeIndex with hourly frequency (i.e.
:code:`freq='H'`).
heating_setpoint : pandas.Series
Contains target temperature (setpoint) data in degrees Fahrenheit (F),
with resolution of at least 0.5F used to control heating equipment.
Should be indexed by a pandas.DatetimeIndex with hourly frequency (i.e.
:code:`freq='H'`).
cool_runtime : pandas.Series,
Daily runtimes for cooling equipment controlled by the thermostat, measured
in minutes. No datapoint should exceed 1440 mins, which would indicate
over a day of runtime (impossible).
Should be indexed by a pandas.DatetimeIndex with daily frequency (i.e.
:code:`freq='D'`).
heat_runtime : pandas.Series,
Daily runtimes for heating equipment controlled by the thermostat, measured
in minutes. No datapoint should exceed 1440 mins, which would indicate
over a day of runtime (impossible).
Should be indexed by a pandas.DatetimeIndex with daily frequency (i.e.
:code:`freq='D'`).
auxiliary_heat_runtime : pandas.Series,
Hourly runtimes for auxiliary heating equipment controlled by the
thermostat, measured in minutes. Auxiliary heat runtime is counted when
both resistance heating and the compressor are running (for heat pump
systems). No datapoint should exceed 60 mins, which would indicate
over an hour of runtime (impossible).
Should be indexed by a pandas.DatetimeIndex with hourly frequency (i.e.
:code:`freq='H'`).
emergency_heat_runtime : pandas.Series,
Hourly runtimes for emergency heating equipment controlled by the
thermostat, measured in minutes. Emergency heat runtime is counted when
resistance heating is running while the compressor is not (for heat pump
systems). No datapoint should exceed 60 mins, which would indicate
over an hour of runtime (impossible).
Should be indexed by a pandas.DatetimeIndex with hourly frequency (i.e.
:code:`freq='H'`).
"""
HEATING_EQUIPMENT_TYPES = set([1, 2, 3, 4])
COOLING_EQUIPMENT_TYPES = set([1, 2, 3, 5])
AUX_EMERG_EQUIPMENT_TYPES = set([1])
def __init__(
self, thermostat_id, equipment_type, zipcode, station,
temperature_in, temperature_out, cooling_setpoint,
heating_setpoint, cool_runtime, heat_runtime,
auxiliary_heat_runtime, emergency_heat_runtime):
self.thermostat_id = thermostat_id
self.equipment_type = equipment_type
self.zipcode = zipcode
self.station = station
self.temperature_in = self._interpolate(temperature_in, method="linear")
self.temperature_out = self._interpolate(temperature_out, method="linear")
self.cooling_setpoint = cooling_setpoint
self.heating_setpoint = heating_setpoint
self.cool_runtime = cool_runtime
self.heat_runtime = heat_runtime
self.auxiliary_heat_runtime = auxiliary_heat_runtime
self.emergency_heat_runtime = emergency_heat_runtime
self.validate()
def validate(self):
self._validate_heating()
self._validate_cooling()
self._validate_aux_emerg()
def _format_rhu(self, rhu_type, low, high, duty_cycle):
format_string = "{rhu_type}_{low:02d}F_to_{high:02d}F"
if low == -np.inf:
format_string = "{rhu_type}_less{high:02d}F"
low = 0 # Don't need this value so we zero it out
if high == np.inf:
format_string = "{rhu_type}_greater{low:02d}F"
high = 0 # Don't need this value so we zero it out
result = format_string.format(
rhu_type=rhu_type,
low=int(low),
high=int(high))
if duty_cycle is not None:
result = '_'.join((result, duty_cycle))
return result
def _validate_heating(self):
if self.equipment_type in self.HEATING_EQUIPMENT_TYPES:
if self.heat_runtime is None:
message = "For thermostat {}, heating runtime data was not provided," \
" despite equipment type of {}, which requires heating data.".format(self.thermostat_id, self.equipment_type)
raise ValueError(message)
if self.heating_setpoint is None:
message = "For thermostat {}, heating setpoint data was not provided," \
" despite equipment type of {}, which requires heating data." \
" If only one setpoint is used, (or if there is no distinction" \
" between the heating and cooling setpoints, please" \
" explicitly provide two copies of the available setpoint data" \
.format(self.thermostat_id, self.equipment_type)
raise ValueError(message)
def _validate_cooling(self):
if self.equipment_type in self.COOLING_EQUIPMENT_TYPES:
if self.cool_runtime is None:
message = "For thermostat {}, cooling runtime data was not provided," \
" despite equipment type of {}, which requires cooling data.".format(self.thermostat_id, self.equipment_type)
raise ValueError(message)
if self.cooling_setpoint is None:
message = "For thermostat {}, cooling setpoint data was not provided," \
" despite equipment type of {}, which requires heating data." \
" If only one setpoint is used, (or if there is no distinction" \
" between the heating and cooling setpoints, please" \
" explicitly provide two copies of the available setpoint data" \
.format(self.thermostat_id, self.equipment_type)
raise ValueError(message)
def _validate_aux_emerg(self):
if self.equipment_type in self.AUX_EMERG_EQUIPMENT_TYPES:
if self.auxiliary_heat_runtime is None or self.emergency_heat_runtime is None:
message = "For thermostat {}, aux and emergency runtime data were not provided," \
" despite equipment type of {}, which requires these columns of data."\
" If none is available, please change to equipment_type 2," \
" or provide columns of 0s".format(self.thermostat_id, self.equipment_type)
raise ValueError(message)
def _interpolate(self, series, method="linear"):
if method not in ["linear"]:
return series
return series.interpolate(method="linear", limit=1, limit_direction="both")
def _protect_heating(self):
function_name = inspect.stack()[1][3]
if self.equipment_type not in self.HEATING_EQUIPMENT_TYPES:
message = "The function '{}', which is heating specific, cannot be" \
" called for equipment_type {}".format(function_name, self.equipment_type)
raise ValueError(message)
def _protect_cooling(self):
function_name = inspect.stack()[1][3]
if self.equipment_type not in self.COOLING_EQUIPMENT_TYPES:
message = "The function '{}', which is cooling specific, cannot be" \
" called for equipment_type {}".format(function_name, self.equipment_type)
raise ValueError(message)
def _protect_aux_emerg(self):
function_name = inspect.stack()[1][3]
if self.equipment_type not in self.AUX_EMERG_EQUIPMENT_TYPES:
message = "The function '{}', which is auxiliary/emergency heating specific, cannot be" \
" called for equipment_type {}".format(function_name, self.equipment_type)
raise ValueError(message)
def get_core_heating_days(self, method="entire_dataset",
min_minutes_heating=30, max_minutes_cooling=0):
""" Determine core heating days from data associated with this thermostat
Parameters
----------
method : {"entire_dataset", "year_mid_to_mid"}, default: "entire_dataset"
Method by which to find core heating day sets.
- "entire_dataset": all heating days in dataset (days with >= 30 min
of heating runtime and no cooling runtime). (default)
- "year_mid_to_mid": groups all heating days (days with >= 30 min
of total heating and no cooling) from July 1 to June 30
(inclusive) into individual core heating day sets. May overlap
with core cooling day sets.
min_minutes_heating : int, default 30
Number of minutes of heating runtime per day required for inclusion
in core heating day set.
max_minutes_cooling : int, default 0
Number of minutes of cooling runtime per day beyond which the day
is considered part of a shoulder season (and is therefore not part
of the core heating day set).
Returns
-------
core_heating_day_sets : list of thermostat.core.CoreDaySet objects
List of core day sets detected; Core day sets are represented as
pandas Series of boolean values, intended to be used as selectors
or masks on the thermostat data at hourly and daily frequencies.
A value of True at a particular index indicates inclusion of
the data at that index in the core day set. If method is
"entire_dataset", name of core day sets are "heating_ALL"; if method
is "year_mid_to_mid", names of core day sets are of the form
"heating_YYYY-YYYY"
"""
if method not in ["year_mid_to_mid", "entire_dataset"]:
raise NotImplementedError
self._protect_heating()
# compute inclusion thresholds
meets_heating_thresholds = self.heat_runtime >= min_minutes_heating
if self.equipment_type in self.COOLING_EQUIPMENT_TYPES:
meets_cooling_thresholds = self.cool_runtime <= max_minutes_cooling
else:
meets_cooling_thresholds = True
meets_thresholds = meets_heating_thresholds & meets_cooling_thresholds
# enough temperature_in
enough_temp_in = \
self.temperature_in.groupby(self.temperature_in.index.date) \
.apply(lambda x: x.isnull().sum() <= 2)
enough_temp_out = \
self.temperature_out.groupby(self.temperature_out.index.date) \
.apply(lambda x: x.isnull().sum() <= 2)
meets_thresholds &= enough_temp_in & enough_temp_out
data_start_date = np.datetime64(self.heat_runtime.index[0])
data_end_date = np.datetime64(self.heat_runtime.index[-1])
if method == "year_mid_to_mid":
# find all potential core heating day ranges
start_year = data_start_date.item().year - 1
end_year = data_end_date.item().year + 1
potential_core_day_sets = zip(range(start_year, end_year),
range(start_year + 1, end_year + 1))
# for each potential core day set, look for core heating days.
core_heating_day_sets = []
for start_year_, end_year_ in potential_core_day_sets:
core_day_set_start_date = np.datetime64(datetime(start_year_, 7, 1))
core_day_set_end_date = np.datetime64(datetime(end_year_, 7, 1))
start_date = max(core_day_set_start_date, data_start_date).item()
end_date = min(core_day_set_end_date, data_end_date).item()
in_range = self._get_range_boolean(self.heat_runtime.index,
start_date, end_date)
inclusion_daily = pd.Series(in_range & meets_thresholds,
index=self.heat_runtime.index)
if any(inclusion_daily):
name = "heating_{}-{}".format(start_year_, end_year_)
inclusion_hourly = self._get_hourly_boolean(inclusion_daily)
core_day_set = CoreDaySet(name, inclusion_daily, inclusion_hourly,
start_date, end_date)
core_heating_day_sets.append(core_day_set)
return core_heating_day_sets
elif method == "entire_dataset":
inclusion_daily = pd.Series(meets_thresholds, index=self.heat_runtime.index)
inclusion_hourly = self._get_hourly_boolean(inclusion_daily)
core_heating_day_set = CoreDaySet(
"heating_ALL",
inclusion_daily,
inclusion_hourly,
data_start_date,
data_end_date)
# returned as list for consistency
core_heating_day_sets = [core_heating_day_set]
return core_heating_day_sets
def get_core_cooling_days(self, method="entire_dataset",
min_minutes_cooling=30, max_minutes_heating=0):
""" Determine core cooling days from data associated with this
thermostat.
Parameters
----------
method : {"entire_dataset", "year_end_to_end"}, default: "entire_dataset"
Method by which to find core cooling days.
- "entire_dataset": all cooling days in dataset (days with >= 30 min
of cooling runtime and no heating runtime). (default)
- "year_end_to_end": groups all cooling days (days with >= 30 min
of total cooling and no heating) from January 1 to December 31
into individual core cooling sets.
min_minutes_cooling : int, default 30
Number of minutes of core cooling runtime per day required for
inclusion in core cooling day set.
max_minutes_heating : int, default 0
Number of minutes of heating runtime per day beyond which the day is
considered part of a shoulder season (and is therefore not part of
the core cooling day set).
Returns
-------
core_cooling_day_sets : list of thermostat.core.CoreDaySet objects
List of core day sets detected; Core day sets are represented as
pandas Series of boolean values, intended to be used as selectors
or masks on the thermostat data at hourly and daily frequencies.
A value of True at a particular index indicates inclusion of
the data at that index in the core day set. If method is
"entire_dataset", name of core day set is "cooling_ALL"; if method
is "year_end_to_end", names of core day sets are of the form
"cooling_YYYY"
"""
if method not in ["year_end_to_end", "entire_dataset"]:
raise NotImplementedError
self._protect_cooling()
# find all potential core cooling day ranges
data_start_date = np.datetime64(self.cool_runtime.index[0])
data_end_date = np.datetime64(self.cool_runtime.index[-1])
# compute inclusion thresholds
if self.equipment_type in self.HEATING_EQUIPMENT_TYPES:
meets_heating_thresholds = self.heat_runtime <= max_minutes_heating
else:
meets_heating_thresholds = True
meets_cooling_thresholds = self.cool_runtime >= min_minutes_cooling
meets_thresholds = meets_heating_thresholds & meets_cooling_thresholds
# enough temperature_in
enough_temp_in = \
self.temperature_in.groupby(self.temperature_in.index.date) \
.apply(lambda x: x.isnull().sum() <= 2)
enough_temp_out = \
self.temperature_out.groupby(self.temperature_out.index.date) \
.apply(lambda x: x.isnull().sum() <= 2)
meets_thresholds &= enough_temp_in & enough_temp_out
if method == "year_end_to_end":
start_year = data_start_date.item().year
end_year = data_end_date.item().year
potential_core_day_sets = range(start_year, end_year + 1)
# for each potential core day set, look for cooling days.
core_cooling_day_sets = []
for year in potential_core_day_sets:
core_day_set_start_date = np.datetime64(datetime(year, 1, 1))
core_day_set_end_date = np.datetime64(datetime(year + 1, 1, 1))
start_date = max(core_day_set_start_date, data_start_date).item()
end_date = min(core_day_set_end_date, data_end_date).item()
in_range = self._get_range_boolean(self.cool_runtime.index,
start_date, end_date)
inclusion_daily = pd.Series(in_range & meets_thresholds,
index=self.cool_runtime.index)
if any(inclusion_daily):
name = "cooling_{}".format(year)
inclusion_hourly = self._get_hourly_boolean(inclusion_daily)
core_day_set = CoreDaySet(name, inclusion_daily, inclusion_hourly,
start_date, end_date)
core_cooling_day_sets.append(core_day_set)
return core_cooling_day_sets
elif method == "entire_dataset":
inclusion_daily = pd.Series(meets_thresholds, index=self.cool_runtime.index)
inclusion_hourly = self._get_hourly_boolean(inclusion_daily)
core_day_set = CoreDaySet(
"cooling_ALL",
inclusion_daily,
inclusion_hourly,
data_start_date,
data_end_date)
core_cooling_day_sets = [core_day_set]
return core_cooling_day_sets
def _get_range_boolean(self, dt_index, start_date, end_date):
after_start = dt_index >= start_date
before_end = dt_index < end_date
return after_start & before_end
def _get_hourly_boolean(self, daily_boolean):
values = np.repeat(daily_boolean.values, 24)
index = pd.date_range(start=daily_boolean.index[0],
periods=daily_boolean.index.shape[0] * 24, freq="H")
hourly_boolean = pd.Series(values, index)
return hourly_boolean
def total_heating_runtime(self, core_day_set):
""" Calculates total heating runtime.
Parameters
----------
core_day_set : thermostat.core.CoreDaySet
Core day set for which to calculate total runtime.
Returns
-------
total_runtime : float
Total heating runtime.
"""
self._protect_heating()
return self.heat_runtime[core_day_set.daily].sum()
def total_auxiliary_heating_runtime(self, core_day_set):
""" Calculates total auxiliary heating runtime.
Parameters
----------
core_day_set : thermostat.core.CoreDaySet
Core day set for which to calculate total runtime.
Returns
-------
total_runtime : float
Total auxiliary heating runtime.
"""
self._protect_aux_emerg()
return self.auxiliary_heat_runtime[core_day_set.hourly].sum()
def total_emergency_heating_runtime(self, core_day_set):
""" Calculates total emergency heating runtime.
Parameters
----------
core_day_set : thermostat.core.CoreDaySet
Core day set for which to calculate total runtime.
Returns
-------
total_runtime : float
Total heating runtime.
"""
self._protect_aux_emerg()
return self.emergency_heat_runtime[core_day_set.hourly].sum()
def total_cooling_runtime(self, core_day_set):
""" Calculates total cooling runtime.
Parameters
----------
core_day_set : thermostat.core.CoreDaySet
Core day set for which to calculate total runtime.
Returns
-------
total_runtime : float
Total cooling runtime.
"""
self._protect_cooling()
return self.cool_runtime[core_day_set.daily].sum()
def get_resistance_heat_utilization_runtime(self, core_heating_day_set):
""" Calculates resistance heat utilization runtime and filters based on
the core heating days
Parameters
----------
core_heating_day_set : thermostat.core.CoreDaySet
Core heating day set for which to calculate total runtime.
Returns
-------
runtime_temp : pandas.DataFrame or None
A pandas DataFrame which includes the outdoor temperature, heat
runtime, aux runtime, and emergency runtime, filtered by the core
heating day set. Returns None if the thermostat does
not control the appropriate equipment.
"""
self._protect_aux_emerg()
if self.equipment_type != 1:
return None
in_core_day_set_daily = self._get_range_boolean(
core_heating_day_set.daily.index,
core_heating_day_set.start_date,
core_heating_day_set.end_date)
# convert hourly to daily
temp_out_daily = self.temperature_out.resample('D').mean()
aux_daily = self.auxiliary_heat_runtime.resample('D').sum()
emg_daily = self.emergency_heat_runtime.resample('D').sum()
# Build the initial DataFrame based on daily readings
runtime_temp = pd.DataFrame()
runtime_temp['temperature'] = temp_out_daily
runtime_temp['heat_runtime'] = self.heat_runtime
runtime_temp['aux_runtime'] = aux_daily
runtime_temp['emg_runtime'] = emg_daily
runtime_temp['in_core_daily'] = in_core_day_set_daily
runtime_temp['total_minutes'] = 1440 # default number of minutes per day
# Filter out records that aren't part of the core day set
runtime_temp = runtime_temp[runtime_temp['in_core_daily'].map(lambda x: x is True)]
return runtime_temp
def get_resistance_heat_utilization_bins(self, runtime_temp, bins, core_heating_day_set, min_runtime_minutes=None):
""" Calculates the resistance heat utilization in
bins (provided by the bins parameter)
Parameters
----------
runtime_temp: DataFrame
Runtime Temperatures Dataframe from get_resistance_heat_utilization_runtime
bins : list
List of the bins (rightmost-edge aligned) for binning
core_heating_day_set : thermostat.core.CoreDaySet
Core heating day set for which to calculate total runtime.
Returns
-------
RHUs : pandas.DataFrame or None
Resistance heat utilization for each temperature bin, ordered
ascending by temperature bin. Returns None if the thermostat does
not control the appropriate equipment or if the runtime_temp is None.
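
Notes
-----
A minimal illustrative sketch of the RHU formula applied below; the
minute totals for a single temperature bin are hypothetical, not drawn
from real thermostat data:

>>> aux_runtime, emg_runtime, heat_runtime = 10.0, 5.0, 85.0
>>> rhu = (aux_runtime + emg_runtime) / (heat_runtime + emg_runtime)
>>> round(rhu, 3)
0.167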
"""
self._protect_aux_emerg()
if self.equipment_type != 1:
return None
if runtime_temp is None:
return None
# Create the bins and group by them
runtime_temp['bins'] = pd.cut(runtime_temp['temperature'], bins)
runtime_rhu = runtime_temp.groupby('bins')['heat_runtime', 'aux_runtime', 'emg_runtime', 'total_minutes'].sum()
# Calculate the RHU based on the bins
runtime_rhu['rhu'] = (runtime_rhu['aux_runtime'] + runtime_rhu['emg_runtime']) / (runtime_rhu['heat_runtime'] + runtime_rhu['emg_runtime'])
# Currently treating aux_runtime as separate from heat_runtime
runtime_rhu['total_runtime'] = runtime_rhu.heat_runtime + runtime_rhu.aux_runtime + runtime_rhu.emg_runtime
# Changed to use the number of minutes per eligible day
runtime_rhu['aux_duty_cycle'] = runtime_rhu.aux_runtime / runtime_rhu.total_minutes
runtime_rhu['emg_duty_cycle'] = runtime_rhu.emg_runtime / runtime_rhu.total_minutes
runtime_rhu['compressor_duty_cycle'] = runtime_rhu.heat_runtime / runtime_rhu.total_minutes
# If we're passed min_runtime_minutes (RHU2) then treat the thermostat as not having run during that period
if min_runtime_minutes:
runtime_rhu['rhu'].loc[runtime_rhu.total_runtime < min_runtime_minutes] = np.nan
runtime_rhu['aux_duty_cycle'].loc[runtime_rhu.total_runtime < min_runtime_minutes] = np.nan
runtime_rhu['emg_duty_cycle'].loc[runtime_rhu.total_runtime < min_runtime_minutes] = np.nan
runtime_rhu['compressor_duty_cycle'].loc[runtime_rhu.total_runtime < min_runtime_minutes] = np.nan
runtime_rhu['total_runtime'].loc[runtime_rhu.total_runtime < min_runtime_minutes] = np.nan
runtime_rhu['data_is_nonsense'] = (runtime_rhu['aux_runtime'] > runtime_rhu['heat_runtime'])
runtime_rhu.loc[runtime_rhu.data_is_nonsense == True, 'rhu'] = np.nan # noqa: E712
if runtime_rhu.data_is_nonsense.any():
for item in runtime_rhu.itertuples():
if item.data_is_nonsense:
warn(
'WARNING: '
'aux heat runtime %s > compressor runtime %s '
'for %sF <= temperature < %sF '
'for thermostat_id %s '
'from %s to %s inclusive' % (
item.aux_runtime,
item.heat_runtime,
item.Index.left,
item.Index.right,
self.thermostat_id,
core_heating_day_set.start_date,
core_heating_day_set.end_date))
return runtime_rhu
def get_ignored_days(self, core_day_set):
""" Determine how many days are ignored for a particular core day set
Returns
-------
n_both : int
Number of days excluded from core day set because of presence of
both heating and cooling runtime.
n_days_insufficient : int
Number of days excluded from core day set because of null runtime
data.
"""
in_range = self._get_range_boolean(
core_day_set.daily.index,
core_day_set.start_date,
core_day_set.end_date)
if self.equipment_type in self.HEATING_EQUIPMENT_TYPES:
has_heating = self.heat_runtime > 0
null_heating = pd.isnull(self.heat_runtime)
else:
has_heating = False
null_heating = False # shouldn't be counted, so False, not True
if self.equipment_type in self.COOLING_EQUIPMENT_TYPES:
has_cooling = self.cool_runtime > 0
null_cooling = pd.isnull(self.cool_runtime)
else:
has_cooling = False
null_cooling = False # shouldn't be counted, so False, not True
n_both = (in_range & has_heating & has_cooling).sum()
n_days_insufficient = (in_range & (null_heating | null_cooling)).sum()
return n_both, n_days_insufficient
def get_core_day_set_n_days(self, core_day_set):
""" Returns number of days in the core day set.
"""
return int(core_day_set.daily.sum())
def get_inputfile_date_range(self, core_day_set):
""" Returns number of days of data provided in input data file.
"""
delta = (core_day_set.end_date - core_day_set.start_date)
if isinstance(delta, timedelta):
return delta.days
else:
try:
result = int(delta.astype('timedelta64[D]') / np.timedelta64(1, 'D'))
except ZeroDivisionError:
logger.debug(
'Date Range divided by zero: %s / %s '
'for thermostat_id %s' % (
delta.astype('timedelta64[D]'), np.timedelta64(1, 'D'),
self.thermostat_id))
result = np.nan
return result
def get_cooling_demand(self, core_cooling_day_set):
"""
Calculates a measure of cooling demand using the hourlyavgCTD method.
Starting with an assumed value of zero for Tau :math:`(\\tau_c)`,
calculate the daily Cooling Thermal Demand :math:`(\\text{daily CTD}_d)`, as follows
:math:`\\text{daily CTD}_d = \\frac{\sum_{n=1}^{24} [\\tau_c - \\text{hourly} \Delta T_{d.n}]_{+}}{24}`, where
:math:`\\text{hourly} \Delta T_{d.n} (^{\circ} F) = \\text{hourly indoor} T_{d.n} - \\text{hourly outdoor} T_{d.n}`, and
:math:`d` is the core cooling day; :math:`\left(001, 002, 003 ... x \\right)`,
:math:`n` is the hour; :math:`\left(01, 02, 03 ... 24 \\right)`,
:math:`\\tau_c` (cooling) is the :math:`\Delta T` associated with :math:`CTD=0` (zero cooling runtime), and
:math:`[]_{+}` indicates that the term is zero if its value would be negative.
For the set of all core cooling days in the CT interval data file, use
ratio estimation to calculate :math:`\\alpha_c`, the home's
responsiveness to cooling, which should be positive.
:math:`\\alpha_c \left(\\frac{\\text{minutes}}{^{\circ} F}\\right) = \\frac{RT_\\text{actual cool}}{\sum_{d=1}^{x} \\text{daily CTD}_d}`, where
:math:`RT_\\text{actual cool}` is the sum of cooling run times for all core cooling days in the CT interval data file.
For the set of all core cooling days in the CT interval data file,
optimize :math:`\\tau_c` that results in minimization of the sum of
squares of the difference between daily run times reported by the CT,
and calculated daily cooling run times.
Next recalculate :math:`\\alpha_c` (in accordance with the above step)
and record the model's parameters :math:`\left(\\alpha_c, \\tau_c \\right)`
Parameters
----------
core_cooling_day_set : thermostat.core.CoreDaySet
Core day set over which to calculate cooling demand.
Returns
-------
demand : pd.Series
Daily demand in the core cooling day set as calculated using the
method described above.
tau : float
Estimate of :math:`\\tau_c`.
alpha : float
Estimate of :math:`\\alpha_c`
mse : float
Mean squared error in runtime estimates.
rmse : float
Root mean squared error in runtime estimates.
cvrmse : float
Coefficient of variation of root mean squared error in runtime estimates.
mape : float
Mean absolute percent error
mae : float
Mean absolute error
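
Notes
-----
A minimal sketch of the daily CTD term for a single hypothetical day
with a constant indoor-minus-outdoor temperature difference; the
temperatures and the assumed tau are illustrative only:

>>> import numpy as np
>>> hourly_deltaT = np.full(24, -5.0)  # indoor 5F cooler than outdoors
>>> tau_c = 0.0
>>> print(np.maximum(tau_c - hourly_deltaT, 0).sum() / 24)
5.0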
"""
self._protect_cooling()
core_day_set_temp_in = self.temperature_in[core_cooling_day_set.hourly]
core_day_set_temp_out = self.temperature_out[core_cooling_day_set.hourly]
core_day_set_deltaT = core_day_set_temp_in - core_day_set_temp_out
daily_index = core_cooling_day_set.daily[core_cooling_day_set.daily].index
def calc_cdd(tau):
hourly_cdd = (tau - core_day_set_deltaT).apply(lambda x: np.maximum(x, 0))
# Note - this `x / 24` should be thought of as a unit conversion, not an average.
return np.array([cdd.sum() / 24 for day, cdd in hourly_cdd.groupby(core_day_set_deltaT.index.date)])
daily_runtime = self.cool_runtime[core_cooling_day_set.daily]
total_runtime = daily_runtime.sum()
def calc_estimates(tau):
cdd = calc_cdd(tau)
total_cdd = np.sum(cdd)
try:
alpha_estimate = total_runtime / total_cdd
except ZeroDivisionError:
logger.debug(
'Alpha Estimate divided by zero: %s / %s'
'for thermostat %s' % (
total_runtime, total_cdd,
self.thermostat_id))
alpha_estimate = np.nan
runtime_estimate = cdd * alpha_estimate
errors = daily_runtime - runtime_estimate
return cdd, alpha_estimate, errors
def estimate_errors(tau_estimate):
_, _, errors = calc_estimates(tau_estimate)
return errors
tau_starting_guess = 0
try:
y, _ = leastsq(estimate_errors, tau_starting_guess)
except TypeError: # len 0
assert daily_runtime.shape[0] == 0 # make sure no other type errors are sneaking in
return pd.Series([], index=daily_index), np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan
tau_estimate = y[0]
cdd, alpha_estimate, errors = calc_estimates(tau_estimate)
mse = np.nanmean((errors)**2)
rmse = mse ** 0.5
mean_daily_runtime = np.nanmean(daily_runtime)
try:
cvrmse = rmse / mean_daily_runtime
except ZeroDivisionError:
logger.debug(
'CVRMSE divided by zero: %s / %s '
'for thermostat_id %s ' % (
rmse, mean_daily_runtime,
self.thermostat_id))
cvrmse = np.nan
mape = np.nanmean(np.absolute(errors / mean_daily_runtime))
mae = np.nanmean(np.absolute(errors))
return pd.Series(cdd, index=daily_index), tau_estimate, alpha_estimate, mse, rmse, cvrmse, mape, mae
def get_heating_demand(self, core_heating_day_set):
"""
Calculates a measure of heating demand using the hourlyavgHTD method.
:math:`\\text{daily HTD}_d = \\frac{\sum_{n=1}^{24} [\\text{hourly} \Delta T_{d.n} - \\tau_h]_{+}}{24}`, where
:math:`\\text{hourly} \Delta T_{d.n} (^{\circ} F) = \\text{hourly indoor} T_{d.n} - \\text{hourly outdoor} T_{d.n}`, and
:math:`d` is the core heating day; :math:`\left(001, 002, 003 ... x \\right)`,
:math:`n` is the hour; :math:`\left(01, 02, 03 ... 24 \\right)`,
:math:`\\tau_h` (heating) is the :math:`\Delta T` associated with :math:`HTD=0`, reflecting that homes with no heat running tend to be warmer than the outdoors, and
:math:`[]_{+}` indicates that the term is zero if its value would be negative.
For the set of all core heating days in the CT interval data file, use
ratio estimation to calculate :math:`\\alpha_h`, the home's
responsiveness to heating, which should be positive.
:math:`\\alpha_h \left(\\frac{\\text{minutes}}{^{\circ} F}\\right) = \\frac{RT_\\text{actual heat}}{\sum_{d=1}^{x} \\text{daily HTD}_d}`, where
:math:`RT_\\text{actual heat}` is the sum of heating run times for all core heating days in the CT interval data file.
For the set of all core heating days in the CT interval data file,
optimize :math:`\\tau_h` that results in minimization of the sum of
squares of the difference between daily run times reported by the CT,
and calculated daily heating run times.
Next recalculate :math:`\\alpha_h` (in accordance with the above step)
and record the model's parameters :math:`\left(\\alpha_h, \\tau_h \\right)`
Parameters
----------
core_heating_day_set : thermostat.core.CoreDaySet
Core day set over which to calculate heating demand.
Returns
-------
demand : pd.Series
Daily demand in the core heating day set as calculated using the
method described above.
tau : float
Estimate of :math:`\\tau_h`.
alpha : float
Estimate of :math:`\\alpha_h`
mse : float
Mean squared error in runtime estimates.
rmse : float
Root mean squared error in runtime estimates.
cvrmse : float
Coefficient of variation of root mean squared error in runtime estimates.
mape : float
Mean absolute percent error
mae : float
Mean absolute error
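
Notes
-----
A minimal sketch of the daily HTD term for a single hypothetical day;
the temperatures and the assumed tau are illustrative only:

>>> import numpy as np
>>> hourly_deltaT = np.full(24, 20.0)  # indoor 20F warmer than outdoors
>>> tau_h = 5.0
>>> print(np.maximum(hourly_deltaT - tau_h, 0).sum() / 24)
15.0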
"""
self._protect_heating()
core_day_set_temp_in = self.temperature_in[core_heating_day_set.hourly]
core_day_set_temp_out = self.temperature_out[core_heating_day_set.hourly]
core_day_set_deltaT = core_day_set_temp_in - core_day_set_temp_out
daily_index = core_heating_day_set.daily[core_heating_day_set.daily].index
def calc_hdd(tau):
hourly_hdd = (core_day_set_deltaT - tau).apply(lambda x: np.maximum(x, 0))
# Note - this `x / 24` should be thought of as a unit conversion, not an average.
return np.array([hdd.sum() / 24 for day, hdd in hourly_hdd.groupby(core_day_set_deltaT.index.date)])
daily_runtime = self.heat_runtime[core_heating_day_set.daily]
total_runtime = daily_runtime.sum()
def calc_estimates(tau):
hdd = calc_hdd(tau)
total_hdd = np.sum(hdd)
try:
alpha_estimate = total_runtime / total_hdd
except ZeroDivisionError:
logger.debug(
'alpha_estimate divided by zero: %s / %s '
'for thermostat_id %s ' % (
total_runtime, total_hdd,
self.thermostat_id))
alpha_estimate = np.nan
runtime_estimate = hdd * alpha_estimate
errors = daily_runtime - runtime_estimate
return hdd, alpha_estimate, errors
def estimate_errors(tau_estimate):
_, _, errors = calc_estimates(tau_estimate)
return errors
tau_starting_guess = 0
try:
y, _ = leastsq(estimate_errors, tau_starting_guess)
except TypeError: # len 0
assert daily_runtime.shape[0] == 0 # make sure no other type errors are sneaking in
return pd.Series([], index=daily_index), np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan
tau_estimate = y[0]
hdd, alpha_estimate, errors = calc_estimates(tau_estimate)
mse = np.nanmean((errors)**2)
rmse = mse ** 0.5
mean_daily_runtime = np.nanmean(daily_runtime)
try:
cvrmse = rmse / mean_daily_runtime
except ZeroDivisionError:
logger.warn(
'CVRMSE divided by zero: %s / %s '
'for thermostat_id %s ' % (
rmse, mean_daily_runtime,
self.thermostat_id))
cvrmse = np.nan
mape = np.nanmean(np.absolute(errors / mean_daily_runtime))
mae = np.nanmean(np.absolute(errors))
return (
pd.Series(hdd, index=daily_index),
tau_estimate,
alpha_estimate,
mse,
rmse,
cvrmse,
mape,
mae
)
def get_core_cooling_day_baseline_setpoint(self, core_cooling_day_set,
method='tenth_percentile', source='temperature_in'):
""" Calculate the core cooling day baseline setpoint (comfort
temperature).
Parameters
----------
core_cooling_day_set : thermost.core.CoreDaySet
Core cooling days over which to calculate baseline cooling setpoint.
method : {"tenth_percentile"}, default: "tenth_percentile"
Method to use in calculation of the baseline.
- "tenth_percentile": 10th percentile of source temperature.
(Either cooling setpoint or temperature in).
source : {"cooling_setpoint", "temperature_in"}, default "temperature_in"
The source of temperatures to use in baseline calculation.
Returns
-------
baseline : float
The baseline cooling setpoint for the core cooling days as determined
by the given method.
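
Notes
-----
A minimal sketch of the percentile calculation on hypothetical hourly
indoor temperatures (values are illustrative only):

>>> import pandas as pd
>>> temps = pd.Series([70.0, 71.0, 72.0, 73.0, 74.0, 75.0, 76.0, 77.0, 78.0, 79.0, 80.0])
>>> print(temps.quantile(.1))
71.0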
"""
self._protect_cooling()
if method != 'tenth_percentile':
raise NotImplementedError
if source == 'cooling_setpoint':
return self.cooling_setpoint[core_cooling_day_set.hourly].dropna().quantile(.1)
elif source == 'temperature_in':
return self.temperature_in[core_cooling_day_set.hourly].dropna().quantile(.1)
else:
raise NotImplementedError
def get_core_heating_day_baseline_setpoint(self, core_heating_day_set,
method='ninetieth_percentile', source='temperature_in'):
""" Calculate the core heating day baseline setpoint (comfort temperature).
Parameters
----------
core_heating_day_set : thermostat.core.CoreDaySet
Core heating days over which to calculate baseline heating setpoint.
method : {"ninetieth_percentile"}, default: "ninetieth_percentile"
Method to use in calculation of the baseline.
- "ninetieth_percentile": 90th percentile of source temperature.
(Either heating setpoint or indoor temperature).
source : {"heating_setpoint", "temperature_in"}, default "temperature_in"
The source of temperatures to use in baseline calculation.
Returns
-------
baseline : float
The baseline heating setpoint for the heating day as determined
by the given method.
"""
self._protect_heating()
if method != 'ninetieth_percentile':
raise NotImplementedError
if source == 'heating_setpoint':
return self.heating_setpoint[core_heating_day_set.hourly].dropna().quantile(.9)
elif source == 'temperature_in':
return self.temperature_in[core_heating_day_set.hourly].dropna().quantile(.9)
else:
raise NotImplementedError
def get_baseline_cooling_demand(self, core_cooling_day_set, temp_baseline, tau):
""" Calculate baseline cooling demand for a particular core cooling
day set and fitted physical parameters.
:math:`\\text{daily CTD base}_d = \\frac{\sum_{n=1}^{24} [\\tau_c - \\text{hourly } \Delta T \\text{ base cool}_{d.n}]_{+}}{24}`, where
:math:`\\text{hourly } \Delta T \\text{ base cool}_{d.n} (^{\circ} F) = \\text{base cool} T_{d.n} - \\text{hourly outdoor} T_{d.n}`, and
:math:`d` is the core cooling day; :math:`\left(001, 002, 003 ... x \\right)`,
:math:`n` is the hour; :math:`\left(01, 02, 03 ... 24 \\right)`,
:math:`\\tau_c` (cooling), determined earlier, is a constant that is part of the CT/home's thermal/HVAC cooling run time model, and
:math:`[]_{+}` indicates that the term is zero if its value would be negative.
Parameters
----------
core_cooling_day_set : thermostat.core.CoreDaySet
Core cooling days over which to calculate baseline cooling demand.
temp_baseline : float
Baseline comfort temperature
tau : float, default: None
From fitted demand model.
Returns
-------
baseline_cooling_demand : pandas.Series
A series containing baseline daily cooling demand for the core
cooling day set.
"""
self._protect_cooling()
hourly_temp_out = self.temperature_out[core_cooling_day_set.hourly]
hourly_cdd = (tau - (temp_baseline - hourly_temp_out)).apply(lambda x: np.maximum(x, 0))
demand = np.array([cdd.sum() / 24 for day, cdd in hourly_cdd.groupby(hourly_temp_out.index.date)])
index = core_cooling_day_set.daily[core_cooling_day_set.daily].index
return pd.Series(demand, index=index)
def get_baseline_heating_demand(self, core_heating_day_set, temp_baseline, tau):
""" Calculate baseline heating demand for a particular core heating day
set and fitted physical parameters.
:math:`\\text{daily HTD base}_d = \\frac{\sum_{n=1}^{24} [\\text{hourly } \Delta T \\text{ base heat}_{d.n} - \\tau_h]_{+}}{24}`, where
:math:`\\text{hourly } \Delta T \\text{ base heat}_{d.n} (^{\circ} F) = \\text{base heat} T_{d.n} - \\text{hourly outdoor} T_{d.n}`, and
:math:`d` is the core heating day; :math:`\left(001, 002, 003 ... x \\right)`,
:math:`n` is the hour; :math:`\left(01, 02, 03 ... 24 \\right)`,
:math:`\\tau_h` (heating), determined earlier, is a constant that is part of the CT/home's thermal/HVAC heating run time model, and
:math:`[]_{+}` indicates that the term is zero if its value would be negative.
Parameters
----------
core_heating_day_set : thermostat.core.CoreDaySet
Core heating days over which to calculate baseline heating demand.
temp_baseline : float
Baseline comfort temperature
tau : float, default: None
From fitted demand model.
Returns
-------
baseline_heating_demand : pandas.Series
A series containing baseline daily heating demand for the core heating days.
"""
self._protect_heating()
hourly_temp_out = self.temperature_out[core_heating_day_set.hourly]
hourly_hdd = (temp_baseline - hourly_temp_out - tau).apply(lambda x: np.maximum(x, 0))
demand = np.array([hdd.sum() / 24 for day, hdd in hourly_hdd.groupby(hourly_temp_out.index.date)])
index = core_heating_day_set.daily[core_heating_day_set.daily].index
return pd.Series(demand, index=index)
def get_baseline_cooling_runtime(self, baseline_cooling_demand, alpha):
""" Calculate baseline cooling runtime given baseline cooling demand
and fitted physical parameters.
:math:`RT_{\\text{base cool}} (\\text{minutes}) = \\alpha_c \cdot \\text{daily CTD base}_d`
Parameters
----------
baseline_cooling_demand : pandas.Series
A series containing estimated daily baseline cooling demand.
alpha : float
Slope of fitted line
Returns
-------
baseline_cooling_runtime : pandas.Series
A series containing estimated daily baseline cooling runtime.
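
Notes
-----
A minimal sketch with hypothetical numbers: with a fitted responsiveness
of alpha = 20 minutes per unit of demand, a day with baseline demand 5.0
maps to 100 minutes of estimated baseline runtime:

>>> import numpy as np
>>> import pandas as pd
>>> demand = pd.Series([5.0, 0.0])
>>> print(np.maximum(20.0 * demand, 0).tolist())
[100.0, 0.0]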
"""
return np.maximum(alpha * (baseline_cooling_demand), 0)
def get_baseline_heating_runtime(self, baseline_heating_demand, alpha):
""" Calculate baseline heating runtime given baseline heating demand.
and fitted physical parameters.
:math:`RT_{\\text{base heat}} (\\text{minutes}) = \\alpha_h \cdot \\text{daily HTD base}_d`
Parameters
----------
baseline_heating_demand : pandas.Series
A series containing estimated daily baseline heating demand.
alpha : float
Slope of fitted line
Returns
-------
baseline_heating_runtime : pandas.Series
A series containing estimated daily baseline heating runtime.
"""
return np.maximum(alpha * (baseline_heating_demand), 0)
def get_daily_avoided_cooling_runtime(
self, baseline_runtime, core_cooling_day_set):
return baseline_runtime - self.cool_runtime[core_cooling_day_set]
def get_daily_avoided_heating_runtime(
self, baseline_runtime, core_heating_day_set):
return baseline_runtime - self.heat_runtime[core_heating_day_set]
def calculate_epa_field_savings_metrics(self,
core_cooling_day_set_method="entire_dataset",
core_heating_day_set_method="entire_dataset",
climate_zone_mapping=None):
""" Calculates metrics for connected thermostat savings as defined by
the specification defined by the EPA Energy Star program and stakeholders.
Parameters
----------
core_cooling_day_set_method : {"entire_dataset", "year_end_to_end"}, default: "entire_dataset"
Method by which to find core cooling day sets.
- "entire_dataset": all core cooling days in dataset (days with >= 1
hour of cooling runtime and no heating runtime).
- "year_end_to_end": groups all core cooling days (days with >= 1 hour of total
cooling and no heating) from January 1 to December 31 into
independent core cooling day sets.
core_heating_day_set_method : {"entire_dataset", "year_mid_to_mid"}, default: "entire_dataset"
Method by which to find core heating day sets.
- "entire_dataset": all core heating days in dataset (days with >= 1
hour of heating runtime and no cooling runtime).
- "year_mid_to_mid": groups all core heating days (days with >= 1 hour
of total heating and no cooling) from July 1 to June 30 into
independent core heating day sets.
climate_zone_mapping : filename, default: None
A mapping from climate zone to zipcode. If None is provided, uses
default zipcode to climate zone mapping provided in tutorial.
:download:`default mapping <./resources/Building America Climate Zone to Zipcode Database_Rev2_2016.09.08.csv>`
Returns
-------
metrics : list
list of dictionaries of output metrics; one per set of core heating
or cooling days.
"""
retval = retrieve_climate_zone(climate_zone_mapping, self.zipcode)
climate_zone = retval.climate_zone
baseline_regional_cooling_comfort_temperature = retval.baseline_regional_cooling_comfort_temperature
baseline_regional_heating_comfort_temperature = retval.baseline_regional_heating_comfort_temperature
metrics = []
def avoided(baseline, observed):
return baseline - observed
def percent_savings(avoided, baseline):
try:
savings = (avoided.mean() / baseline.mean()) * 100.0
except ZeroDivisionError:
logger.debug(
'percent_savings divided by zero: %s / %s '
'for thermostat_id %s ' % (
avoided.mean(), baseline.mean(),
self.thermostat_id))
savings = np.nan
return savings
if self.equipment_type in self.COOLING_EQUIPMENT_TYPES:
for core_cooling_day_set in self.get_core_cooling_days(
method=core_cooling_day_set_method):
baseline10_comfort_temperature = \
self.get_core_cooling_day_baseline_setpoint(core_cooling_day_set)
daily_runtime = self.cool_runtime[core_cooling_day_set.daily]
(
demand,
tau,
alpha,
mse,
rmse,
cvrmse,
mape,
mae,
) = self.get_cooling_demand(core_cooling_day_set)
total_runtime_core_cooling = daily_runtime.sum()
n_days = core_cooling_day_set.daily.sum()
if np.isnan(total_runtime_core_cooling):
warn(
"WARNING: Total Runtime Core Cooling Days is nan. "
"This may mean that you have pandas 0.21.x installed "
"(which is not supported).")
if n_days == 0:
warn(
"WARNING: Number of valid cooling days is zero.")
# Raise a division error if dividing by zero and replace with np.nan instead
old_err_state = np.seterr(divide='raise')
try:
average_daily_cooling_runtime = np.divide(total_runtime_core_cooling, n_days)
except FloatingPointError:
average_daily_cooling_runtime = np.nan
np.seterr(**old_err_state)
baseline10_demand = self.get_baseline_cooling_demand(
core_cooling_day_set,
baseline10_comfort_temperature,
tau,
)
baseline10_runtime = self.get_baseline_cooling_runtime(
baseline10_demand,
alpha
)
avoided_runtime_baseline10 = avoided(baseline10_runtime, daily_runtime)
savings_baseline10 = percent_savings(avoided_runtime_baseline10, baseline10_runtime)
if baseline_regional_cooling_comfort_temperature is not None:
baseline_regional_demand = self.get_baseline_cooling_demand(
core_cooling_day_set,
baseline_regional_cooling_comfort_temperature,
tau
)
baseline_regional_runtime = self.get_baseline_cooling_runtime(
baseline_regional_demand,
alpha
)
avoided_runtime_baseline_regional = avoided(baseline_regional_runtime, daily_runtime)
savings_baseline_regional = percent_savings(avoided_runtime_baseline_regional, baseline_regional_runtime)
percent_savings_baseline_regional = savings_baseline_regional
avoided_daily_mean_core_day_runtime_baseline_regional = avoided_runtime_baseline_regional.mean()
avoided_total_core_day_runtime_baseline_regional = avoided_runtime_baseline_regional.sum()
baseline_daily_mean_core_day_runtime_baseline_regional = baseline_regional_runtime.mean()
baseline_total_core_day_runtime_baseline_regional = baseline_regional_runtime.sum()
_daily_mean_core_day_demand_baseline_baseline_regional = np.nanmean(baseline_regional_demand)
else:
baseline_regional_demand = None
baseline_regional_runtime = None
avoided_runtime_baseline_regional = None
savings_baseline_regional = None
percent_savings_baseline_regional = None
avoided_daily_mean_core_day_runtime_baseline_regional = None
avoided_total_core_day_runtime_baseline_regional = None
baseline_daily_mean_core_day_runtime_baseline_regional = None
baseline_total_core_day_runtime_baseline_regional = None
_daily_mean_core_day_demand_baseline_baseline_regional = None
n_days_both, n_days_insufficient_data = self.get_ignored_days(core_cooling_day_set)
n_core_cooling_days = self.get_core_day_set_n_days(core_cooling_day_set)
n_days_in_inputfile_date_range = self.get_inputfile_date_range(core_cooling_day_set)
core_cooling_days_mean_indoor_temperature = self.temperature_in[core_cooling_day_set.hourly].mean()
core_cooling_days_mean_outdoor_temperature = self.temperature_out[core_cooling_day_set.hourly].mean()
outputs = {
"sw_version": get_version(),
"ct_identifier": self.thermostat_id,
"equipment_type": self.equipment_type,
"heating_or_cooling": core_cooling_day_set.name,
"zipcode": self.zipcode,
"station": self.station,
"climate_zone": climate_zone,
"start_date": pd.Timestamp(core_cooling_day_set.start_date).to_pydatetime().isoformat(),
"end_date": pd.Timestamp(core_cooling_day_set.end_date).to_pydatetime().isoformat(),
"n_days_in_inputfile_date_range": n_days_in_inputfile_date_range,
"n_days_both_heating_and_cooling": n_days_both,
"n_days_insufficient_data": n_days_insufficient_data,
"n_core_cooling_days": n_core_cooling_days,
"baseline_percentile_core_cooling_comfort_temperature": baseline10_comfort_temperature,
"regional_average_baseline_cooling_comfort_temperature": baseline_regional_cooling_comfort_temperature,
"percent_savings_baseline_percentile": savings_baseline10,
"avoided_daily_mean_core_day_runtime_baseline_percentile": avoided_runtime_baseline10.mean(),
"avoided_total_core_day_runtime_baseline_percentile": avoided_runtime_baseline10.sum(),
"baseline_daily_mean_core_day_runtime_baseline_percentile": baseline10_runtime.mean(),
"baseline_total_core_day_runtime_baseline_percentile": baseline10_runtime.sum(),
"_daily_mean_core_day_demand_baseline_baseline_percentile": np.nanmean(baseline10_demand),
"percent_savings_baseline_regional": percent_savings_baseline_regional,
"avoided_daily_mean_core_day_runtime_baseline_regional": avoided_daily_mean_core_day_runtime_baseline_regional,
"avoided_total_core_day_runtime_baseline_regional": avoided_total_core_day_runtime_baseline_regional,
"baseline_daily_mean_core_day_runtime_baseline_regional": baseline_daily_mean_core_day_runtime_baseline_regional,
"baseline_total_core_day_runtime_baseline_regional": baseline_total_core_day_runtime_baseline_regional,
"_daily_mean_core_day_demand_baseline_baseline_regional": _daily_mean_core_day_demand_baseline_baseline_regional,
"mean_demand": np.nanmean(demand),
"tau": tau,
"alpha": alpha,
"mean_sq_err": mse,
"root_mean_sq_err": rmse,
"cv_root_mean_sq_err": cvrmse,
"mean_abs_pct_err": mape,
"mean_abs_err": mae,
"total_core_cooling_runtime": total_runtime_core_cooling,
"daily_mean_core_cooling_runtime": average_daily_cooling_runtime,
"core_cooling_days_mean_indoor_temperature": core_cooling_days_mean_indoor_temperature,
"core_cooling_days_mean_outdoor_temperature": core_cooling_days_mean_outdoor_temperature,
"core_mean_indoor_temperature": core_cooling_days_mean_indoor_temperature,
"core_mean_outdoor_temperature": core_cooling_days_mean_outdoor_temperature,
}
metrics.append(outputs)
if self.equipment_type in self.HEATING_EQUIPMENT_TYPES:
for core_heating_day_set in self.get_core_heating_days(method=core_heating_day_set_method):
baseline90_comfort_temperature = \
self.get_core_heating_day_baseline_setpoint(core_heating_day_set)
# deltaT
daily_runtime = self.heat_runtime[core_heating_day_set.daily]
(
demand,
tau,
alpha,
mse,
rmse,
cvrmse,
mape,
mae,
) = self.get_heating_demand(core_heating_day_set)
total_runtime_core_heating = daily_runtime.sum()
n_days = core_heating_day_set.daily.sum()
if np.isnan(total_runtime_core_heating):
warn(
"WARNING: Total Runtime Core Heating is nan. "
"This may mean that you have pandas 0.21.x installed "
"(which is not supported).")
if n_days == 0:
warn(
"WARNING: Number of valid heating days is zero.")
# Raise a FloatingPointError on division by zero so the result can be replaced with np.nan
old_err_state = np.seterr(divide='raise')
try:
average_daily_heating_runtime = np.divide(total_runtime_core_heating, n_days)
except FloatingPointError:
average_daily_heating_runtime = np.nan
np.seterr(**old_err_state)
baseline90_demand = self.get_baseline_heating_demand(
core_heating_day_set,
baseline90_comfort_temperature,
tau,
)
baseline90_runtime = self.get_baseline_heating_runtime(
baseline90_demand,
alpha,
)
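# Added note: the two helper calls below presumably compute, day by day, the
# runtime avoided relative to the baseline (baseline runtime minus observed
# runtime) and express it as a percentage of the baseline runtime.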
avoided_runtime_baseline90 = avoided(baseline90_runtime, daily_runtime)
savings_baseline90 = percent_savings(avoided_runtime_baseline90, baseline90_runtime)
if baseline_regional_heating_comfort_temperature is not None:
baseline_regional_demand = self.get_baseline_heating_demand(
core_heating_day_set,
baseline_regional_heating_comfort_temperature,
tau,
)
baseline_regional_runtime = self.get_baseline_heating_runtime(
baseline_regional_demand,
alpha,
)
avoided_runtime_baseline_regional = avoided(baseline_regional_runtime, daily_runtime)
savings_baseline_regional = percent_savings(avoided_runtime_baseline_regional, baseline_regional_runtime)
percent_savings_baseline_regional = savings_baseline_regional
avoided_daily_mean_core_day_runtime_baseline_regional = avoided_runtime_baseline_regional.mean()
avoided_total_core_day_runtime_baseline_regional = avoided_runtime_baseline_regional.sum()
baseline_daily_mean_core_day_runtime_baseline_regional = baseline_regional_runtime.mean()
baseline_total_core_day_runtime_baseline_regional = baseline_regional_runtime.sum()
_daily_mean_core_day_demand_baseline_baseline_regional = np.nanmean(baseline_regional_demand)
else:
baseline_regional_demand = None
baseline_regional_runtime = None
avoided_runtime_baseline_regional = None
savings_baseline_regional = None
percent_savings_baseline_regional = None
avoided_daily_mean_core_day_runtime_baseline_regional = None
avoided_total_core_day_runtime_baseline_regional = None
baseline_daily_mean_core_day_runtime_baseline_regional = None
baseline_total_core_day_runtime_baseline_regional = None
_daily_mean_core_day_demand_baseline_baseline_regional = None
n_days_both, n_days_insufficient_data = self.get_ignored_days(core_heating_day_set)
n_core_heating_days = self.get_core_day_set_n_days(core_heating_day_set)
n_days_in_inputfile_date_range = self.get_inputfile_date_range(core_heating_day_set)
core_heating_days_mean_indoor_temperature = self.temperature_in[core_heating_day_set.hourly].mean()
core_heating_days_mean_outdoor_temperature = self.temperature_out[core_heating_day_set.hourly].mean()
outputs = {
"sw_version": get_version(),
"ct_identifier": self.thermostat_id,
"equipment_type": self.equipment_type,
"heating_or_cooling": core_heating_day_set.name,
"zipcode": self.zipcode,
"station": self.station,
"climate_zone": climate_zone,
"start_date": pd.Timestamp(core_heating_day_set.start_date).to_pydatetime().isoformat(),
"end_date": pd.Timestamp(core_heating_day_set.end_date).to_pydatetime().isoformat(),
"n_days_in_inputfile_date_range": n_days_in_inputfile_date_range,
"n_days_both_heating_and_cooling": n_days_both,
"n_days_insufficient_data": n_days_insufficient_data,
"n_core_heating_days": n_core_heating_days,
"baseline_percentile_core_heating_comfort_temperature": baseline90_comfort_temperature,
"regional_average_baseline_heating_comfort_temperature": baseline_regional_heating_comfort_temperature,
"percent_savings_baseline_percentile": savings_baseline90,
"avoided_daily_mean_core_day_runtime_baseline_percentile": avoided_runtime_baseline90.mean(),
"avoided_total_core_day_runtime_baseline_percentile": avoided_runtime_baseline90.sum(),
"baseline_daily_mean_core_day_runtime_baseline_percentile": baseline90_runtime.mean(),
"baseline_total_core_day_runtime_baseline_percentile": baseline90_runtime.sum(),
"_daily_mean_core_day_demand_baseline_baseline_percentile": np.nanmean(baseline90_demand),
"percent_savings_baseline_regional": savings_baseline_regional,
"avoided_daily_mean_core_day_runtime_baseline_regional": avoided_daily_mean_core_day_runtime_baseline_regional,
"avoided_total_core_day_runtime_baseline_regional": avoided_total_core_day_runtime_baseline_regional,
"baseline_daily_mean_core_day_runtime_baseline_regional": baseline_daily_mean_core_day_runtime_baseline_regional,
"baseline_total_core_day_runtime_baseline_regional": baseline_total_core_day_runtime_baseline_regional,
"_daily_mean_core_day_demand_baseline_baseline_regional": _daily_mean_core_day_demand_baseline_baseline_regional,
"mean_demand": np.nanmean(demand),
"tau": tau,
"alpha": alpha,
"mean_sq_err": mse,
"root_mean_sq_err": rmse,
"cv_root_mean_sq_err": cvrmse,
"mean_abs_pct_err": mape,
"mean_abs_err": mae,
"total_core_heating_runtime": total_runtime_core_heating,
"daily_mean_core_heating_runtime": average_daily_heating_runtime,
"core_heating_days_mean_indoor_temperature": core_heating_days_mean_indoor_temperature,
"core_heating_days_mean_outdoor_temperature": core_heating_days_mean_outdoor_temperature,
"core_mean_indoor_temperature": core_heating_days_mean_indoor_temperature,
"core_mean_outdoor_temperature": core_heating_days_mean_outdoor_temperature,
}
if self.equipment_type in self.AUX_EMERG_EQUIPMENT_TYPES:
additional_outputs = {
"total_auxiliary_heating_core_day_runtime":
self.total_auxiliary_heating_runtime(
core_heating_day_set),
"total_emergency_heating_core_day_runtime":
self.total_emergency_heating_runtime(
core_heating_day_set),
}
# Add RHU Calculations
for rhu_type in ('rhu1', 'rhu2'):
if rhu_type == 'rhu2':
min_runtime_minutes = VAR_MIN_RHU_RUNTIME
else:
min_runtime_minutes = None
rhu_runtime = self.get_resistance_heat_utilization_runtime(core_heating_day_set)
# Add duty cycle records
heat_runtime = rhu_runtime.heat_runtime.sum()
aux_runtime = rhu_runtime.aux_runtime.sum()
emg_runtime = rhu_runtime.emg_runtime.sum()
total_minutes = rhu_runtime.total_minutes.sum()
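# Added note: each duty cycle below is the fraction of the observed minutes in
# this core heating day set during which the given runtime type (auxiliary,
# emergency, or compressor heat) was active.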
additional_outputs[rhu_type + '_aux_duty_cycle'] = aux_runtime / total_minutes
additional_outputs[rhu_type + '_emg_duty_cycle'] = emg_runtime / total_minutes
additional_outputs[rhu_type + '_compressor_duty_cycle'] = heat_runtime / total_minutes
rhu_first = self.get_resistance_heat_utilization_bins(
rhu_runtime,
RESISTANCE_HEAT_USE_BIN_FIRST,
core_heating_day_set,
min_runtime_minutes)
rhu_second = self.get_resistance_heat_utilization_bins(
rhu_runtime,
RESISTANCE_HEAT_USE_BIN_SECOND,
core_heating_day_set,
min_runtime_minutes)
for duty_cycle in (None, 'aux_duty_cycle', 'emg_duty_cycle', 'compressor_duty_cycle'):
if rhu_first is not None:
for item in rhu_first.itertuples():
column = self._format_rhu(
rhu_type=rhu_type,
low=item.Index.left,
high=item.Index.right,
duty_cycle=duty_cycle)
if duty_cycle is None:
additional_outputs[column] = item.rhu
else:
additional_outputs[column] = getattr(item, duty_cycle)
else:
for (low, high) in RESISTANCE_HEAT_USE_BIN_FIRST_TUPLE:
column = self._format_rhu(
rhu_type,
low,
high,
duty_cycle)
additional_outputs[column] = None
if rhu_second is not None:
for item in rhu_second.itertuples():
column = self._format_rhu(
rhu_type=rhu_type,
low=item.Index.left,
high=item.Index.right,
duty_cycle=duty_cycle)
if duty_cycle is None:
additional_outputs[column] = item.rhu
else:
additional_outputs[column] = getattr(item, duty_cycle)
else:
for (low, high) in RESISTANCE_HEAT_USE_BIN_SECOND_TUPLE:
column = self._format_rhu(
rhu_type,
low,
high,
duty_cycle)
additional_outputs[column] = None
outputs.update(additional_outputs)
metrics.append(outputs)
return metrics
| mit |
shakamunyi/tensorflow | tensorflow/examples/learn/iris_custom_model.py | 37 | 3651 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for Iris plant dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
import tensorflow as tf
X_FEATURE = 'x' # Name of the input feature.
def my_model(features, labels, mode):
"""DNN with three hidden layers, and dropout of 0.1 probability."""
# Create three fully connected layers respectively of size 10, 20, and 10 with
# each layer having a dropout probability of 0.1.
net = features[X_FEATURE]
for units in [10, 20, 10]:
net = tf.layers.dense(net, units=units, activation=tf.nn.relu)
net = tf.layers.dropout(net, rate=0.1)
# Compute logits (1 per class).
logits = tf.layers.dense(net, 3, activation=None)
# Compute predictions.
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
# Convert the labels to a one-hot tensor of shape (length of features, 3)
# with an on-value of 1 for each one-hot vector of length 3.
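# Illustrative example (added): with depth 3, on-value 1 and off-value 0,
# a label of 1 becomes the vector [0, 1, 0].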
onehot_labels = tf.one_hot(labels, 3, 1, 0)
# Compute loss.
loss = tf.losses.softmax_cross_entropy(
onehot_labels=onehot_labels, logits=logits)
# Create training op.
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdagradOptimizer(learning_rate=0.1)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
# Compute evaluation metrics.
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = model_selection.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
classifier = tf.estimator.Estimator(model_fn=my_model)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True)
classifier.train(input_fn=train_input_fn, steps=1000)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
mktumbi/SimAnaRep | SimAnaRepproRMSD.py | 1 | 2516 | import MDAnalysis
import matplotlib.pyplot as plt
import numpy as np
from MDAnalysis.analysis.align import *
from MDAnalysis.analysis.rms import rmsd
def proRMSD(u,ref):
"""
This function produces RMSD data and plots for Protein.
:input
1) Universe of Trajectory
2) reference universe
:return
1) matplot object
2) array for RMSD data.
"""
RMSD = []
RMSDAllAtom = []
backbone = u.select_atoms("protein and (name C or name N or name CA)")
reference = ref.select_atoms("protein and (name C or name N or name CA)")
Allcurrent = u.select_atoms("protein and not name H*")
Allreference = ref.select_atoms("protein and not name H*")
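# Added note: as called here, rmsd() compares raw coordinates frame by frame;
# no structural superposition is performed first (the align import above is
# not used inside this function).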
for ts in u.trajectory:
A = backbone.coordinates()
B = reference.coordinates()
E = Allcurrent.coordinates()
F = Allreference.coordinates()
C = rmsd(A,B)
G = rmsd(E,F)
RMSD.append((u.trajectory.frame, C))
RMSDAllAtom.append((u.trajectory.frame, G))
RMSD = np.array(RMSD)
RMSDAllAtom = np.array(RMSDAllAtom)
#print RMSDAllAtom
#print RMSD
ax = plt.subplot(111)
ax.plot(RMSD[:,0], RMSD[:,1], 'r', lw=2, label="Calpha RMSD")
ax.plot(RMSDAllAtom[:,0], RMSDAllAtom[:,1], 'g', lw=2, label="All Atom RMSD (noH)")
ax.set_xlabel("Frame")
ax.set_ylabel(r"RMSD of Backbone ($\AA$)")
#ax.figure.savefig("RMSD.pdf")
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles, labels, loc = 'lower left')
#plt.draw()
return ax, RMSD, RMSDAllAtom
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='This script will compute and plot protein RMSD for a given universe (trajectory).')
parser.add_argument('-j', '--jobname', help='Enter your job name and it will appear as first coloumn in the result file', default='Test')
parser.add_argument('-trj', '--trajectory', help='Filename of Trajecotry file.', required=True)
parser.add_argument('-top', '--topology', help='Filename of psf/topology file', required=True)
args = parser.parse_args()
u = MDAnalysis.Universe(args.topology, args.trajectory)
ref = MDAnalysis.Universe(args.topology, args.trajectory)
caRMSD =[]
allRMSD = []
fig,caRMSD,allRMSD = proRMSD(u,ref)
#print caRMSD
np.savetxt(args.jobname+"-caRMSD-pro.data", caRMSD)
np.savetxt(args.jobname+"-allRMSD-pro.data", allRMSD)
fig.figure.savefig(args.jobname+"-proRMSD.pdf") | gpl-2.0 |
rs2/pandas | pandas/tests/io/excel/conftest.py | 8 | 1355 | import pytest
import pandas.util._test_decorators as td
import pandas._testing as tm
from pandas.io.parsers import read_csv
@pytest.fixture
def frame(float_frame):
"""
Returns the first ten items in fixture "float_frame".
"""
return float_frame[:10]
@pytest.fixture
def tsframe():
return tm.makeTimeDataFrame()[:5]
@pytest.fixture(params=[True, False])
def merge_cells(request):
return request.param
@pytest.fixture
def df_ref(datapath):
"""
Obtain the reference data from read_csv with the Python engine.
"""
filepath = datapath("io", "data", "csv", "test1.csv")
df_ref = read_csv(filepath, index_col=0, parse_dates=True, engine="python")
return df_ref
@pytest.fixture(params=[".xls", ".xlsx", ".xlsm", ".ods", ".xlsb"])
def read_ext(request):
"""
Valid extensions for reading Excel files.
"""
return request.param
@pytest.fixture(autouse=True)
def check_for_file_leaks():
"""
Fixture to run around every test to ensure that we are not leaking files.
See also
--------
_test_decorators.check_file_leaks
"""
# GH#30162
psutil = td.safe_import("psutil")
if not psutil:
yield
else:
proc = psutil.Process()
flist = proc.open_files()
yield
flist2 = proc.open_files()
assert flist == flist2
| bsd-3-clause |
abimannans/scikit-learn | sklearn/feature_selection/tests/test_from_model.py | 244 | 1593 | import numpy as np
import scipy.sparse as sp
from nose.tools import assert_raises, assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.svm import LinearSVC
iris = load_iris()
def test_transform_linear_model():
for clf in (LogisticRegression(C=0.1),
LinearSVC(C=0.01, dual=False),
SGDClassifier(alpha=0.001, n_iter=50, shuffle=True,
random_state=0)):
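# Added note: string thresholds such as ".09*mean" and "1e-5 * median" are
# interpreted relative to the mean / median of the magnitudes of the fitted
# coefficients when selecting features.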
for thresh in (None, ".09*mean", "1e-5 * median"):
for func in (np.array, sp.csr_matrix):
X = func(iris.data)
clf.set_params(penalty="l1")
clf.fit(X, iris.target)
X_new = clf.transform(X, thresh)
if isinstance(clf, SGDClassifier):
assert_true(X_new.shape[1] <= X.shape[1])
else:
assert_less(X_new.shape[1], X.shape[1])
clf.set_params(penalty="l2")
clf.fit(X_new, iris.target)
pred = clf.predict(X_new)
assert_greater(np.mean(pred == iris.target), 0.7)
def test_invalid_input():
clf = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=None)
clf.fit(iris.data, iris.target)
assert_raises(ValueError, clf.transform, iris.data, "gobbledigook")
assert_raises(ValueError, clf.transform, iris.data, ".5 * gobbledigook")
| bsd-3-clause |
hanw/connectal | examples/gyro_simple/test_gyro.py | 3 | 6205 | #!/usr/bin/env python
# Copyright (c) 2013 Quanta Research Cambridge, Inc.
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy,
# modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import socket
import struct
import time
import ctypes
import os
import numpy
import pandas as pd
import math
from gyroVisualize import *
import argparse
import json
sys.path.append(os.path.abspath('../../scripts'))
import portalJson
class gyro_stream:
def __init__(self, lpf=False):
self.times = 0
self.tails = [[],[],[]]
self.means = [0,0,0]
self.calibrate_window = 0
self.sample_freq_hz = 100
self.lpf = lpf
def radians(self, sample):
# sensitivity of sample is 70 milli-degrees-per-second/digit.
# multiply sample by 70 to get milli-degrees-per-second
# divide by sample_freq_hz to get milli-degrees
# divide by 1000 to get degrees
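# Worked example (added, illustrative): a raw reading of 1000 digits sampled at
# 100 Hz corresponds to 1000 * 70 / 100 / 1000 = 0.7 degrees over that interval.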
return (math.radians(sample[0]*70.0/self.sample_freq_hz/1000.0),
math.radians(-sample[1]*70.0/self.sample_freq_hz/1000.0),
math.radians(-sample[2]*70.0/self.sample_freq_hz/1000.0))
def next_samples(self,samples):
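# Added summary: the first `octave_length` batches accumulate per-axis
# calibration means (optionally dumped to an Octave script); later batches are
# mean-subtracted, optionally smoothed with a rolling mean when lpf=True, and
# converted to radians before being returned.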
self.times = self.times+1
octave_length = 20
window_sz = 10
rv = []
write_octave = True
if (write_octave):
octave_file = open("x.m", "w");
octave_file.write("#! /usr/bin/octave --persist \nv = [");
num_samples = len(samples)
if (self.lpf):
x = numpy.concatenate((self.tails[0],samples[0::3]),0)
y = numpy.concatenate((self.tails[1],samples[1::3]),0)
z = numpy.concatenate((self.tails[2],samples[2::3]),0)
xs = pd.rolling_mean(pd.Series(x),window=window_sz)[window_sz:]
ys = pd.rolling_mean(pd.Series(y),window=window_sz)[window_sz:]
zs = pd.rolling_mean(pd.Series(z),window=window_sz)[window_sz:]
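# Added note: pd.rolling_mean is the old (pre-0.18) pandas API; in newer pandas
# the equivalent is pd.Series(x).rolling(window=window_sz).mean()[window_sz:].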
self.tails[0] = x[-window_sz:]
self.tails[1] = y[-window_sz:]
self.tails[2] = z[-window_sz:]
else:
xs = samples[0::3]
ys = samples[1::3]
zs = samples[2::3]
if (self.times <= octave_length):
print self.times
for x,y,z in zip(xs,ys,zs):
#print "%d %d %d" % (x,y,z)
if (self.times <= octave_length):
self.calibrate_window += 1
self.means[0] += x;
self.means[1] += y;
self.means[2] += z;
if (write_octave):
octave_file.write("%d, %d, %d; \n" % (x,y,z));
else:
pos = (x-self.means[0],y-self.means[1],z-self.means[2])
rv.append(self.radians(pos))
#print "%d %d %d" %(pos[0],pos[1],pos[2])
if (self.times == octave_length):
for i in range (0,len(self.means)):
self.means[i] = self.means[i]/self.calibrate_window
print "x_mean:%d y_mean:%d, z_mean:%d\n" % (self.means[0],self.means[1],self.means[2])
if (write_octave):
octave_file.write("];\n");
octave_file.write("plot(v(:,1),color=\"r\");\n");
octave_file.write("hold on;\n");
octave_file.write("plot(v(:,2),color=\"g\");\n");
octave_file.write("plot(v(:,3),color=\"b\");\n");
octave_file.close()
print "done writing octave_file"
if (self.times > octave_length):
return rv
smoothe = False
if __name__ == "__main__":
argparser = argparse.ArgumentParser('Display gyroscope data')
argparser.add_argument('-v', '--visualize', help='Display gyro orientation in 3D rendering', default=False, action='store_true')
argparser.add_argument('-a', '--address', help='Device address', default=None)
options = argparser.parse_args()
spew = not options.visualize;
visualize = options.visualize;
print options.address
if not options.address:
options.address = os.environ['RUNPARAM']
if (visualize):
v = gv()
gs = gyro_stream()
jp = portalJson.portal(options.address, 5000)
summ = [0,0,0]
try:
while (True):
samples = []
for i in range(0,48):
d = json.loads(jp.recv())
samples.append(d['x'])
samples.append(d['y'])
samples.append(d['z'])
poss = gs.next_samples(samples)
if poss is not None:
for pos in poss:
if (spew): print "%f %f %f" % (pos[0],pos[1],pos[2])
summ[0] = summ[0]+pos[0]
summ[1] = summ[1]+pos[1]
summ[2] = summ[2]+pos[2]
if (visualize and smoothe):
v.update(summ, gs.sample_freq_hz)
time.sleep(1/gs.sample_freq_hz)
if (visualize and (not smoothe)):
v.update(summ, gs.sample_freq_hz)
if (not spew): print "%f %f %f" % (summ[0], summ[1], summ[2])
except KeyboardInterrupt:
jp.shutdown()
sys.exit()
| mit |
fyffyt/scikit-learn | sklearn/cluster/tests/test_hierarchical.py | 230 | 19795 | """
Several basic tests for hierarchical clustering procedures
"""
# Authors: Vincent Michel, 2010, Gael Varoquaux 2012,
# Matteo Visconti di Oleggio Castello 2014
# License: BSD 3 clause
from tempfile import mkdtemp
import shutil
from functools import partial
import numpy as np
from scipy import sparse
from scipy.cluster import hierarchy
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.cluster import ward_tree
from sklearn.cluster import AgglomerativeClustering, FeatureAgglomeration
from sklearn.cluster.hierarchical import (_hc_cut, _TREE_BUILDERS,
linkage_tree)
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.metrics.pairwise import PAIRED_DISTANCES, cosine_distances,\
manhattan_distances, pairwise_distances
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.neighbors.graph import kneighbors_graph
from sklearn.cluster._hierarchical import average_merge, max_merge
from sklearn.utils.fast_dict import IntFloatDict
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
def test_linkage_misc():
# Misc tests on linkage
rng = np.random.RandomState(42)
X = rng.normal(size=(5, 5))
assert_raises(ValueError, AgglomerativeClustering(linkage='foo').fit, X)
assert_raises(ValueError, linkage_tree, X, linkage='foo')
assert_raises(ValueError, linkage_tree, X, connectivity=np.ones((4, 4)))
# Smoke test FeatureAgglomeration
FeatureAgglomeration().fit(X)
# test hierarchical clustering on a precomputed distances matrix
dis = cosine_distances(X)
res = linkage_tree(dis, affinity="precomputed")
assert_array_equal(res[0], linkage_tree(X, affinity="cosine")[0])
# test hierarchical clustering with a callable affinity (manhattan distances)
res = linkage_tree(X, affinity=manhattan_distances)
assert_array_equal(res[0], linkage_tree(X, affinity="manhattan")[0])
def test_structured_linkage_tree():
# Check that we obtain the correct solution for structured linkage trees.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
# Avoiding a mask with only 'True' entries
mask[4:7, 4:7] = 0
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for tree_builder in _TREE_BUILDERS.values():
children, n_components, n_leaves, parent = \
tree_builder(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
# Check that ward_tree raises a ValueError with a connectivity matrix
# of the wrong shape
assert_raises(ValueError,
tree_builder, X.T, np.ones((4, 4)))
# Check that fitting with no samples raises an error
assert_raises(ValueError,
tree_builder, X.T[:0], connectivity)
def test_unstructured_linkage_tree():
# Check that we obtain the correct solution for unstructured linkage trees.
rng = np.random.RandomState(0)
X = rng.randn(50, 100)
for this_X in (X, X[0]):
# With a specified number of clusters, just for the sake of
# raising a warning and testing the warning code
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, ward_tree, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
for tree_builder in _TREE_BUILDERS.values():
for this_X in (X, X[0]):
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, tree_builder, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
def test_height_linkage_tree():
# Check that the height of the results of linkage tree is sorted.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for linkage_func in _TREE_BUILDERS.values():
children, n_nodes, n_leaves, parent = linkage_func(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
def test_agglomerative_clustering():
# Check that we obtain the correct number of clusters with
# agglomerative clustering.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
n_samples = 100
X = rng.randn(n_samples, 50)
connectivity = grid_to_graph(*mask.shape)
for linkage in ("ward", "complete", "average"):
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage=linkage)
clustering.fit(X)
# test caching
try:
tempdir = mkdtemp()
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity,
memory=tempdir,
linkage=linkage)
clustering.fit(X)
labels = clustering.labels_
assert_true(np.size(np.unique(labels)) == 10)
finally:
shutil.rmtree(tempdir)
# Turn caching off now
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity, linkage=linkage)
# Check that we obtain the same solution with early-stopping of the
# tree building
clustering.compute_full_tree = False
clustering.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering.labels_,
labels), 1)
clustering.connectivity = None
clustering.fit(X)
assert_true(np.size(np.unique(clustering.labels_)) == 10)
# Check that we raise a TypeError on dense matrices
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=sparse.lil_matrix(
connectivity.toarray()[:10, :10]),
linkage=linkage)
assert_raises(ValueError, clustering.fit, X)
# Test that using ward with another metric than euclidean raises an
# exception
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=connectivity.toarray(),
affinity="manhattan",
linkage="ward")
assert_raises(ValueError, clustering.fit, X)
# Test using another metric than euclidean works with linkage complete
for affinity in PAIRED_DISTANCES.keys():
# Compare our (structured) implementation to scipy
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=np.ones((n_samples, n_samples)),
affinity=affinity,
linkage="complete")
clustering.fit(X)
clustering2 = AgglomerativeClustering(
n_clusters=10,
connectivity=None,
affinity=affinity,
linkage="complete")
clustering2.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering2.labels_,
clustering.labels_),
1)
# Test that using a distance matrix (affinity = 'precomputed') has same
# results (with connectivity constraints)
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage="complete")
clustering.fit(X)
X_dist = pairwise_distances(X)
clustering2 = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
affinity='precomputed',
linkage="complete")
clustering2.fit(X_dist)
assert_array_equal(clustering.labels_, clustering2.labels_)
def test_ward_agglomeration():
# Check that we obtain the correct solution in a simplistic case
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
agglo = FeatureAgglomeration(n_clusters=5, connectivity=connectivity)
agglo.fit(X)
assert_true(np.size(np.unique(agglo.labels_)) == 5)
X_red = agglo.transform(X)
assert_true(X_red.shape[1] == 5)
X_full = agglo.inverse_transform(X_red)
assert_true(np.unique(X_full[0]).size == 5)
assert_array_almost_equal(agglo.transform(X_full), X_red)
# Check that fitting with no samples raises a ValueError
assert_raises(ValueError, agglo.fit, X[:0])
def assess_same_labelling(cut1, cut2):
"""Util for comparison with scipy"""
co_clust = []
for cut in [cut1, cut2]:
n = len(cut)
k = cut.max() + 1
ecut = np.zeros((n, k))
ecut[np.arange(n), cut] = 1
co_clust.append(np.dot(ecut, ecut.T))
assert_true((co_clust[0] == co_clust[1]).all())
def test_scikit_vs_scipy():
# Test scikit linkage with full connectivity (i.e. unstructured) vs scipy
n, p, k = 10, 5, 3
rng = np.random.RandomState(0)
# Not using a lil_matrix here, just to check that non sparse
# matrices are well handled
connectivity = np.ones((n, n))
for linkage in _TREE_BUILDERS.keys():
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out = hierarchy.linkage(X, method=linkage)
children_ = out[:, :2].astype(np.int)
children, _, n_leaves, _ = _TREE_BUILDERS[linkage](X, connectivity)
cut = _hc_cut(k, children, n_leaves)
cut_ = _hc_cut(k, children_, n_leaves)
assess_same_labelling(cut, cut_)
# Test error management in _hc_cut
assert_raises(ValueError, _hc_cut, n_leaves + 1, children, n_leaves)
def test_connectivity_propagation():
# Check that connectivity in the ward tree is propagated correctly during
# merging.
X = np.array([(.014, .120), (.014, .099), (.014, .097),
(.017, .153), (.017, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .152), (.018, .149), (.018, .144)])
connectivity = kneighbors_graph(X, 10, include_self=False)
ward = AgglomerativeClustering(
n_clusters=4, connectivity=connectivity, linkage='ward')
# If changes are not propagated correctly, fit crashes with an
# IndexError
ward.fit(X)
def test_ward_tree_children_order():
# Check that children are ordered in the same way for both structured and
# unstructured versions of ward_tree.
# test on five random datasets
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X)
out_structured = ward_tree(X, connectivity=connectivity)
assert_array_equal(out_unstructured[0], out_structured[0])
def test_ward_linkage_tree_return_distance():
# Test return_distance option on linkage and ward trees
# test that return_distance when set true, gives same
# output on both structured and unstructured clustering.
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X, return_distance=True)
out_structured = ward_tree(X, connectivity=connectivity,
return_distance=True)
# get children
children_unstructured = out_unstructured[0]
children_structured = out_structured[0]
# check if we got the same clusters
assert_array_equal(children_unstructured, children_structured)
# check if the distances are the same
dist_unstructured = out_unstructured[-1]
dist_structured = out_structured[-1]
assert_array_almost_equal(dist_unstructured, dist_structured)
for linkage in ['average', 'complete']:
structured_items = linkage_tree(
X, connectivity=connectivity, linkage=linkage,
return_distance=True)[-1]
unstructured_items = linkage_tree(
X, linkage=linkage, return_distance=True)[-1]
structured_dist = structured_items[-1]
unstructured_dist = unstructured_items[-1]
structured_children = structured_items[0]
unstructured_children = unstructured_items[0]
assert_array_almost_equal(structured_dist, unstructured_dist)
assert_array_almost_equal(
structured_children, unstructured_children)
# test on the following dataset where we know the truth
# taken from scipy/cluster/tests/hierarchy_test_data.py
X = np.array([[1.43054825, -7.5693489],
[6.95887839, 6.82293382],
[2.87137846, -9.68248579],
[7.87974764, -6.05485803],
[8.24018364, -6.09495602],
[7.39020262, 8.54004355]])
# truth
linkage_X_ward = np.array([[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 9.10208346, 4.],
[7., 9., 24.7784379, 6.]])
linkage_X_complete = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.96742194, 4.],
[7., 9., 18.77445997, 6.]])
linkage_X_average = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.55832839, 4.],
[7., 9., 15.44089605, 6.]])
n_samples, n_features = np.shape(X)
connectivity_X = np.ones((n_samples, n_samples))
out_X_unstructured = ward_tree(X, return_distance=True)
out_X_structured = ward_tree(X, connectivity=connectivity_X,
return_distance=True)
# check that the labels are the same
assert_array_equal(linkage_X_ward[:, :2], out_X_unstructured[0])
assert_array_equal(linkage_X_ward[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_unstructured[4])
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_structured[4])
linkage_options = ['complete', 'average']
X_linkage_truth = [linkage_X_complete, linkage_X_average]
for (linkage, X_truth) in zip(linkage_options, X_linkage_truth):
out_X_unstructured = linkage_tree(
X, return_distance=True, linkage=linkage)
out_X_structured = linkage_tree(
X, connectivity=connectivity_X, linkage=linkage,
return_distance=True)
# check that the labels are the same
assert_array_equal(X_truth[:, :2], out_X_unstructured[0])
assert_array_equal(X_truth[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(X_truth[:, 2], out_X_unstructured[4])
assert_array_almost_equal(X_truth[:, 2], out_X_structured[4])
def test_connectivity_fixing_non_lil():
# Check non regression of a bug if a non item assignable connectivity is
# provided with more than one component.
# create dummy data
x = np.array([[0, 0], [1, 1]])
# create a mask with several components to force connectivity fixing
m = np.array([[True, False], [False, True]])
c = grid_to_graph(n_x=2, n_y=2, mask=m)
w = AgglomerativeClustering(connectivity=c, linkage='ward')
assert_warns(UserWarning, w.fit, x)
def test_int_float_dict():
rng = np.random.RandomState(0)
keys = np.unique(rng.randint(100, size=10).astype(np.intp))
values = rng.rand(len(keys))
d = IntFloatDict(keys, values)
for key, value in zip(keys, values):
assert d[key] == value
other_keys = np.arange(50).astype(np.intp)[::2]
other_values = 0.5 * np.ones(50)[::2]
other = IntFloatDict(other_keys, other_values)
# Complete smoke test
max_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
average_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
def test_connectivity_callable():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(
connectivity=partial(kneighbors_graph, n_neighbors=3, include_self=False))
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_connectivity_ignores_diagonal():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
connectivity_include_self = kneighbors_graph(X, 3, include_self=True)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(connectivity=connectivity_include_self)
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_compute_full_tree():
# Test that the full tree is computed if n_clusters is small
rng = np.random.RandomState(0)
X = rng.randn(10, 2)
connectivity = kneighbors_graph(X, 5, include_self=False)
# When n_clusters is small, the full tree should be built
# that is the number of merges should be n_samples - 1
agc = AgglomerativeClustering(n_clusters=2, connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - 1)
# When n_clusters is large, greater than max of 100 and 0.02 * n_samples.
# we should stop when there are n_clusters.
n_clusters = 101
X = rng.randn(200, 2)
connectivity = kneighbors_graph(X, 10, include_self=False)
agc = AgglomerativeClustering(n_clusters=n_clusters,
connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - n_clusters)
def test_n_components():
# Test n_components returned by linkage, average and ward tree
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Connectivity matrix having five components.
connectivity = np.eye(5)
for linkage_func in _TREE_BUILDERS.values():
assert_equal(ignore_warnings(linkage_func)(X, connectivity)[1], 5)
def test_agg_n_clusters():
# Test that an error is raised when n_clusters <= 0
rng = np.random.RandomState(0)
X = rng.rand(20, 10)
for n_clus in [-1, 0]:
agc = AgglomerativeClustering(n_clusters=n_clus)
msg = ("n_clusters should be an integer greater than 0."
" %s was provided." % str(agc.n_clusters))
assert_raise_message(ValueError, msg, agc.fit, X)
| bsd-3-clause |
lbishal/scikit-learn | sklearn/utils/tests/test_utils.py | 35 | 9000 | import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import pinv2
from scipy.linalg import eigh
from itertools import chain
from sklearn.utils.testing import (assert_equal, assert_raises, assert_true,
assert_almost_equal, assert_array_equal,
SkipTest, assert_raises_regex,
assert_greater_equal)
from sklearn.utils import check_random_state
from sklearn.utils import deprecated
from sklearn.utils import resample
from sklearn.utils import safe_mask
from sklearn.utils import column_or_1d
from sklearn.utils import safe_indexing
from sklearn.utils import shuffle
from sklearn.utils import gen_even_slices
from sklearn.utils.extmath import pinvh
from sklearn.utils.arpack import eigsh
from sklearn.utils.mocking import MockDataFrame
from sklearn.utils.graph import graph_laplacian
def test_make_rng():
# Check the check_random_state utility function behavior
assert_true(check_random_state(None) is np.random.mtrand._rand)
assert_true(check_random_state(np.random) is np.random.mtrand._rand)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(42).randint(100) == rng_42.randint(100))
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(rng_42) is rng_42)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(43).randint(100) != rng_42.randint(100))
assert_raises(ValueError, check_random_state, "some invalid seed")
def test_resample_noarg():
# Border case not worth mentioning in doctests
assert_true(resample() is None)
def test_deprecated():
# Test whether the deprecated decorator issues appropriate warnings
# Copied almost verbatim from http://docs.python.org/library/warnings.html
# First a function...
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated()
def ham():
return "spam"
spam = ham()
assert_equal(spam, "spam") # function must remain usable
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
# ... then a class.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated("don't use this")
class Ham(object):
SPAM = 1
ham = Ham()
assert_true(hasattr(ham, "SPAM"))
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
def test_resample_value_errors():
# Check that invalid arguments yield ValueError
assert_raises(ValueError, resample, [0], [0, 1])
assert_raises(ValueError, resample, [0, 1], [0, 1], n_samples=3)
assert_raises(ValueError, resample, [0, 1], [0, 1], meaning_of_life=42)
def test_safe_mask():
random_state = check_random_state(0)
X = random_state.rand(5, 4)
X_csr = sp.csr_matrix(X)
mask = [False, False, True, True, True]
mask = safe_mask(X, mask)
assert_equal(X[mask].shape[0], 3)
mask = safe_mask(X_csr, mask)
assert_equal(X_csr[mask].shape[0], 3)
def test_pinvh_simple_real():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=np.float64)
a = np.dot(a, a.T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_pinvh_nonpositive():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64)
a = np.dot(a, a.T)
u, s, vt = np.linalg.svd(a)
s[0] *= -1
a = np.dot(u * s, vt) # a is now symmetric non-positive and singular
a_pinv = pinv2(a)
a_pinvh = pinvh(a)
assert_almost_equal(a_pinv, a_pinvh)
def test_pinvh_simple_complex():
a = (np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
+ 1j * np.array([[10, 8, 7], [6, 5, 4], [3, 2, 1]]))
a = np.dot(a, a.conj().T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_arpack_eigsh_initialization():
# Non-regression test that shows null-space computation is better with
# initialization of eigsh from [-1,1] instead of [0,1]
random_state = check_random_state(42)
A = random_state.rand(50, 50)
A = np.dot(A.T, A) # create s.p.d. matrix
A = graph_laplacian(A) + 1e-7 * np.identity(A.shape[0])
k = 5
# Test if eigsh is working correctly
# New initialization [-1,1] (as in original ARPACK)
# Was [0,1] before, with which this test could fail
v0 = random_state.uniform(-1,1, A.shape[0])
w, _ = eigsh(A, k=k, sigma=0.0, v0=v0)
# Eigenvalues of s.p.d. matrix should be nonnegative, w[0] is smallest
assert_greater_equal(w[0], 0)
def test_column_or_1d():
EXAMPLES = [
("binary", ["spam", "egg", "spam"]),
("binary", [0, 1, 0, 1]),
("continuous", np.arange(10) / 20.),
("multiclass", [1, 2, 3]),
("multiclass", [0, 1, 2, 2, 0]),
("multiclass", [[1], [2], [3]]),
("multilabel-indicator", [[0, 1, 0], [0, 0, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("multiclass-multioutput", [[1, 1], [2, 2], [3, 1]]),
("multiclass-multioutput", [[5, 1], [4, 2], [3, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("continuous-multioutput", np.arange(30).reshape((-1, 3))),
]
for y_type, y in EXAMPLES:
if y_type in ["binary", 'multiclass', "continuous"]:
assert_array_equal(column_or_1d(y), np.ravel(y))
else:
assert_raises(ValueError, column_or_1d, y)
def test_safe_indexing():
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
inds = np.array([1, 2])
X_inds = safe_indexing(X, inds)
X_arrays = safe_indexing(np.array(X), inds)
assert_array_equal(np.array(X_inds), X_arrays)
assert_array_equal(np.array(X_inds), np.array(X)[inds])
def test_safe_indexing_pandas():
try:
import pandas as pd
except ImportError:
raise SkipTest("Pandas not found")
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = pd.DataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
# fun with read-only data in dataframes
# this happens in joblib memmapping
X.setflags(write=False)
X_df_readonly = pd.DataFrame(X)
with warnings.catch_warnings(record=True):
X_df_ro_indexed = safe_indexing(X_df_readonly, inds)
assert_array_equal(np.array(X_df_ro_indexed), X_indexed)
def test_safe_indexing_mock_pandas():
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = MockDataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
def test_shuffle_on_ndim_equals_three():
def to_tuple(A): # to make the inner arrays hashable
return tuple(tuple(tuple(C) for C in B) for B in A)
A = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) # A.shape = (2,2,2)
S = set(to_tuple(A))
shuffle(A) # shouldn't raise a ValueError for dim = 3
assert_equal(set(to_tuple(A)), S)
def test_shuffle_dont_convert_to_array():
# Check that shuffle does not try to convert to numpy arrays with float
# dtypes and can let any indexable data structure pass through.
a = ['a', 'b', 'c']
b = np.array(['a', 'b', 'c'], dtype=object)
c = [1, 2, 3]
d = MockDataFrame(np.array([['a', 0],
['b', 1],
['c', 2]],
dtype=object))
e = sp.csc_matrix(np.arange(6).reshape(3, 2))
a_s, b_s, c_s, d_s, e_s = shuffle(a, b, c, d, e, random_state=0)
assert_equal(a_s, ['c', 'b', 'a'])
assert_equal(type(a_s), list)
assert_array_equal(b_s, ['c', 'b', 'a'])
assert_equal(b_s.dtype, object)
assert_equal(c_s, [3, 2, 1])
assert_equal(type(c_s), list)
assert_array_equal(d_s, np.array([['c', 2],
['b', 1],
['a', 0]],
dtype=object))
assert_equal(type(d_s), MockDataFrame)
assert_array_equal(e_s.toarray(), np.array([[4, 5],
[2, 3],
[0, 1]]))
def test_gen_even_slices():
# check that gen_even_slices contains all samples
some_range = range(10)
joined_range = list(chain(*[some_range[slice] for slice in gen_even_slices(10, 3)]))
assert_array_equal(some_range, joined_range)
# check that passing negative n_chunks raises an error
slices = gen_even_slices(10, -1)
assert_raises_regex(ValueError, "gen_even_slices got n_packs=-1, must be"
" >=1", next, slices)
| bsd-3-clause |
leggitta/mne-python | mne/io/proj.py | 3 | 24371 | # Authors: Alexandre Gramfort <[email protected]>
# Matti Hamalainen <[email protected]>
# Denis Engemann <[email protected]>
# Teon Brooks <[email protected]>
#
# License: BSD (3-clause)
from copy import deepcopy
from math import sqrt
import numpy as np
from scipy import linalg
from itertools import count
import warnings
from .tree import dir_tree_find
from .tag import find_tag
from .constants import FIFF
from .pick import pick_types
from .write import (write_int, write_float, write_string, write_name_list,
write_float_matrix, end_block, start_block)
from ..utils import logger, verbose
from ..externals.six import string_types
class Projection(dict):
"""Projection vector
A basic class providing a meaningful repr for projection vectors.
"""
def __repr__(self):
s = "%s" % self['desc']
s += ", active : %s" % self['active']
s += ", n_channels : %s" % self['data']['ncol']
return "<Projection | %s>" % s
class ProjMixin(object):
"""Mixin class for Raw, Evoked, Epochs
Notes
-----
This mixin adds a proj attribute as a property to data containers.
It is True if at least one proj is present and all of them are active.
The projs might not be applied yet if data are not preloaded. In
this case it's the _projector attribute that does the job.
If a private _data attribute is present then the projs applied
to it are the ones marked as active.
A proj parameter passed in the constructor of Raw or Epochs calls
apply_proj, and hence afterwards the .proj attribute is True.
As soon as you've applied the projs, they will stay active in the
remaining pipeline.
The suggested pipeline is proj=True in epochs (it's cheaper than for raw).
When you use delayed SSP in Epochs, projs are applied when you call the
get_data() method. They are not applied to evoked._data unless you call
apply_proj(). The reason is that you may want to reject epochs with the
projs applied even though the projected data are not stored.
"""
@property
def proj(self):
return (len(self.info['projs']) > 0 and
all(p['active'] for p in self.info['projs']))
def add_proj(self, projs, remove_existing=False):
"""Add SSP projection vectors
Parameters
----------
projs : list
List with projection vectors.
remove_existing : bool
Remove the projection vectors currently in the file.
Returns
-------
self : instance of Raw | Epochs | Evoked
The data container.
"""
if isinstance(projs, Projection):
projs = [projs]
if (not isinstance(projs, list) and
not all(isinstance(p, Projection) for p in projs)):
raise ValueError('Only projs can be added. You supplied '
'something else.')
# mark proj as inactive, as they have not been applied
projs = deactivate_proj(projs, copy=True, verbose=self.verbose)
if remove_existing:
# we cannot remove the proj if they are active
if any(p['active'] for p in self.info['projs']):
raise ValueError('Cannot remove projectors that have '
'already been applied')
self.info['projs'] = projs
else:
self.info['projs'].extend(projs)
return self
def apply_proj(self):
"""Apply the signal space projection (SSP) operators to the data.
Notes
-----
Once the projectors have been applied, they can no longer be
removed. It is usually not recommended to apply the projectors at
too early stages, as they are applied automatically later on
(e.g. when computing inverse solutions).
Hint: using the copy method individual projection vectors
can be tested without affecting the original data.
With evoked data, consider the following example::
projs_a = mne.read_proj('proj_a.fif')
projs_b = mne.read_proj('proj_b.fif')
# add the first, copy, apply and see ...
evoked.add_proj(a).copy().apply_proj().plot()
# add the second, copy, apply and see ...
evoked.add_proj(b).copy().apply_proj().plot()
# drop the first and see again
evoked.copy().del_proj(0).apply_proj().plot()
evoked.apply_proj() # finally keep both
Returns
-------
self : instance of Raw | Epochs | Evoked
The instance.
"""
from ..epochs import _BaseEpochs
from .base import _BaseRaw
if self.info['projs'] is None or len(self.info['projs']) == 0:
logger.info('No projector specified for this dataset. '
'Please consider the method self.add_proj.')
return self
# Exit delayed mode if you apply proj
if isinstance(self, _BaseEpochs) and self._do_delayed_proj:
logger.info('Leaving delayed SSP mode.')
self._do_delayed_proj = False
if all(p['active'] for p in self.info['projs']):
logger.info('Projections have already been applied. '
'Setting proj attribute to True.')
return self
_projector, info = setup_proj(deepcopy(self.info), activate=True,
verbose=self.verbose)
# let's not raise a RuntimeError here, otherwise interactive plotting won't be fun
if _projector is None:
logger.info('The projections don\'t apply to these data.'
' Doing nothing.')
return self
self._projector, self.info = _projector, info
if isinstance(self, _BaseRaw):
if self.preload:
self._data = np.dot(self._projector, self._data)
elif isinstance(self, _BaseEpochs):
if self.preload:
for ii, e in enumerate(self._data):
self._data[ii] = self._project_epoch(e)
else:
self.load_data() # will automatically apply
else: # Evoked
self.data = np.dot(self._projector, self.data)
logger.info('SSP projectors applied...')
return self
def del_proj(self, idx):
"""Remove SSP projection vector
Note: The projection vector can only be removed if it is inactive
(has not been applied to the data).
Parameters
----------
idx : int
Index of the projector to remove.
Returns
-------
self : instance of Raw | Epochs | Evoked
"""
if self.info['projs'][idx]['active']:
raise ValueError('Cannot remove projectors that have already '
'been applied')
self.info['projs'].pop(idx)
return self
def plot_projs_topomap(self, ch_type=None, layout=None, axes=None):
"""Plot SSP vector
Parameters
----------
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None | List
The channel type to plot. For 'grad', the gradiometers are collected
in pairs and the RMS for each pair is plotted. If None
(default), it will return all channel types present. If a list of
ch_types is provided, it will return multiple figures.
layout : None | Layout | List of Layouts
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct
layout file is inferred from the data; if no appropriate layout
file was found, the layout is automatically generated from the
sensor locations. Or a list of Layout if projections
are from different sensor types.
axes : instance of Axes | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of projectors. If instance of Axes,
there must be only one projector. Defaults to None.
Returns
-------
fig : instance of matplotlib figure
Figure distributing one image per projection vector across sensor topography.
"""
if self.info['projs'] is not None and len(self.info['projs']) != 0:
from ..viz.topomap import plot_projs_topomap
from ..channels.layout import find_layout
if layout is None:
layout = []
if ch_type is None:
ch_type = [ch for ch in ['meg', 'eeg'] if ch in self]
elif isinstance(ch_type, string_types):
ch_type = [ch_type]
for ch in ch_type:
if ch in self:
layout.append(find_layout(self.info, ch, exclude=[]))
else:
err = 'Channel type %s is not found in info.' % ch
warnings.warn(err)
fig = plot_projs_topomap(self.info['projs'], layout, axes=axes)
else:
raise ValueError("Info is missing projs. Nothing to plot.")
return fig
def _proj_equal(a, b):
""" Test if two projectors are equal """
equal = (a['active'] == b['active'] and
a['kind'] == b['kind'] and
a['desc'] == b['desc'] and
a['data']['col_names'] == b['data']['col_names'] and
a['data']['row_names'] == b['data']['row_names'] and
a['data']['ncol'] == b['data']['ncol'] and
a['data']['nrow'] == b['data']['nrow'] and
np.all(a['data']['data'] == b['data']['data']))
return equal
@verbose
def _read_proj(fid, node, verbose=None):
"""Read spatial projections from a FIF file.
Parameters
----------
fid : file
The file descriptor of the open file.
node : tree node
The node of the tree where to look.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
projs: dict
The list of projections.
"""
projs = list()
# Locate the projection data
nodes = dir_tree_find(node, FIFF.FIFFB_PROJ)
if len(nodes) == 0:
return projs
tag = find_tag(fid, nodes[0], FIFF.FIFF_NCHAN)
if tag is not None:
global_nchan = int(tag.data)
items = dir_tree_find(nodes[0], FIFF.FIFFB_PROJ_ITEM)
for i in range(len(items)):
# Find all desired tags in one item
item = items[i]
tag = find_tag(fid, item, FIFF.FIFF_NCHAN)
if tag is not None:
nchan = int(tag.data)
else:
nchan = global_nchan
tag = find_tag(fid, item, FIFF.FIFF_DESCRIPTION)
if tag is not None:
desc = tag.data
else:
tag = find_tag(fid, item, FIFF.FIFF_NAME)
if tag is not None:
desc = tag.data
else:
raise ValueError('Projection item description missing')
# XXX : is this useful ?
# tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST)
# if tag is not None:
# namelist = tag.data
# else:
# raise ValueError('Projection item channel list missing')
tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_KIND)
if tag is not None:
kind = int(tag.data)
else:
raise ValueError('Projection item kind missing')
tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_NVEC)
if tag is not None:
nvec = int(tag.data)
else:
raise ValueError('Number of projection vectors not specified')
tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST)
if tag is not None:
names = tag.data.split(':')
else:
raise ValueError('Projection item channel list missing')
tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_VECTORS)
if tag is not None:
data = tag.data
else:
raise ValueError('Projection item data missing')
tag = find_tag(fid, item, FIFF.FIFF_MNE_PROJ_ITEM_ACTIVE)
if tag is not None:
active = bool(tag.data)
else:
active = False
# handle the case when data is transposed for some reason
if data.shape[0] == len(names) and data.shape[1] == nvec:
data = data.T
if data.shape[1] != len(names):
raise ValueError('Number of channel names does not match the '
'size of data matrix')
# Use exactly the same fields in data as in a named matrix
one = Projection(kind=kind, active=active, desc=desc,
data=dict(nrow=nvec, ncol=nchan, row_names=None,
col_names=names, data=data))
projs.append(one)
if len(projs) > 0:
logger.info(' Read a total of %d projection items:' % len(projs))
for k in range(len(projs)):
if projs[k]['active']:
misc = 'active'
else:
misc = ' idle'
logger.info(' %s (%d x %d) %s'
% (projs[k]['desc'], projs[k]['data']['nrow'],
projs[k]['data']['ncol'], misc))
return projs
###############################################################################
# Write
def _write_proj(fid, projs):
"""Write a projection operator to a file.
Parameters
----------
fid : file
The file descriptor of the open file.
    projs : list
        The list of projection operators to write.
"""
start_block(fid, FIFF.FIFFB_PROJ)
for proj in projs:
start_block(fid, FIFF.FIFFB_PROJ_ITEM)
write_int(fid, FIFF.FIFF_NCHAN, proj['data']['ncol'])
write_name_list(fid, FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST,
proj['data']['col_names'])
write_string(fid, FIFF.FIFF_NAME, proj['desc'])
write_int(fid, FIFF.FIFF_PROJ_ITEM_KIND, proj['kind'])
if proj['kind'] == FIFF.FIFFV_PROJ_ITEM_FIELD:
write_float(fid, FIFF.FIFF_PROJ_ITEM_TIME, 0.0)
write_int(fid, FIFF.FIFF_PROJ_ITEM_NVEC, proj['data']['nrow'])
write_int(fid, FIFF.FIFF_MNE_PROJ_ITEM_ACTIVE, proj['active'])
write_float_matrix(fid, FIFF.FIFF_PROJ_ITEM_VECTORS,
proj['data']['data'])
end_block(fid, FIFF.FIFFB_PROJ_ITEM)
end_block(fid, FIFF.FIFFB_PROJ)
###############################################################################
# Utils
def make_projector(projs, ch_names, bads=[], include_active=True):
"""Create an SSP operator from SSP projection vectors
Parameters
----------
projs : list
List of projection vectors.
ch_names : list of strings
List of channels to include in the projection matrix.
bads : list of strings
Some bad channels to exclude. If bad channels were marked
in the raw file when projs were calculated using mne-python,
they should not need to be included here as they will
have been automatically omitted from the projectors.
include_active : bool
Also include projectors that are already active.
Returns
-------
proj : array of shape [n_channels, n_channels]
The projection operator to apply to the data.
nproj : int
How many items in the projector.
U : array
The orthogonal basis of the projection vectors (optional).
"""
nchan = len(ch_names)
if nchan == 0:
raise ValueError('No channel names specified')
default_return = (np.eye(nchan, nchan), 0, [])
# Check trivial cases first
if projs is None:
return default_return
nvec = 0
nproj = 0
for p in projs:
if not p['active'] or include_active:
nproj += 1
nvec += p['data']['nrow']
if nproj == 0:
return default_return
# Pick the appropriate entries
vecs = np.zeros((nchan, nvec))
nvec = 0
nonzero = 0
for k, p in enumerate(projs):
if not p['active'] or include_active:
if (len(p['data']['col_names']) !=
len(np.unique(p['data']['col_names']))):
raise ValueError('Channel name list in projection item %d'
' contains duplicate items' % k)
# Get the two selection vectors to pick correct elements from
# the projection vectors omitting bad channels
sel = []
vecsel = []
for c, name in enumerate(ch_names):
if name in p['data']['col_names'] and name not in bads:
sel.append(c)
vecsel.append(p['data']['col_names'].index(name))
            # If there is something to pick, pick it
if len(sel) > 0:
nrow = p['data']['nrow']
vecs[sel, nvec:nvec + nrow] = p['data']['data'][:, vecsel].T
# Rescale for better detection of small singular values
for v in range(p['data']['nrow']):
psize = sqrt(np.sum(vecs[:, nvec + v] * vecs[:, nvec + v]))
if psize > 0:
vecs[:, nvec + v] /= psize
nonzero += 1
nvec += p['data']['nrow']
# Check whether all of the vectors are exactly zero
if nonzero == 0:
return default_return
# Reorthogonalize the vectors
U, S, V = linalg.svd(vecs[:, :nvec], full_matrices=False)
# Throw away the linearly dependent guys
nproj = np.sum((S / S[0]) > 1e-2)
U = U[:, :nproj]
# Here is the celebrated result
proj = np.eye(nchan, nchan) - np.dot(U, U.T)
return proj, nproj, U
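# A minimal usage sketch with a toy, hypothetical one-vector projector over three
# channels (names and values invented for illustration). make_projector returns
# ``I - np.dot(U, U.T)`` built from the normalized, re-orthogonalized vectors:
#
#     toy_proj = dict(active=False, kind=1, desc='toy',
#                     data=dict(nrow=1, ncol=3, row_names=None,
#                               col_names=['CH1', 'CH2', 'CH3'],
#                               data=np.ones((1, 3))))
#     P, nproj, U = make_projector([toy_proj], ['CH1', 'CH2', 'CH3'])
#     # P is symmetric and idempotent (P.dot(P) == P) and removes the spanned
#     # subspace: np.allclose(P.dot(np.ones(3)), 0) holds for this projector.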
def make_projector_info(info, include_active=True):
"""Make an SSP operator using the measurement info
Calls make_projector on good channels.
Parameters
----------
info : dict
Measurement info.
include_active : bool
Also include projectors that are already active.
Returns
-------
proj : array of shape [n_channels, n_channels]
The projection operator to apply to the data.
nproj : int
How many items in the projector.
"""
proj, nproj, _ = make_projector(info['projs'], info['ch_names'],
info['bads'], include_active)
return proj, nproj
@verbose
def activate_proj(projs, copy=True, verbose=None):
"""Set all projections to active
Useful before passing them to make_projector.
Parameters
----------
projs : list
The projectors.
    copy : bool
        If True, operate on and return a copy of projs; otherwise modify them in place.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
projs : list
The projectors.
"""
if copy:
projs = deepcopy(projs)
# Activate the projection items
for proj in projs:
proj['active'] = True
logger.info('%d projection items activated' % len(projs))
return projs
@verbose
def deactivate_proj(projs, copy=True, verbose=None):
"""Set all projections to inactive
Useful before saving raw data without projectors applied.
Parameters
----------
projs : list
The projectors.
    copy : bool
        If True, operate on and return a copy of projs; otherwise modify them in place.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
projs : list
The projectors.
"""
if copy:
projs = deepcopy(projs)
# Deactivate the projection items
for proj in projs:
proj['active'] = False
logger.info('%d projection items deactivated' % len(projs))
return projs
@verbose
def make_eeg_average_ref_proj(info, activate=True, verbose=None):
"""Create an EEG average reference SSP projection vector
Parameters
----------
info : dict
Measurement info.
activate : bool
If True projections are activated.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
eeg_proj: instance of Projection
The SSP/PCA projector.
"""
if info.get('custom_ref_applied', False):
raise RuntimeError('Cannot add an average EEG reference projection '
'since a custom reference has been applied to the '
'data earlier.')
logger.info("Adding average EEG reference projection.")
eeg_sel = pick_types(info, meg=False, eeg=True, ref_meg=False,
exclude='bads')
ch_names = info['ch_names']
eeg_names = [ch_names[k] for k in eeg_sel]
n_eeg = len(eeg_sel)
if n_eeg == 0:
raise ValueError('Cannot create EEG average reference projector '
'(no EEG data found)')
vec = np.ones((1, n_eeg)) / n_eeg
eeg_proj_data = dict(col_names=eeg_names, row_names=None,
data=vec, nrow=1, ncol=n_eeg)
eeg_proj = Projection(active=activate, data=eeg_proj_data,
desc='Average EEG reference',
kind=FIFF.FIFFV_MNE_PROJ_ITEM_EEG_AVREF)
return eeg_proj
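# The average-reference vector built above is ``ones(n_eeg) / n_eeg``; after the
# per-vector normalization inside make_projector it becomes ``ones(n_eeg) / sqrt(n_eeg)``,
# so the resulting operator is ``I - ones((n_eeg, n_eeg)) / n_eeg``, which subtracts
# the mean across EEG channels at every time point. A small numeric sketch with
# invented values:
#
#     n_eeg = 4
#     P = np.eye(n_eeg) - np.ones((n_eeg, n_eeg)) / n_eeg
#     x = np.array([1., 2., 3., 4.])
#     # np.dot(P, x) == x - x.mean() -> array([-1.5, -0.5, 0.5, 1.5])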
def _has_eeg_average_ref_proj(projs):
"""Determine if a list of projectors has an average EEG ref"""
for proj in projs:
if (proj['desc'] == 'Average EEG reference' or
proj['kind'] == FIFF.FIFFV_MNE_PROJ_ITEM_EEG_AVREF):
return True
return False
def _needs_eeg_average_ref_proj(info):
"""Determine if the EEG needs an averge EEG reference
This returns True if no custom reference has been applied and no average
reference projection is present in the list of projections.
"""
eeg_sel = pick_types(info, meg=False, eeg=True, ref_meg=False,
exclude='bads')
return (len(eeg_sel) > 0 and
not info['custom_ref_applied'] and
not _has_eeg_average_ref_proj(info['projs']))
@verbose
def setup_proj(info, add_eeg_ref=True, activate=True,
verbose=None):
"""Set up projection for Raw and Epochs
Parameters
----------
info : dict
The measurement info.
add_eeg_ref : bool
If True, an EEG average reference will be added (unless one
already exists).
activate : bool
If True projections are activated.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
projector : array of shape [n_channels, n_channels]
The projection operator to apply to the data.
info : dict
The modified measurement info (Warning: info is modified inplace).
"""
    # Add an EEG average reference proj if necessary
if _needs_eeg_average_ref_proj(info) and add_eeg_ref:
eeg_proj = make_eeg_average_ref_proj(info, activate=activate)
info['projs'].append(eeg_proj)
# Create the projector
projector, nproj = make_projector_info(info)
if nproj == 0:
if verbose:
logger.info('The projection vectors do not apply to these '
'channels')
projector = None
else:
logger.info('Created an SSP operator (subspace dimension = %d)'
% nproj)
# The projection items have been activated
if activate:
info['projs'] = activate_proj(info['projs'], copy=False)
return projector, info
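# A typical call chain, sketched under the assumption of an ``info`` dict carrying
# 'projs', 'ch_names' and 'bads' as used throughout this module:
#
#     projector, info = setup_proj(info, add_eeg_ref=True, activate=True)
#     if projector is not None:
#         data = np.dot(projector, data)  # apply SSP to a (n_channels, n_times) array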
def _uniquify_projs(projs):
"""Aux function"""
final_projs = []
for proj in projs: # flatten
if not any(_proj_equal(p, proj) for p in final_projs):
final_projs.append(proj)
my_count = count(len(final_projs))
def sorter(x):
"""sort in a nice way"""
digits = [s for s in x['desc'] if s.isdigit()]
if digits:
sort_idx = int(digits[-1])
else:
sort_idx = next(my_count)
return (sort_idx, x['desc'])
return sorted(final_projs, key=sorter)
| bsd-3-clause |
walkerps/ICGPM | cleaner.py | 1 | 1634 | import pandas as pd
import numpy as np
import re
from nltk import word_tokenize
from nltk.corpus import wordnet
def main(data):
#print "Data is getting Cleaned"
columns = ['OrderId','Firstname']
data.columns = columns
name_list= []
name_value= []
data = data.dropna(subset = ['Firstname'])
for name in data.Firstname:
name_list.append(name)
for name in name_list:
if not re.search(r'[a-zA-Z]',name):
name_value.append(name)
for name in name_value:
data = data[data.Firstname != name]
name_list = []
name_value = []
for name in data.Firstname:
name_list.append(name)
#print "Midway point 1"
for name in name_list:
if re.search(r'[0-9]',name):
name_value.append(name)
for name in name_value:
data = data[data.Firstname != name]
name_list = []
name_value =[]
for name in data.Firstname:
name_list.append(name)
for name in name_list:
if re.search(r'[,.!@#$%^&*]',name):
name_value.append(name)
for name in name_value:
data = data[data.Firstname != name]
#print "midway point 2"
name_list = []
name_value = []
for name in data.Firstname:
name_list.append(name)
for name in name_list:
value = word_tokenize(name)
name_value.append(value[0])
data.Firstname = [name_value[ii] for ii in range(len(name_value))]
data.Firstname = map(lambda x:x.lower(),data.Firstname)
#print "midway point 3"
name_list = []
name_value =[]
for name in data.Firstname:
name_list.append(name)
for name in name_list:
if len(name) <=2:
name_value.append(name)
for name in name_value:
data = data[data.Firstname != name]
#print "midway point 4"
return data | apache-2.0 |
XInterns/IPL-Sparkers | src/Prediction+Model+in+Spark.py | 1 | 2063 |
# coding: utf-8
# In[105]:
import findspark
findspark.init()
from pyspark import SparkContext
from pyspark import SparkConf
# In[91]:
import pandas as pd
import numpy as np
from sklearn import grid_search, datasets
from spark_sklearn import GridSearchCV
from sklearn import ensemble
from pyspark.sql import SparkSession
from spark_sklearn.util import createLocalSparkSession
from patsy import dmatrices
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
# In[92]:
# In[93]:
df = pd.read_csv("../data/matcheswithfeatures.csv", index_col = 0)
df.tail()
# In[94]:
spark = createLocalSparkSession()
# In[95]:
y, X = dmatrices('team1Winning ~ 0 + Avg_SR_Difference + Avg_WPR_Difference + Total_MVP_Difference + Prev_Enc_Team1_WinPerc + Total_RF_Difference', df, return_type="dataframe")
y_arr = np.ravel(y)
# In[96]:
X.tail()
# In[97]:
X_timetrain = X.loc[X.index < 398]
Y_timetrain = y.loc[y.index < 398]
Y_timetrain_arr = np.ravel(Y_timetrain)
X_timetest = X.loc[X.index >= 398]
Y_timetest = y.loc[y.index >= 398]
Y_timetest_arr = np.ravel(Y_timetest)
X_timetest
# In[99]:
tuned_parameters = {
"n_estimators": [ 100 ],
"max_depth" : [ 3 ],
"learning_rate": [ 0.1 ],
}
gbc = ensemble.GradientBoostingClassifier()
clf = GridSearchCV(spark.sparkContext, gbc, tuned_parameters)
clf
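# As used here, spark_sklearn's GridSearchCV takes the SparkContext as its first
# argument; the intent is to fan the per-parameter-combination fits out over the
# Spark workers while the training data stays in the local pandas/numpy objects,
# and the fitted object is then used like a plain scikit-learn estimator below.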
# In[100]:
clf.fit(X_timetrain, Y_timetrain_arr)
clftest_pred = clf.predict(X_timetest)
print "Accuracy is ", metrics.accuracy_score(Y_timetest_arr, clftest_pred) *100, "%"
# In[101]:
knn1 = KNeighborsClassifier()
knn_params = {
"n_neighbors": [31]
}
clf2 = GridSearchCV(spark.sparkContext, knn1, knn_params, n_jobs = 2)
clf2
# In[102]:
clf2.fit(X_timetrain, Y_timetrain_arr)
clf2test_pred = clf2.predict(X_timetest)
print "Accuracy is ", metrics.accuracy_score(Y_timetest_arr, clf2test_pred) *100, "%"
# In[ ]:
| mit |
Windy-Ground/scikit-learn | sklearn/linear_model/tests/test_ridge.py | 68 | 23597 | import numpy as np
import scipy.sparse as sp
from scipy import linalg
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.metrics import mean_squared_error
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.ridge import ridge_regression
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.ridge import _RidgeGCV
from sklearn.linear_model.ridge import RidgeCV
from sklearn.linear_model.ridge import RidgeClassifier
from sklearn.linear_model.ridge import RidgeClassifierCV
from sklearn.linear_model.ridge import _solve_cholesky
from sklearn.linear_model.ridge import _solve_cholesky_kernel
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import KFold
diabetes = datasets.load_diabetes()
X_diabetes, y_diabetes = diabetes.data, diabetes.target
ind = np.arange(X_diabetes.shape[0])
rng = np.random.RandomState(0)
rng.shuffle(ind)
ind = ind[:200]
X_diabetes, y_diabetes = X_diabetes[ind], y_diabetes[ind]
iris = datasets.load_iris()
X_iris = sp.csr_matrix(iris.data)
y_iris = iris.target
DENSE_FILTER = lambda X: X
SPARSE_FILTER = lambda X: sp.csr_matrix(X)
def test_ridge():
# Ridge regression convergence test using score
# TODO: for this test to be robust, we should use a dataset instead
# of np.random.
rng = np.random.RandomState(0)
alpha = 1.0
for solver in ("svd", "sparse_cg", "cholesky", "lsqr", "sag"):
# With more samples than features
n_samples, n_features = 6, 5
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (X.shape[1], ))
assert_greater(ridge.score(X, y), 0.47)
if solver in ("cholesky", "sag"):
# Currently the only solvers to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.47)
# With more features than samples
n_samples, n_features = 5, 10
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), .9)
if solver in ("cholesky", "sag"):
# Currently the only solvers to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.9)
def test_primal_dual_relationship():
y = y_diabetes.reshape(-1, 1)
coef = _solve_cholesky(X_diabetes, y, alpha=[1e-2])
K = np.dot(X_diabetes, X_diabetes.T)
dual_coef = _solve_cholesky_kernel(K, y, alpha=[1e-2])
coef2 = np.dot(X_diabetes.T, dual_coef).T
assert_array_almost_equal(coef, coef2)
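# The identity exercised above is the standard primal/dual link for ridge: with
# K = X.dot(X.T), the dual coefficients a = inv(K + alpha * I).dot(y) and the primal
# coefficients w = inv(X.T.dot(X) + alpha * I).dot(X.T).dot(y) satisfy w = X.T.dot(a),
# which is why ``coef2 = np.dot(X_diabetes.T, dual_coef).T`` reproduces ``coef``.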
def test_ridge_singular():
# test on a singular matrix
rng = np.random.RandomState(0)
n_samples, n_features = 6, 6
y = rng.randn(n_samples // 2)
y = np.concatenate((y, y))
X = rng.randn(n_samples // 2, n_features)
X = np.concatenate((X, X), axis=0)
ridge = Ridge(alpha=0)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), 0.9)
def test_ridge_sample_weights():
rng = np.random.RandomState(0)
for solver in ("cholesky", ):
for n_samples, n_features in ((6, 5), (5, 10)):
for alpha in (1.0, 1e-2):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1 + rng.rand(n_samples)
coefs = ridge_regression(X, y,
alpha=alpha,
sample_weight=sample_weight,
solver=solver)
# Sample weight can be implemented via a simple rescaling
# for the square loss.
coefs2 = ridge_regression(
X * np.sqrt(sample_weight)[:, np.newaxis],
y * np.sqrt(sample_weight),
alpha=alpha, solver=solver)
assert_array_almost_equal(coefs, coefs2)
# Test for fit_intercept = True
est = Ridge(alpha=alpha, solver=solver)
est.fit(X, y, sample_weight=sample_weight)
# Check using Newton's Method
# Quadratic function should be solved in a single step.
# Initialize
sample_weight = np.sqrt(sample_weight)
X_weighted = sample_weight[:, np.newaxis] * (
np.column_stack((np.ones(n_samples), X)))
y_weighted = y * sample_weight
# Gradient is (X*coef-y)*X + alpha*coef_[1:]
# Remove coef since it is initialized to zero.
grad = -np.dot(y_weighted, X_weighted)
# Hessian is (X.T*X) + alpha*I except that the first
# diagonal element should be zero, since there is no
# penalization of intercept.
diag = alpha * np.ones(n_features + 1)
diag[0] = 0.
hess = np.dot(X_weighted.T, X_weighted)
hess.flat[::n_features + 2] += diag
coef_ = - np.dot(linalg.inv(hess), grad)
assert_almost_equal(coef_[0], est.intercept_)
assert_array_almost_equal(coef_[1:], est.coef_)
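# The rescaling checked above follows from
#     sum_i w_i * (y_i - x_i.dot(b))**2 + alpha * ||b||**2
#       == sum_i (sqrt(w_i) * y_i - (sqrt(w_i) * x_i).dot(b))**2 + alpha * ||b||**2,
# so weighted ridge equals unweighted ridge fit on rows scaled by sqrt(w_i). The
# Newton-step check in the same test converges in a single iteration because the
# penalized objective is exactly quadratic in the coefficients.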
def test_ridge_shapes():
# Test shape of coef_ and intercept_
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y1 = y[:, np.newaxis]
Y = np.c_[y, 1 + y]
ridge = Ridge()
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (n_features,))
assert_equal(ridge.intercept_.shape, ())
ridge.fit(X, Y1)
assert_equal(ridge.coef_.shape, (1, n_features))
assert_equal(ridge.intercept_.shape, (1, ))
ridge.fit(X, Y)
assert_equal(ridge.coef_.shape, (2, n_features))
assert_equal(ridge.intercept_.shape, (2, ))
def test_ridge_intercept():
# Test intercept with multiple targets GH issue #708
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y = np.c_[y, 1. + y]
ridge = Ridge()
ridge.fit(X, y)
intercept = ridge.intercept_
ridge.fit(X, Y)
assert_almost_equal(ridge.intercept_[0], intercept)
assert_almost_equal(ridge.intercept_[1], intercept + 1.)
def test_toy_ridge_object():
# Test BayesianRegression ridge classifier
# TODO: test also n_samples > n_features
X = np.array([[1], [2]])
Y = np.array([1, 2])
clf = Ridge(alpha=0.0)
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_almost_equal(clf.predict(X_test), [1., 2, 3, 4])
assert_equal(len(clf.coef_.shape), 1)
assert_equal(type(clf.intercept_), np.float64)
Y = np.vstack((Y, Y)).T
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_equal(len(clf.coef_.shape), 2)
assert_equal(type(clf.intercept_), np.ndarray)
def test_ridge_vs_lstsq():
# On alpha=0., Ridge and OLS yield the same solution.
rng = np.random.RandomState(0)
# we need more samples than features
n_samples, n_features = 5, 4
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=0., fit_intercept=False)
ols = LinearRegression(fit_intercept=False)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
def test_ridge_individual_penalties():
# Tests the ridge object using individual penalties
rng = np.random.RandomState(42)
n_samples, n_features, n_targets = 20, 10, 5
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples, n_targets)
penalties = np.arange(n_targets)
coef_cholesky = np.array([
Ridge(alpha=alpha, solver="cholesky").fit(X, target).coef_
for alpha, target in zip(penalties, y.T)])
coefs_indiv_pen = [
Ridge(alpha=penalties, solver=solver, tol=1e-8).fit(X, y).coef_
for solver in ['svd', 'sparse_cg', 'lsqr', 'cholesky', 'sag']]
for coef_indiv_pen in coefs_indiv_pen:
assert_array_almost_equal(coef_cholesky, coef_indiv_pen)
# Test error is raised when number of targets and penalties do not match.
ridge = Ridge(alpha=penalties[:-1])
assert_raises(ValueError, ridge.fit, X, y)
def _test_ridge_loo(filter_):
# test that can work with both dense or sparse matrices
n_samples = X_diabetes.shape[0]
ret = []
ridge_gcv = _RidgeGCV(fit_intercept=False)
ridge = Ridge(alpha=1.0, fit_intercept=False)
# generalized cross-validation (efficient leave-one-out)
decomp = ridge_gcv._pre_compute(X_diabetes, y_diabetes)
errors, c = ridge_gcv._errors(1.0, y_diabetes, *decomp)
values, c = ridge_gcv._values(1.0, y_diabetes, *decomp)
# brute-force leave-one-out: remove one example at a time
errors2 = []
values2 = []
for i in range(n_samples):
sel = np.arange(n_samples) != i
X_new = X_diabetes[sel]
y_new = y_diabetes[sel]
ridge.fit(X_new, y_new)
value = ridge.predict([X_diabetes[i]])[0]
error = (y_diabetes[i] - value) ** 2
errors2.append(error)
values2.append(value)
# check that efficient and brute-force LOO give same results
assert_almost_equal(errors, errors2)
assert_almost_equal(values, values2)
# generalized cross-validation (efficient leave-one-out,
# SVD variation)
decomp = ridge_gcv._pre_compute_svd(X_diabetes, y_diabetes)
errors3, c = ridge_gcv._errors_svd(ridge.alpha, y_diabetes, *decomp)
values3, c = ridge_gcv._values_svd(ridge.alpha, y_diabetes, *decomp)
# check that efficient and SVD efficient LOO give same results
assert_almost_equal(errors, errors3)
assert_almost_equal(values, values3)
# check best alpha
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
alpha_ = ridge_gcv.alpha_
ret.append(alpha_)
# check that we get same best alpha with custom loss_func
f = ignore_warnings
scoring = make_scorer(mean_squared_error, greater_is_better=False)
ridge_gcv2 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv2.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv2.alpha_, alpha_)
# check that we get same best alpha with custom score_func
func = lambda x, y: -mean_squared_error(x, y)
scoring = make_scorer(func)
ridge_gcv3 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv3.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv3.alpha_, alpha_)
# check that we get same best alpha with a scorer
scorer = get_scorer('mean_squared_error')
ridge_gcv4 = RidgeCV(fit_intercept=False, scoring=scorer)
ridge_gcv4.fit(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv4.alpha_, alpha_)
# check that we get same best alpha with sample weights
ridge_gcv.fit(filter_(X_diabetes), y_diabetes,
sample_weight=np.ones(n_samples))
assert_equal(ridge_gcv.alpha_, alpha_)
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
ridge_gcv.fit(filter_(X_diabetes), Y)
Y_pred = ridge_gcv.predict(filter_(X_diabetes))
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge_gcv.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=5)
return ret
def _test_ridge_cv(filter_):
n_samples = X_diabetes.shape[0]
ridge_cv = RidgeCV()
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
cv = KFold(n_samples, 5)
ridge_cv.set_params(cv=cv)
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
def _test_ridge_diabetes(filter_):
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), y_diabetes)
return np.round(ridge.score(filter_(X_diabetes), y_diabetes), 5)
def _test_multi_ridge_diabetes(filter_):
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
n_features = X_diabetes.shape[1]
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), Y)
assert_equal(ridge.coef_.shape, (2, n_features))
Y_pred = ridge.predict(filter_(X_diabetes))
ridge.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=3)
def _test_ridge_classifiers(filter_):
n_classes = np.unique(y_iris).shape[0]
n_features = X_iris.shape[1]
for clf in (RidgeClassifier(), RidgeClassifierCV()):
clf.fit(filter_(X_iris), y_iris)
assert_equal(clf.coef_.shape, (n_classes, n_features))
y_pred = clf.predict(filter_(X_iris))
assert_greater(np.mean(y_iris == y_pred), .79)
n_samples = X_iris.shape[0]
cv = KFold(n_samples, 5)
clf = RidgeClassifierCV(cv=cv)
clf.fit(filter_(X_iris), y_iris)
y_pred = clf.predict(filter_(X_iris))
assert_true(np.mean(y_iris == y_pred) >= 0.8)
def _test_tolerance(filter_):
ridge = Ridge(tol=1e-5)
ridge.fit(filter_(X_diabetes), y_diabetes)
score = ridge.score(filter_(X_diabetes), y_diabetes)
ridge2 = Ridge(tol=1e-3)
ridge2.fit(filter_(X_diabetes), y_diabetes)
score2 = ridge2.score(filter_(X_diabetes), y_diabetes)
assert_true(score >= score2)
def test_dense_sparse():
for test_func in (_test_ridge_loo,
_test_ridge_cv,
_test_ridge_diabetes,
_test_multi_ridge_diabetes,
_test_ridge_classifiers,
_test_tolerance):
# test dense matrix
ret_dense = test_func(DENSE_FILTER)
# test sparse matrix
ret_sparse = test_func(SPARSE_FILTER)
# test that the outputs are the same
if ret_dense is not None and ret_sparse is not None:
assert_array_almost_equal(ret_dense, ret_sparse, decimal=3)
def test_ridge_cv_sparse_svd():
X = sp.csr_matrix(X_diabetes)
ridge = RidgeCV(gcv_mode="svd")
assert_raises(TypeError, ridge.fit, X)
def test_ridge_sparse_svd():
X = sp.csc_matrix(rng.rand(100, 10))
y = rng.rand(100)
ridge = Ridge(solver='svd')
assert_raises(TypeError, ridge.fit, X, y)
def test_class_weights():
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf = RidgeClassifier(class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
# check if class_weight = 'balanced' can handle negative labels.
clf = RidgeClassifier(class_weight='balanced')
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# class_weight = 'balanced', and class_weight = None should return
# same values when y has equal number of all labels
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0]])
y = [1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
clfa = RidgeClassifier(class_weight='balanced')
clfa.fit(X, y)
assert_equal(len(clfa.classes_), 2)
assert_array_almost_equal(clf.coef_, clfa.coef_)
assert_array_almost_equal(clf.intercept_, clfa.intercept_)
def test_class_weight_vs_sample_weight():
"""Check class_weights resemble sample_weights behavior."""
for clf in (RidgeClassifier, RidgeClassifierCV):
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = clf()
clf1.fit(iris.data, iris.target)
clf2 = clf(class_weight='balanced')
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.coef_, clf2.coef_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = clf()
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = clf(class_weight=class_weight)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.coef_, clf2.coef_)
# Check that sample_weight and class_weight are multiplicative
clf1 = clf()
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = clf(class_weight=class_weight)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.coef_, clf2.coef_)
def test_class_weights_cv():
# Test class weights for cross validated ridge classifier.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifierCV(class_weight=None, alphas=[.01, .1, 1])
clf.fit(X, y)
# we give a small weights to class 1
clf = RidgeClassifierCV(class_weight={1: 0.001}, alphas=[.01, .1, 1, 10])
clf.fit(X, y)
assert_array_equal(clf.predict([[-.2, 2]]), np.array([-1]))
def test_ridgecv_store_cv_values():
# Test _RidgeCV's store_cv_values attribute.
    rng = np.random.RandomState(42)
n_samples = 8
n_features = 5
x = rng.randn(n_samples, n_features)
alphas = [1e-1, 1e0, 1e1]
n_alphas = len(alphas)
r = RidgeCV(alphas=alphas, store_cv_values=True)
# with len(y.shape) == 1
y = rng.randn(n_samples)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_alphas))
# with len(y.shape) == 2
n_responses = 3
y = rng.randn(n_samples, n_responses)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_responses, n_alphas))
def test_ridgecv_sample_weight():
rng = np.random.RandomState(0)
alphas = (0.1, 1.0, 10.0)
# There are different algorithms for n_samples > n_features
# and the opposite, so test them both.
for n_samples, n_features in ((6, 5), (5, 10)):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1 + rng.rand(n_samples)
cv = KFold(n_samples, 5)
ridgecv = RidgeCV(alphas=alphas, cv=cv)
ridgecv.fit(X, y, sample_weight=sample_weight)
# Check using GridSearchCV directly
parameters = {'alpha': alphas}
fit_params = {'sample_weight': sample_weight}
gs = GridSearchCV(Ridge(), parameters, fit_params=fit_params,
cv=cv)
gs.fit(X, y)
assert_equal(ridgecv.alpha_, gs.best_estimator_.alpha)
assert_array_almost_equal(ridgecv.coef_, gs.best_estimator_.coef_)
def test_raises_value_error_if_sample_weights_greater_than_1d():
# Sample weights must be either scalar or 1D
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights_OK = rng.randn(n_samples) ** 2 + 1
sample_weights_OK_1 = 1.
sample_weights_OK_2 = 2.
sample_weights_not_OK = sample_weights_OK[:, np.newaxis]
sample_weights_not_OK_2 = sample_weights_OK[np.newaxis, :]
ridge = Ridge(alpha=1)
# make sure the "OK" sample weights actually work
ridge.fit(X, y, sample_weights_OK)
ridge.fit(X, y, sample_weights_OK_1)
ridge.fit(X, y, sample_weights_OK_2)
def fit_ridge_not_ok():
ridge.fit(X, y, sample_weights_not_OK)
def fit_ridge_not_ok_2():
ridge.fit(X, y, sample_weights_not_OK_2)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok_2)
def test_sparse_design_with_sample_weights():
# Sample weights must work with sparse matrices
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
sparse_matrix_converters = [sp.coo_matrix,
sp.csr_matrix,
sp.csc_matrix,
sp.lil_matrix,
sp.dok_matrix
]
sparse_ridge = Ridge(alpha=1., fit_intercept=False)
dense_ridge = Ridge(alpha=1., fit_intercept=False)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights = rng.randn(n_samples) ** 2 + 1
for sparse_converter in sparse_matrix_converters:
X_sparse = sparse_converter(X)
sparse_ridge.fit(X_sparse, y, sample_weight=sample_weights)
dense_ridge.fit(X, y, sample_weight=sample_weights)
assert_array_almost_equal(sparse_ridge.coef_, dense_ridge.coef_,
decimal=6)
def test_raises_value_error_if_solver_not_supported():
# Tests whether a ValueError is raised if a non-identified solver
# is passed to ridge_regression
wrong_solver = "This is not a solver (MagritteSolveCV QuantumBitcoin)"
exception = ValueError
message = "Solver %s not understood" % wrong_solver
def func():
X = np.eye(3)
y = np.ones(3)
ridge_regression(X, y, alpha=1., solver=wrong_solver)
assert_raise_message(exception, message, func)
def test_sparse_cg_max_iter():
reg = Ridge(solver="sparse_cg", max_iter=1)
reg.fit(X_diabetes, y_diabetes)
assert_equal(reg.coef_.shape[0], X_diabetes.shape[1])
@ignore_warnings
def test_n_iter():
# Test that self.n_iter_ is correct.
n_targets = 2
X, y = X_diabetes, y_diabetes
y_n = np.tile(y, (n_targets, 1)).T
for max_iter in range(1, 4):
for solver in ('sag', 'lsqr'):
reg = Ridge(solver=solver, max_iter=max_iter, tol=1e-12)
reg.fit(X, y_n)
assert_array_equal(reg.n_iter_, np.tile(max_iter, n_targets))
for solver in ('sparse_cg', 'svd', 'cholesky'):
reg = Ridge(solver=solver, max_iter=1, tol=1e-1)
reg.fit(X, y_n)
assert_equal(reg.n_iter_, None)
| bsd-3-clause |
pianomania/scikit-learn | examples/linear_model/plot_multi_task_lasso_support.py | 102 | 2319 | #!/usr/bin/env python
"""
=============================================
Joint feature selection with multi-task Lasso
=============================================
The multi-task lasso allows fitting multiple regression problems
jointly while enforcing the selected features to be the same across
tasks. This example simulates sequential measurements: each task
is a time instant, and the relevant features vary in amplitude
over time while being the same. The multi-task lasso imposes that
features that are selected at one time point are selected for all
time points. This makes feature selection by the Lasso more stable.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
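# What "jointly enforcing" means here: MultiTaskLasso applies an l2/l1 group penalty
# that, for each feature, takes the Euclidean norm of that feature's coefficients
# across all tasks and sums these norms, so a feature is either dropped for every
# task or kept for every task; the independent Lasso fits below may each select a
# different support, which is what the left-hand sparsity plot illustrates.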
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import MultiTaskLasso, Lasso
rng = np.random.RandomState(42)
# Generate some 2D coefficients with sine waves with random frequency and phase
n_samples, n_features, n_tasks = 100, 30, 40
n_relevant_features = 5
coef = np.zeros((n_tasks, n_features))
times = np.linspace(0, 2 * np.pi, n_tasks)
for k in range(n_relevant_features):
coef[:, k] = np.sin((1. + rng.randn(1)) * times + 3 * rng.randn(1))
X = rng.randn(n_samples, n_features)
Y = np.dot(X, coef.T) + rng.randn(n_samples, n_tasks)
coef_lasso_ = np.array([Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T])
coef_multi_task_lasso_ = MultiTaskLasso(alpha=1.).fit(X, Y).coef_
###############################################################################
# Plot support and time series
fig = plt.figure(figsize=(8, 5))
plt.subplot(1, 2, 1)
plt.spy(coef_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'Lasso')
plt.subplot(1, 2, 2)
plt.spy(coef_multi_task_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'MultiTaskLasso')
fig.suptitle('Coefficient non-zero location')
feature_to_plot = 0
plt.figure()
lw = 2
plt.plot(coef[:, feature_to_plot], color='seagreen', linewidth=lw,
label='Ground truth')
plt.plot(coef_lasso_[:, feature_to_plot], color='cornflowerblue', linewidth=lw,
label='Lasso')
plt.plot(coef_multi_task_lasso_[:, feature_to_plot], color='gold', linewidth=lw,
label='MultiTaskLasso')
plt.legend(loc='upper center')
plt.axis('tight')
plt.ylim([-1.1, 1.1])
plt.show()
| bsd-3-clause |
drogenlied/qudi | core/__init__.py | 1 | 1672 | # -*- coding: utf-8 -*-
"""
This file contains the Qudi Manager class.
Qudi is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Qudi is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Qudi. If not, see <http://www.gnu.org/licenses/>.
Copyright (c) the Qudi Developers. See the COPYRIGHT.txt file at the
top-level directory of this distribution and at <https://github.com/Ulm-IQO/qudi/>
"""
__version__ = '0.1'
# import Qt
import os
if 'QT_API' not in os.environ:
# use PyQt4 as default
os.environ['QT_API'] = 'pyqt'
else:
print('Specified Qt API:', os.environ['QT_API'])
    # if PyQt4 was requested, the environment variable must be 'pyqt' and not
    # 'pyqt4' (this is the value that ipython, matplotlib, etc. expect)
if os.environ['QT_API'].lower() == 'pyqt4':
os.environ['QT_API'] = 'pyqt'
import qtpy
print('Used Qt API:', qtpy.API_NAME)
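# qtpy reads the QT_API environment variable at import time, which is why it is set
# above before ``import qtpy``; the legacy value 'pyqt' is expected to select the
# PyQt4 binding, and API_NAME reports whichever binding was actually loaded.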
import sys
# Make icons work on non-X11 platforms, import a custom theme
if sys.platform == 'win32':
try:
import ctypes
myappid = 'quantumoptics.quantumdiamond.mainapp' # arbitrary string
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)
except:
print('SetCurrentProcessExplicitAppUserModelID failed! This is '
'probably not Microsoft Windows!')
| gpl-3.0 |
adelomana/30sols | F1.interplay/panel.a/group.expression.distribution.py | 1 | 6548 | ###
### This script makes a figure of the distribution of expression for the different regulatory groups
###
import os,sys,numpy
import matplotlib,matplotlib.pyplot
matplotlib.rcParams.update({'font.size':24,'font.family':'Arial','xtick.labelsize':18,'ytick.labelsize':18})
def histogrammer(theData):
'''
    This function creates a histogram, returning bin centers and frequencies
    normalized to sum to 1 over the fixed 0 to 5 range.
'''
x=[]; y=[]
binSize=0.1
left=0
right=5
rightBins=numpy.arange(left+binSize,right+binSize,binSize)
n,bins=numpy.histogram(theData,bins=rightBins)
halfBin=(bins[1]-bins[0])/2.
for bin in bins:
center=bin+halfBin
x.append(center)
x.pop()
y=numpy.array(n)
y=list(y/float(sum(y)))
return x,y
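# A usage sketch with synthetic input (values invented for illustration): the
# function returns bin centers and frequencies normalized by their own total, so
# the y values sum to 1.
#
#     x, y = histogrammer(numpy.random.uniform(0, 5, size=1000))
#     # len(x) == len(y) == 49 here (bins of width 0.1 spanning the edges 0.1 to 5.0)
#     # matplotlib.pyplot.plot(x, y, '-') draws the resulting distribution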
def transcriptomicsReader():
'''
This function reads transcriptomics data as in
transcriptomics[trna/rbf][replicate][timepoint][gene]
'''
data={}
geneNames=[]; timepoints=[]; replicates=[]
with open(transcriptomicsDataFile,'r') as f:
header=f.readline()
labels=header.split('\t')[1:-1]
for label in labels:
crumbles=label.split('.')
fraction=crumbles[0]
replicate='br'+crumbles[2]
timepoint='tp.'+crumbles[4]
if replicate not in replicates:
replicates.append(replicate)
if timepoint not in timepoints:
timepoints.append(timepoint)
if fraction not in data.keys():
data[fraction]={}
if replicate not in data[fraction].keys():
data[fraction][replicate]={}
if timepoint not in data[fraction][replicate].keys():
data[fraction][replicate][timepoint]={}
for line in f:
vector=line.split('\t')[:-1]
values=[float(element) for element in vector[1:]]
geneName=vector[0].replace('_','')
if geneName not in geneNames:
geneNames.append(geneName)
for i in range(len(values)):
crumbles=labels[i].split('.')
fraction=crumbles[0]
replicate='br'+crumbles[2]
timepoint='tp.'+crumbles[4]
data[fraction][replicate][timepoint][geneName]=values[i]
return data,geneNames,timepoints,replicates
###
### MAIN
###
# 0. user defined variables
# 0.1. paths
transcriptomicsDataFile='/Volumes/omics4tb/alomana/projects/TLR/data/expression1e3/expressionMatrix.kallisto.txt'
# 1. reading data
print('reading data...')
# 1.1. reading mRNA expression data
rnaExpression,geneNames,timepoints,replicates=transcriptomicsReader()
# 1.2. reading group membership
geneSets={}
elements=os.listdir('results')
for element in elements:
tag=element.split('results.')[1].split('.txt')[0]
geneSets[tag]=[]
# read file
file2read='results/{}'.format(element)
with open(file2read,'r') as f:
for line in f:
v=line.split('\t')
if v[2] not in ['gene-VNGRS13150','gene-VNGRS00005','gene-VNGRS09790','gene-VNGRS10040','gene-VNGRS09800','gene-VNGRS09805','gene-VNGRS03925']:
geneSets[tag].append(v[2])
# manually add a group with all genes
longNames=['gene-'+element for element in geneNames]
geneSets['all']=longNames
# 2. convert group memmberships into expression distributions
print('converting group memberships into expression distributions...')
expressionDistributions={}
for element in geneSets.keys():
expressionDistributions[element]=[]
# check consistency of mRNA
count=0
for geneName in geneSets[element]:
mRNA_TPMs=[]
shortGeneName=geneName.split('gene-')[1]
for replicate in replicates:
mRNA_TPMs.append(rnaExpression['trna'][replicate]['tp.1'][shortGeneName])
# data transformations and quality check
log10M=numpy.log10(numpy.array(mRNA_TPMs)+1)
log2M=numpy.log2(numpy.array(mRNA_TPMs)+1)
# noise
        if numpy.max(log2M) > numpy.log2(10+1): # only assess noise when expression exceeds 10 TPM
sem=numpy.std(log2M)/numpy.sqrt(len(log2M))
rsem_mRNA=sem/numpy.mean(log2M)
else:
rsem_mRNA=0
if rsem_mRNA < 0.3:
m=numpy.median(log10M)
expressionDistributions[element].append(m)
# 3. define significance of deviation
print('running hypothesis test of deviation...')
groupLabels=list(expressionDistributions.keys())
groupLabels.sort()
groupLabels.remove('dubious')
theColors=['black','black','black','blue','green','orange','red','yellow']
theLineStyle=['-',':','--','-','-','-','-','-']
# run specific groups
#groupLabels=['orange','green']
#theColors=['orange','green']
#groupLabels=['black.minus', 'black.plus','blue','red']
#theColors=['gainsboro','dimgrey','blue','red']
groupLabels=['yellow']
theColors=['yellow']
# make a figure of the overall distribution
x,y=histogrammer(expressionDistributions['all'])
matplotlib.pyplot.plot(x,y,'-',color='black',lw=1)
for i in range(len(groupLabels)):
# resample
numberOfElements=int(1e6)
workingDist=expressionDistributions[groupLabels[i]]
measuredAverage=numpy.mean(workingDist)
averageDist=[]
for j in range(numberOfElements):
sample=numpy.random.choice(expressionDistributions['all'],len(workingDist))
average=numpy.mean(sample)
averageDist.append(average)
# hypothesis test
higherRandoms=sum(numpy.greater(averageDist,measuredAverage))
if higherRandoms > numberOfElements/2:
pvalue=1-(higherRandoms/float(numberOfElements))
else:
pvalue=higherRandoms/float(numberOfElements)
print('Group label {} has a deviation whose p-value is {}. Out of {} trials'.format(groupLabels[i],pvalue,numberOfElements))
# make a figure of the expected group distribution
x,y=histogrammer(averageDist)
matplotlib.pyplot.plot(x,y,linestyle=':',color=theColors[i],lw=2,alpha=0.5)
matplotlib.pyplot.axvline(x=measuredAverage,color=theColors[i],linestyle='-',lw=3)
matplotlib.pyplot.xlim([-0.1,4.])
matplotlib.pyplot.ylim([-0.01,0.6])
matplotlib.pyplot.xlabel('mRNA (log$_{10}$ TPM+1)')
matplotlib.pyplot.ylabel('Probability')
matplotlib.pyplot.tight_layout()
#matplotlib.pyplot.savefig('figure.expression.distribution.TL.pdf')
#matplotlib.pyplot.savefig('figure.expression.distribution.TC.pdf')
matplotlib.pyplot.savefig('figure.expression.distribution.yellow.pdf')
matplotlib.pyplot.clf()
| gpl-3.0 |
appapantula/scikit-learn | sklearn/ensemble/tests/test_bagging.py | 127 | 25365 | """
Testing for the bagging ensemble module (sklearn.ensemble.bagging).
"""
# Author: Gilles Louppe
# License: BSD 3 clause
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.grid_search import GridSearchCV, ParameterGrid
from sklearn.ensemble import BaggingClassifier, BaggingRegressor
from sklearn.linear_model import Perceptron, LogisticRegression
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.svm import SVC, SVR
from sklearn.pipeline import make_pipeline
from sklearn.feature_selection import SelectKBest
from sklearn.cross_validation import train_test_split
from sklearn.datasets import load_boston, load_iris, make_hastie_10_2
from sklearn.utils import check_random_state
from scipy.sparse import csc_matrix, csr_matrix
rng = check_random_state(0)
# also load the iris dataset
# and randomly permute it
iris = load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
def test_classification():
# Check classification for various parameter settings.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"max_features": [1, 2, 4],
"bootstrap": [True, False],
"bootstrap_features": [True, False]})
for base_estimator in [None,
DummyClassifier(),
Perceptron(),
DecisionTreeClassifier(),
KNeighborsClassifier(),
SVC()]:
for params in grid:
BaggingClassifier(base_estimator=base_estimator,
random_state=rng,
**params).fit(X_train, y_train).predict(X_test)
def test_sparse_classification():
# Check classification for various parameter settings on sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set"""
def fit(self, X, y):
super(CustomSVC, self).fit(X, y)
self.data_type_ = type(X)
return self
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
parameter_sets = [
{"max_samples": 0.5,
"max_features": 2,
"bootstrap": True,
"bootstrap_features": True},
{"max_samples": 1.0,
"max_features": 4,
"bootstrap": True,
"bootstrap_features": True},
{"max_features": 2,
"bootstrap": False,
"bootstrap_features": True},
{"max_samples": 0.5,
"bootstrap": True,
"bootstrap_features": False},
]
for sparse_format in [csc_matrix, csr_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
for params in parameter_sets:
# Trained on sparse format
sparse_classifier = BaggingClassifier(
base_estimator=CustomSVC(),
random_state=1,
**params
).fit(X_train_sparse, y_train)
sparse_results = sparse_classifier.predict(X_test_sparse)
# Trained on dense format
dense_results = BaggingClassifier(
base_estimator=CustomSVC(),
random_state=1,
**params
).fit(X_train, y_train).predict(X_test)
sparse_type = type(X_train_sparse)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert_array_equal(sparse_results, dense_results)
assert all([t == sparse_type for t in types])
def test_regression():
# Check regression for various parameter settings.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"max_features": [0.5, 1.0],
"bootstrap": [True, False],
"bootstrap_features": [True, False]})
for base_estimator in [None,
DummyRegressor(),
DecisionTreeRegressor(),
KNeighborsRegressor(),
SVR()]:
for params in grid:
BaggingRegressor(base_estimator=base_estimator,
random_state=rng,
**params).fit(X_train, y_train).predict(X_test)
def test_sparse_regression():
# Check regression for various parameter settings on sparse input.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
class CustomSVR(SVR):
"""SVC variant that records the nature of the training set"""
def fit(self, X, y):
super(CustomSVR, self).fit(X, y)
self.data_type_ = type(X)
return self
parameter_sets = [
{"max_samples": 0.5,
"max_features": 2,
"bootstrap": True,
"bootstrap_features": True},
{"max_samples": 1.0,
"max_features": 4,
"bootstrap": True,
"bootstrap_features": True},
{"max_features": 2,
"bootstrap": False,
"bootstrap_features": True},
{"max_samples": 0.5,
"bootstrap": True,
"bootstrap_features": False},
]
for sparse_format in [csc_matrix, csr_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
for params in parameter_sets:
# Trained on sparse format
sparse_classifier = BaggingRegressor(
base_estimator=CustomSVR(),
random_state=1,
**params
).fit(X_train_sparse, y_train)
sparse_results = sparse_classifier.predict(X_test_sparse)
# Trained on dense format
dense_results = BaggingRegressor(
base_estimator=CustomSVR(),
random_state=1,
**params
).fit(X_train, y_train).predict(X_test)
sparse_type = type(X_train_sparse)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert_array_equal(sparse_results, dense_results)
assert all([t == sparse_type for t in types])
assert_array_equal(sparse_results, dense_results)
def test_bootstrap_samples():
    # Test that bootstrapping samples generates non-perfect base estimators.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
base_estimator = DecisionTreeRegressor().fit(X_train, y_train)
# without bootstrap, all trees are perfect on the training set
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_samples=1.0,
bootstrap=False,
random_state=rng).fit(X_train, y_train)
assert_equal(base_estimator.score(X_train, y_train),
ensemble.score(X_train, y_train))
# with bootstrap, trees are no longer perfect on the training set
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_samples=1.0,
bootstrap=True,
random_state=rng).fit(X_train, y_train)
assert_greater(base_estimator.score(X_train, y_train),
ensemble.score(X_train, y_train))
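# Why bootstrapping makes the trees imperfect here: each resample draws n rows with
# replacement, so on average about 1 - (1 - 1/n)**n of the training rows (roughly
# 36.8% for large n) are left out of any given resample; those unseen rows are also
# what the oob_score_ checks further down rely on.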
def test_bootstrap_features():
    # Test that bootstrapping features may generate duplicate features.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_features=1.0,
bootstrap_features=False,
random_state=rng).fit(X_train, y_train)
for features in ensemble.estimators_features_:
assert_equal(boston.data.shape[1], np.unique(features).shape[0])
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_features=1.0,
bootstrap_features=True,
random_state=rng).fit(X_train, y_train)
for features in ensemble.estimators_features_:
assert_greater(boston.data.shape[1], np.unique(features).shape[0])
def test_probability():
# Predict probabilities.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
with np.errstate(divide="ignore", invalid="ignore"):
# Normal case
ensemble = BaggingClassifier(base_estimator=DecisionTreeClassifier(),
random_state=rng).fit(X_train, y_train)
assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
axis=1),
np.ones(len(X_test)))
assert_array_almost_equal(ensemble.predict_proba(X_test),
np.exp(ensemble.predict_log_proba(X_test)))
# Degenerate case, where some classes are missing
ensemble = BaggingClassifier(base_estimator=LogisticRegression(),
random_state=rng,
max_samples=5).fit(X_train, y_train)
assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
axis=1),
np.ones(len(X_test)))
assert_array_almost_equal(ensemble.predict_proba(X_test),
np.exp(ensemble.predict_log_proba(X_test)))
def test_oob_score_classification():
# Check that oob prediction is a good estimation of the generalization
# error.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
for base_estimator in [DecisionTreeClassifier(), SVC()]:
clf = BaggingClassifier(base_estimator=base_estimator,
n_estimators=100,
bootstrap=True,
oob_score=True,
random_state=rng).fit(X_train, y_train)
test_score = clf.score(X_test, y_test)
assert_less(abs(test_score - clf.oob_score_), 0.1)
# Test with few estimators
assert_warns(UserWarning,
BaggingClassifier(base_estimator=base_estimator,
n_estimators=1,
bootstrap=True,
oob_score=True,
random_state=rng).fit,
X_train,
y_train)
def test_oob_score_regression():
# Check that oob prediction is a good estimation of the generalization
# error.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
clf = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
n_estimators=50,
bootstrap=True,
oob_score=True,
random_state=rng).fit(X_train, y_train)
test_score = clf.score(X_test, y_test)
assert_less(abs(test_score - clf.oob_score_), 0.1)
# Test with few estimators
assert_warns(UserWarning,
BaggingRegressor(base_estimator=DecisionTreeRegressor(),
n_estimators=1,
bootstrap=True,
oob_score=True,
random_state=rng).fit,
X_train,
y_train)
def test_single_estimator():
# Check singleton ensembles.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
clf1 = BaggingRegressor(base_estimator=KNeighborsRegressor(),
n_estimators=1,
bootstrap=False,
bootstrap_features=False,
random_state=rng).fit(X_train, y_train)
clf2 = KNeighborsRegressor().fit(X_train, y_train)
assert_array_equal(clf1.predict(X_test), clf2.predict(X_test))
def test_error():
# Test that it gives proper exception on deficient input.
X, y = iris.data, iris.target
base = DecisionTreeClassifier()
# Test max_samples
assert_raises(ValueError,
BaggingClassifier(base, max_samples=-1).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=0.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=2.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=1000).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples="foobar").fit, X, y)
# Test max_features
assert_raises(ValueError,
BaggingClassifier(base, max_features=-1).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=0.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=2.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=5).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features="foobar").fit, X, y)
# Test support of decision_function
assert_false(hasattr(BaggingClassifier(base).fit(X, y), 'decision_function'))
def test_parallel_classification():
# Check parallel classification.
rng = check_random_state(0)
# Classification
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
# predict_proba
ensemble.set_params(n_jobs=1)
y1 = ensemble.predict_proba(X_test)
ensemble.set_params(n_jobs=2)
y2 = ensemble.predict_proba(X_test)
assert_array_almost_equal(y1, y2)
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
y3 = ensemble.predict_proba(X_test)
assert_array_almost_equal(y1, y3)
# decision_function
ensemble = BaggingClassifier(SVC(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
ensemble.set_params(n_jobs=1)
decisions1 = ensemble.decision_function(X_test)
ensemble.set_params(n_jobs=2)
decisions2 = ensemble.decision_function(X_test)
assert_array_almost_equal(decisions1, decisions2)
ensemble = BaggingClassifier(SVC(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
decisions3 = ensemble.decision_function(X_test)
assert_array_almost_equal(decisions1, decisions3)
def test_parallel_regression():
# Check parallel regression.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
ensemble.set_params(n_jobs=1)
y1 = ensemble.predict(X_test)
ensemble.set_params(n_jobs=2)
y2 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y2)
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
y3 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y3)
def test_gridsearch():
# Check that bagging ensembles can be grid-searched.
# Transform iris into a binary classification task
X, y = iris.data, iris.target
y[y == 2] = 1
# Grid search with scoring based on decision_function
parameters = {'n_estimators': (1, 2),
'base_estimator__C': (1, 2)}
GridSearchCV(BaggingClassifier(SVC()),
parameters,
scoring="roc_auc").fit(X, y)
def test_base_estimator():
# Check base_estimator and its default values.
rng = check_random_state(0)
# Classification
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
ensemble = BaggingClassifier(None,
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier))
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier))
ensemble = BaggingClassifier(Perceptron(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, Perceptron))
# Regression
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(None,
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor))
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor))
ensemble = BaggingRegressor(SVR(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, SVR))
def test_bagging_with_pipeline():
estimator = BaggingClassifier(make_pipeline(SelectKBest(k=1),
DecisionTreeClassifier()),
max_features=2)
estimator.fit(iris.data, iris.target)
class DummyZeroEstimator(BaseEstimator):
def fit(self, X, y):
self.classes_ = np.unique(y)
return self
def predict(self, X):
return self.classes_[np.zeros(X.shape[0], dtype=int)]
def test_bagging_sample_weight_unsupported_but_passed():
estimator = BaggingClassifier(DummyZeroEstimator())
rng = check_random_state(0)
estimator.fit(iris.data, iris.target).predict(iris.data)
assert_raises(ValueError, estimator.fit, iris.data, iris.target,
sample_weight=rng.randint(10, size=(iris.data.shape[0])))
def test_warm_start(random_state=42):
    # Test that fitting incrementally with warm start gives an ensemble of
    # the right size and the same results as a normal fit.
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = BaggingClassifier(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = BaggingClassifier(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
def test_warm_start_smaller_n_estimators():
    # Test that a warm-started second fit with smaller n_estimators raises an error.
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf = BaggingClassifier(n_estimators=5, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test that nothing happens when fitting without increasing n_estimators
X, y = make_hastie_10_2(n_samples=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
clf = BaggingClassifier(n_estimators=5, warm_start=True, random_state=83)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
    # modify X to nonsense values; this should not change anything
X_train += 1.
assert_warns_message(UserWarning,
"Warm-start fitting without increasing n_estimators does not",
clf.fit, X_train, y_train)
assert_array_equal(y_pred, clf.predict(X_test))
def test_warm_start_equivalence():
# warm started classifier with 5+5 estimators should be equivalent to
# one classifier with 10 estimators
X, y = make_hastie_10_2(n_samples=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
clf_ws = BaggingClassifier(n_estimators=5, warm_start=True,
random_state=3141)
clf_ws.fit(X_train, y_train)
clf_ws.set_params(n_estimators=10)
clf_ws.fit(X_train, y_train)
y1 = clf_ws.predict(X_test)
clf = BaggingClassifier(n_estimators=10, warm_start=False,
random_state=3141)
clf.fit(X_train, y_train)
y2 = clf.predict(X_test)
assert_array_almost_equal(y1, y2)
def test_warm_start_with_oob_score_fails():
    # Check that using oob_score and warm_start simultaneously fails
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf = BaggingClassifier(n_estimators=5, warm_start=True, oob_score=True)
assert_raises(ValueError, clf.fit, X, y)
def test_oob_score_removed_on_warm_start():
X, y = make_hastie_10_2(n_samples=2000, random_state=1)
clf = BaggingClassifier(n_estimators=50, oob_score=True)
clf.fit(X, y)
clf.set_params(warm_start=True, oob_score=False, n_estimators=100)
clf.fit(X, y)
assert_raises(AttributeError, getattr, clf, "oob_score_")
| bsd-3-clause |
great-expectations/great_expectations | great_expectations/expectations/metrics/column_aggregate_metrics/column_distinct_values.py | 1 | 5055 | from typing import Any, Dict, Optional, Tuple
from great_expectations.core import ExpectationConfiguration
from great_expectations.execution_engine import (
ExecutionEngine,
PandasExecutionEngine,
SparkDFExecutionEngine,
)
from great_expectations.execution_engine.sqlalchemy_execution_engine import (
SqlAlchemyExecutionEngine,
)
from great_expectations.expectations.metrics.column_aggregate_metric import (
ColumnMetricProvider,
column_aggregate_value,
)
from great_expectations.expectations.metrics.metric_provider import metric_value
from great_expectations.validator.validation_graph import MetricConfiguration
class ColumnDistinctValues(ColumnMetricProvider):
metric_name = "column.distinct_values"
@column_aggregate_value(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return set(column.unique())
@metric_value(engine=SqlAlchemyExecutionEngine)
def _sqlalchemy(
cls,
execution_engine: "SqlAlchemyExecutionEngine",
metric_domain_kwargs: Dict,
metric_value_kwargs: Dict,
metrics: Dict[Tuple, Any],
runtime_configuration: Dict,
):
observed_value_counts = metrics["column.value_counts"]
return set(observed_value_counts.index)
@metric_value(engine=SparkDFExecutionEngine)
def _spark(
cls,
        execution_engine: "SparkDFExecutionEngine",
metric_domain_kwargs: Dict,
metric_value_kwargs: Dict,
metrics: Dict[Tuple, Any],
runtime_configuration: Dict,
):
observed_value_counts = metrics["column.value_counts"]
return set(observed_value_counts.index)
@classmethod
def _get_evaluation_dependencies(
cls,
metric: MetricConfiguration,
configuration: Optional[ExpectationConfiguration] = None,
execution_engine: Optional[ExecutionEngine] = None,
runtime_configuration: Optional[Dict] = None,
):
"""Returns a dictionary of given metric names and their corresponding configuration,
specifying the metric types and their respective domains"""
dependencies: dict = super()._get_evaluation_dependencies(
metric=metric,
configuration=configuration,
execution_engine=execution_engine,
runtime_configuration=runtime_configuration,
)
if isinstance(
execution_engine, (SqlAlchemyExecutionEngine, SparkDFExecutionEngine)
):
dependencies["column.value_counts"] = MetricConfiguration(
metric_name="column.value_counts",
metric_domain_kwargs=metric.metric_domain_kwargs,
metric_value_kwargs={
"sort": "value",
"collate": None,
},
)
return dependencies
class ColumnDistinctValuesCount(ColumnMetricProvider):
metric_name = "column.distinct_values.count"
@column_aggregate_value(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return column.nunique()
@metric_value(engine=SqlAlchemyExecutionEngine)
def _sqlalchemy(
cls,
execution_engine: "SqlAlchemyExecutionEngine",
metric_domain_kwargs: Dict,
metric_value_kwargs: Dict,
metrics: Dict[Tuple, Any],
runtime_configuration: Dict,
):
observed_value_counts = metrics["column.value_counts"]
return len(observed_value_counts)
@metric_value(engine=SparkDFExecutionEngine)
def _spark(
cls,
        execution_engine: "SparkDFExecutionEngine",
metric_domain_kwargs: Dict,
metric_value_kwargs: Dict,
metrics: Dict[Tuple, Any],
runtime_configuration: Dict,
):
observed_value_counts = metrics["column.value_counts"]
return len(observed_value_counts)
@classmethod
def _get_evaluation_dependencies(
cls,
metric: MetricConfiguration,
configuration: Optional[ExpectationConfiguration] = None,
execution_engine: Optional[ExecutionEngine] = None,
runtime_configuration: Optional[Dict] = None,
):
"""Returns a dictionary of given metric names and their corresponding configuration,
specifying the metric types and their respective domains"""
dependencies: dict = super()._get_evaluation_dependencies(
metric=metric,
configuration=configuration,
execution_engine=execution_engine,
runtime_configuration=runtime_configuration,
)
if isinstance(
execution_engine, (SqlAlchemyExecutionEngine, SparkDFExecutionEngine)
):
dependencies["column.value_counts"] = MetricConfiguration(
metric_name="column.value_counts",
metric_domain_kwargs=metric.metric_domain_kwargs,
metric_value_kwargs={
"sort": "value",
"collate": None,
},
)
return dependencies
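# A rough usage sketch (illustrative only; the column name is hypothetical).
# A validator would first resolve the "column.value_counts" dependency declared
# in _get_evaluation_dependencies above and then compute this metric:
#
#   metric = MetricConfiguration(
#       metric_name="column.distinct_values.count",
#       metric_domain_kwargs={"column": "passenger_count"},  # hypothetical column
#       metric_value_kwargs=None,
#   )
#   value = validator.get_metric(metric)  # assumes a configured Validator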
| apache-2.0 |
LohithBlaze/scikit-learn | sklearn/linear_model/tests/test_passive_aggressive.py | 121 | 6117 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal, assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import ClassifierMixin
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import PassiveAggressiveRegressor
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
class MyPassiveAggressive(ClassifierMixin):
def __init__(self, C=1.0, epsilon=0.01, loss="hinge",
fit_intercept=True, n_iter=1, random_state=None):
self.C = C
self.epsilon = epsilon
self.loss = loss
self.fit_intercept = fit_intercept
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
p = self.project(X[i])
if self.loss in ("hinge", "squared_hinge"):
loss = max(1 - y[i] * p, 0)
else:
loss = max(np.abs(p - y[i]) - self.epsilon, 0)
sqnorm = np.dot(X[i], X[i])
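                # Closed-form passive-aggressive step sizes (Crammer et al., 2006),
                # matching the branches below:
                #   PA-I  (hinge / epsilon_insensitive): step = min(C, loss / ||x||^2)
                #   PA-II (squared variants):            step = loss / (||x||^2 + 1/(2C))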
if self.loss in ("hinge", "epsilon_insensitive"):
step = min(self.C, loss / sqnorm)
elif self.loss in ("squared_hinge",
"squared_epsilon_insensitive"):
step = loss / (sqnorm + 1.0 / (2 * self.C))
if self.loss in ("hinge", "squared_hinge"):
step *= y[i]
else:
step *= np.sign(y[i] - p)
self.w += step * X[i]
if self.fit_intercept:
self.b += step
def project(self, X):
return np.dot(X, self.w) + self.b
def test_classifier_accuracy():
for data in (X, X_csr):
for fit_intercept in (True, False):
clf = PassiveAggressiveClassifier(C=1.0, n_iter=30,
fit_intercept=fit_intercept,
random_state=0)
clf.fit(data, y)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_partial_fit():
classes = np.unique(y)
for data in (X, X_csr):
clf = PassiveAggressiveClassifier(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(30):
clf.partial_fit(data, y, classes)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_refit():
# Classifier can be retrained on different labels and features.
clf = PassiveAggressiveClassifier().fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
clf.fit(X[:, :-1], iris.target_names[y])
assert_array_equal(clf.classes_, iris.target_names)
def test_classifier_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("hinge", "squared_hinge"):
clf1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
clf1.fit(X, y_bin)
for data in (X, X_csr):
clf2 = PassiveAggressiveClassifier(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
clf2.fit(data, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel(), decimal=2)
def test_classifier_undefined_methods():
clf = PassiveAggressiveClassifier()
for meth in ("predict_proba", "predict_log_proba", "transform"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
def test_regressor_mse():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
for fit_intercept in (True, False):
reg = PassiveAggressiveRegressor(C=1.0, n_iter=50,
fit_intercept=fit_intercept,
random_state=0)
reg.fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_partial_fit():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
reg = PassiveAggressiveRegressor(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(50):
reg.partial_fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("epsilon_insensitive", "squared_epsilon_insensitive"):
reg1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
reg1.fit(X, y_bin)
for data in (X, X_csr):
reg2 = PassiveAggressiveRegressor(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
reg2.fit(data, y_bin)
assert_array_almost_equal(reg1.w, reg2.coef_.ravel(), decimal=2)
def test_regressor_undefined_methods():
reg = PassiveAggressiveRegressor()
for meth in ("transform",):
assert_raises(AttributeError, lambda x: getattr(reg, x), meth)
| bsd-3-clause |
carrillo/scikit-learn | benchmarks/bench_covertype.py | 120 | 7381 | """
===========================
Covertype dataset benchmark
===========================
Benchmark stochastic gradient descent (SGD), Liblinear, Naive Bayes, CART
(decision tree), RandomForest and Extra-Trees on the forest covertype dataset
of Blackard, Jock, and Dean [1]. The dataset comprises 581,012 samples. It is
low dimensional with 54 features and a sparsity of approx. 23%. Here, we
consider the task of predicting class 1 (spruce/fir). The classification
performance of SGD is competitive with Liblinear while being two orders of
magnitude faster to train::
    [..]
    Classification performance:
    ===========================
    Classifier   train-time  test-time  error-rate
    --------------------------------------------
    liblinear     15.9744s    0.0705s       0.2305
    GaussianNB     3.0666s    0.3884s       0.4841
    SGD            1.0558s    0.1152s       0.2300
    CART          79.4296s    0.0523s       0.0469
    RandomForest 1190.1620s   0.5881s       0.0243
    ExtraTrees   640.3194s    0.6495s       0.0198
The same task has been used in a number of papers including:
* `"SVM Optimization: Inverse Dependence on Training Set Size"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.139.2112>`_
S. Shalev-Shwartz, N. Srebro - In Proceedings of ICML '08.
* `"Pegasos: Primal estimated sub-gradient solver for svm"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.74.8513>`_
S. Shalev-Shwartz, Y. Singer, N. Srebro - In Proceedings of ICML '07.
* `"Training Linear SVMs in Linear Time"
<www.cs.cornell.edu/People/tj/publications/joachims_06a.pdf>`_
T. Joachims - In SIGKDD '06
[1] http://archive.ics.uci.edu/ml/datasets/Covertype
"""
from __future__ import division, print_function
# Author: Peter Prettenhofer <[email protected]>
# Arnaud Joly <[email protected]>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_covtype, get_data_home
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier, LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import zero_one_loss
from sklearn.externals.joblib import Memory
from sklearn.utils import check_array
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'covertype_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='C', random_state=13):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
## Load dataset
print("Loading dataset...")
data = fetch_covtype(download_if_missing=True, shuffle=True,
random_state=random_state)
X = check_array(data['data'], dtype=dtype, order=order)
y = (data['target'] != 1).astype(np.int)
## Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 522911
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
## Standardize first 10 features (the numerical ones)
mean = X_train.mean(axis=0)
std = X_train.std(axis=0)
mean[10:] = 0.0
std[10:] = 1.0
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
return X_train, X_test, y_train, y_test
ESTIMATORS = {
'GBRT': GradientBoostingClassifier(n_estimators=250),
'ExtraTrees': ExtraTreesClassifier(n_estimators=20),
'RandomForest': RandomForestClassifier(n_estimators=20),
'CART': DecisionTreeClassifier(min_samples_split=5),
'SGD': SGDClassifier(alpha=0.001, n_iter=2),
'GaussianNB': GaussianNB(),
'liblinear': LinearSVC(loss="l2", penalty="l2", C=1000, dual=False,
tol=1e-3),
'SAG': LogisticRegression(solver='sag', max_iter=2, C=1000)
}
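# To benchmark an additional configuration, add an entry to ESTIMATORS above,
# e.g. a hypothetical 'ExtraTrees100': ExtraTreesClassifier(n_estimators=100),
# and select it via --classifiers.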
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['liblinear', 'GaussianNB', 'SGD', 'CART'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=13, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(
order=args["order"], random_state=args["random_seed"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of train samples:".ljust(25),
X_train.shape[0], np.sum(y_train == 1),
np.sum(y_train == 0), int(X_train.nbytes / 1e6)))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of test samples:".ljust(25),
X_test.shape[0], np.sum(y_test == 1),
np.sum(y_test == 0), int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("%s %s %s %s"
% ("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 44)
for name in sorted(args["classifiers"], key=error.get):
print("%s %s %s %s" % (name.ljust(12),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % error[name]).center(10)))
print()
| bsd-3-clause |
wetdesert/rad2py | ide2py/web2py.py | 8 | 5464 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"Async mono-thread web2py (development) server extension to ide2py"
__author__ = "Mariano Reingart ([email protected])"
__copyright__ = "Copyright (C) 2011 Mariano Reingart"
__license__ = "GPL 3.0"
# Just for debug by now, based on web2py widget app and stlib serve_forever
# WARNING: do not request a web2py page from main thread! (it will block!)
import os
import select
from wsgiref.simple_server import make_server, demo_app
from urlparse import urlparse
import sys
import traceback
import wx
if False:
# let pyinstaller to detect web2py modules
# hook-gluon.main.py is needed in pyinstaller/hooks
# with hiddenimports = gluon.import_all.base_modules
# web2py must be installed on parent folder
import gluon.main
# this libraries are required by psp2py
import matplotlib
import matplotlib.pyplot
import matplotlib.colors
import numpy
import pylab
ID_ATTACH = wx.NewId()
class Web2pyMixin(object):
"ide2py extension to execute web2py under debugger and shell"
def __init__(self):
self.menu['run'].Append(ID_ATTACH,
"Start &webserver\tCtrl-Alt-W",
"Start and attach embedded local web2py server")
self.Bind(wx.EVT_MENU, self.OnAttachWebserver, id=ID_ATTACH)
def OnAttachWebserver(self, event):
"start-up a web2py server instance"
# read configuration with safe defaults
cfg = wx.GetApp().get_config("WEB2PY")
path = cfg.get("path", "../web2py")
password = cfg.get("password", "a")
port = cfg.get("port", 8006)
host = "127.0.0.1"
if path:
# store current directory
prevdir = os.path.abspath(os.curdir)
try:
# update current directory and python path to find web2py:
os.chdir(path)
sys.path.insert(0, os.path.abspath(os.curdir))
from gluon.main import wsgibase, save_password
from gluon.contrib import qdb
# store admin password
save_password(password, port)
web2py_env = {} ##self.build_web2py_environment()
                # Start an alternate web2py instance in a separate thread (for blocking requests)
from threading import Thread
def server(host, port, password):
save_password(password, port)
qdb.init(redirect=False)
qdb.qdb.do_debug()
def wrapped_app(environ, start_response):
"WSGI wrapper to allow debugging"
                        # handshake with front-end on each request (update UI)
                        # not really needed (request processing is sequential)
##qdb.qdb.startup()
# process the request as usual
return wsgibase(environ, start_response)
httpd2 = make_server(host, port, wrapped_app)
print "THREAD - Serving HTTP on port2 %s..." % port
httpd2.serve_forever(poll_interval=0.01)
thread = Thread(target=server, args=(host, port, password))
thread.daemon = True # close on exit
thread.start()
# open internal browser at default page:
url = "http://%s:%s/" % (host, port)
if self.browser:
self.browser.LoadURL(url)
pass
else:
# no internal browser, open external one
try:
import webbrowser
webbrowser.open(url)
except:
print 'warning: unable to detect your browser'
except Exception, e:
self.ShowInfoBar(u"cannot start web2py!: %s" % unicode(e),
flags=wx.ICON_ERROR, key="web2py")
web2py_env = {}
finally:
# recover original directory
os.chdir(prevdir)
self.web2py_environment = web2py_env
def build_web2py_environment(self):
"build a namespace suitable for editor autocompletion and calltips"
        # warning: this can alter current global variables; use with care!
try:
from gluon.globals import Request, Response, Session
from gluon.compileapp import build_environment, DAL
request = Request({})
response = Response()
session = Session()
# fake request values
request.folder = ""
request.application = "welcome"
request.controller = "default"
request.function = "index"
ns = build_environment(request, response, session, )
# fake common model objects
db = ns['db'] = DAL("sqlite:memory")
from gluon.tools import Auth, Crud, Service
ns['auth'] = Auth(db)
ns['crud'] = Crud(db)
ns['service'] = Service()
except Exception, e:
traceback.print_exc()
ns = {}
return ns
def web2py_namespace(self):
return self.web2py_environment
| gpl-3.0 |
ML-KULeuven/socceraction | socceraction/xthreat.py | 1 | 13033 | # -*- coding: utf-8 -*-
"""Implements the xT framework."""
from typing import Callable, List, Tuple
import numpy as np # type: ignore
import pandas as pd # type: ignore
from pandera.typing import DataFrame, Series
import socceraction.spadl.config as spadlconfig
from socceraction.spadl.base import SPADLSchema
try:
from scipy.interpolate import interp2d # type: ignore
except ImportError:
interp2d = None
M: int = 12
N: int = 16
def _get_cell_indexes(x: Series, y: Series, l: int = N, w: int = M) -> Tuple[Series, Series]:
xmin = 0
ymin = 0
xi = (x - xmin) / spadlconfig.field_length * l
yj = (y - ymin) / spadlconfig.field_width * w
xi = xi.astype(int).clip(0, l - 1)
yj = yj.astype(int).clip(0, w - 1)
return xi, yj
def _get_flat_indexes(x: Series, y: Series, l: int = N, w: int = M) -> Series:
xi, yj = _get_cell_indexes(x, y, l, w)
return l * (w - 1 - yj) + xi
def _count(x: Series, y: Series, l: int = N, w: int = M) -> np.ndarray:
"""Count the number of actions occurring in each cell of the grid.
Parameters
----------
x : pd.Series
The x-coordinates of the actions.
y : pd.Series
The y-coordinates of the actions.
l : int
Amount of grid cells in the x-dimension of the grid.
w : int
Amount of grid cells in the y-dimension of the grid.
Returns
-------
np.ndarray
A matrix, denoting the amount of actions occurring in each cell. The
top-left corner is the origin.
"""
x = x[~np.isnan(x) & ~np.isnan(y)]
y = y[~np.isnan(x) & ~np.isnan(y)]
flat_indexes = _get_flat_indexes(x, y, l, w)
vc = flat_indexes.value_counts(sort=False)
vector = np.zeros(w * l)
vector[vc.index] = vc
return vector.reshape((w, l))
def _safe_divide(a: np.ndarray, b: np.ndarray) -> np.ndarray:
return np.divide(a, b, out=np.zeros_like(a), where=b != 0)
def scoring_prob(actions: DataFrame[SPADLSchema], l: int = N, w: int = M) -> np.ndarray:
"""Compute the probability of scoring when taking a shot for each cell.
Parameters
----------
actions : pd.DataFrame
Actions, in SPADL format.
l : int
Amount of grid cells in the x-dimension of the grid.
w : int
Amount of grid cells in the y-dimension of the grid.
Returns
-------
np.ndarray
A matrix, denoting the probability of scoring for each cell.
"""
shot_actions = actions[(actions.type_name == 'shot')]
goals = shot_actions[(shot_actions.result_name == 'success')]
shotmatrix = _count(shot_actions.start_x, shot_actions.start_y, l, w)
goalmatrix = _count(goals.start_x, goals.start_y, l, w)
return _safe_divide(goalmatrix, shotmatrix)
def get_move_actions(actions: DataFrame[SPADLSchema]) -> DataFrame[SPADLSchema]:
"""Get all ball-progressing actions.
These include passes, dribbles and crosses. Take-ons are ignored because
they typically coincide with dribbles and do not move the ball to
a different cell.
Parameters
----------
actions : pd.DataFrame
Actions, in SPADL format.
Returns
-------
pd.DataFrame
All ball-progressing actions in the input dataframe.
"""
return actions[
(actions.type_name == 'pass')
| (actions.type_name == 'dribble')
| (actions.type_name == 'cross')
]
def get_successful_move_actions(actions: DataFrame[SPADLSchema]) -> DataFrame[SPADLSchema]:
"""Get all successful ball-progressing actions.
These include successful passes, dribbles and crosses.
Parameters
----------
actions : pd.DataFrame
Actions, in SPADL format.
Returns
-------
pd.DataFrame
All ball-progressing actions in the input dataframe.
"""
move_actions = get_move_actions(actions)
return move_actions[move_actions.result_name == 'success']
def action_prob(
actions: DataFrame[SPADLSchema], l: int = N, w: int = M
) -> Tuple[np.ndarray, np.ndarray]:
"""Compute the probability of taking an action in each cell of the grid.
The options are: shooting or moving.
Parameters
----------
actions : pd.DataFrame
Actions, in SPADL format.
    l : int
        Amount of grid cells in the x-dimension of the grid.
    w : int
        Amount of grid cells in the y-dimension of the grid.
Returns
-------
shotmatrix : np.ndarray
For each cell the probability of choosing to shoot.
movematrix : np.ndarray
For each cell the probability of choosing to move.
"""
move_actions = get_move_actions(actions)
shot_actions = actions[(actions.type_name == 'shot')]
movematrix = _count(move_actions.start_x, move_actions.start_y, l, w)
shotmatrix = _count(shot_actions.start_x, shot_actions.start_y, l, w)
totalmatrix = movematrix + shotmatrix
return _safe_divide(shotmatrix, totalmatrix), _safe_divide(movematrix, totalmatrix)
def move_transition_matrix(actions: DataFrame[SPADLSchema], l: int = N, w: int = M) -> np.ndarray:
"""Compute the move transition matrix from the given actions.
This is, when a player chooses to move, the probability that he will
end up in each of the other cells of the grid successfully.
Parameters
----------
actions : pd.DataFrame
Actions, in SPADL format.
l : int
Amount of grid cells in the x-dimension of the grid.
w : int
Amount of grid cells in the y-dimension of the grid.
Returns
-------
np.ndarray
The transition matrix.
"""
move_actions = get_move_actions(actions)
X = pd.DataFrame()
X['start_cell'] = _get_flat_indexes(move_actions.start_x, move_actions.start_y, l, w)
X['end_cell'] = _get_flat_indexes(move_actions.end_x, move_actions.end_y, l, w)
X['result_name'] = move_actions.result_name
vc = X.start_cell.value_counts(sort=False)
start_counts = np.zeros(w * l)
start_counts[vc.index] = vc
transition_matrix = np.zeros((w * l, w * l))
for i in range(0, w * l):
vc2 = X[((X.start_cell == i) & (X.result_name == 'success'))].end_cell.value_counts(
sort=False
)
transition_matrix[i, vc2.index] = vc2 / start_counts[i]
return transition_matrix
class ExpectedThreat:
"""An implementation of the Expected Threat (xT) model [Singh2019]_.
Parameters
----------
l : int
Amount of grid cells in the x-dimension of the grid.
w : int
Amount of grid cells in the y-dimension of the grid.
eps : float
The desired precision to calculate the xT value of a cell. Default is
5 decimal places of precision (1e-5).
Attributes
----------
l : int
Amount of grid cells in the x-dimension of the grid.
w : int
Amount of grid cells in the y-dimension of the grid.
eps : float
The desired precision to calculate the xT value of a cell. Default is
5 decimal places of precision (1e-5).
heatmaps : list(np.ndarray)
The i-th element corresponds to the xT value surface after i iterations.
xT : np.ndarray
The final xT value surface.
scoring_prob_matrix : np.ndarray, shape(M,N)
The probability of scoring when taking a shot for each cell.
shot_prob_matrix : np.ndarray, shape(M,N)
The probability of choosing to shoot for each cell.
move_prob_matrix : np.ndarray, shape(M,N)
The probability of choosing to move for each cell.
transition_matrix : np.ndarray, shape(M*N,M*N)
When moving, the probability of moving to each of the other zones.
.. [Singh2019] Singh, Karun. "Introducing Expected Threat (xT)." 15 February, 2019.
https://karun.in/blog/expected-threat.html
"""
def __init__(self, l: int = N, w: int = M, eps: float = 1e-5):
self.l = l
self.w = w
self.eps = eps
self.heatmaps: List[np.ndarray] = []
self.xT: np.ndarray = np.zeros((w, l))
self.scoring_prob_matrix: np.ndarray = np.zeros((w, l))
self.shot_prob_matrix: np.ndarray = np.zeros((w, l))
self.move_prob_matrix: np.ndarray = np.zeros((w, l))
self.transition_matrix: np.ndarray = np.zeros((w * l, w * l))
def __solve(
self,
p_scoring: np.ndarray,
p_shot: np.ndarray,
p_move: np.ndarray,
transition_matrix: np.ndarray,
) -> None:
"""Solves the expected threat equation with dynamic programming.
Parameters
----------
p_scoring : (np.ndarray, shape(M, N)):
Probability of scoring at each grid cell, when shooting from that cell.
p_shot : (np.ndarray, shape(M,N)):
For each grid cell, the probability of choosing to shoot from there.
p_move : (np.ndarray, shape(M,N)):
For each grid cell, the probability of choosing to move from there.
transition_matrix : (np.ndarray, shape(M*N,M*N)):
When moving, the probability of moving to each of the other zones.
"""
gs = p_scoring * p_shot
diff = 1
it = 0
self.heatmaps.append(self.xT.copy())
while np.any(diff > self.eps):
total_payoff = np.zeros((self.w, self.l))
for y in range(0, self.w):
for x in range(0, self.l):
for q in range(0, self.w):
for z in range(0, self.l):
total_payoff[y, x] += (
transition_matrix[self.l * y + x, self.l * q + z] * self.xT[q, z]
)
newxT = gs + (p_move * total_payoff)
diff = newxT - self.xT
self.xT = newxT
self.heatmaps.append(self.xT.copy())
it += 1
print('# iterations: ', it)
def fit(self, actions: DataFrame[SPADLSchema]) -> 'ExpectedThreat':
"""Fits the xT model with the given actions.
Parameters
----------
actions : pd.DataFrame
Actions, in SPADL format.
Returns
-------
self
Fitted xT model.
"""
self.scoring_prob_matrix = scoring_prob(actions, self.l, self.w)
self.shot_prob_matrix, self.move_prob_matrix = action_prob(actions, self.l, self.w)
self.transition_matrix = move_transition_matrix(actions, self.l, self.w)
self.__solve(
self.scoring_prob_matrix,
self.shot_prob_matrix,
self.move_prob_matrix,
self.transition_matrix,
)
return self
def interpolator(self, kind: str = 'linear') -> Callable[[np.ndarray, np.ndarray], np.ndarray]:
"""Interpolate over the pitch.
This is a wrapper around :func:`scipy.interpolate.interp2d`.
Parameters
----------
kind : {'linear', 'cubic', 'quintic'}, optional
The kind of spline interpolation to use. Default is ‘linear’.
Returns
-------
callable
A function that interpolates xT values over the pitch.
"""
if interp2d is None:
raise ImportError('Interpolation requires scipy to be installed.')
cell_length = spadlconfig.field_length / self.l
cell_width = spadlconfig.field_width / self.w
x = np.arange(0.0, spadlconfig.field_length, cell_length) + 0.5 * cell_length
y = np.arange(0.0, spadlconfig.field_width, cell_width) + 0.5 * cell_width
return interp2d(x=x, y=y, z=self.xT, kind=kind, bounds_error=False)
def predict(
self, actions: DataFrame[SPADLSchema], use_interpolation: bool = False
) -> np.ndarray:
"""Predicts the xT values for the given actions.
Parameters
----------
actions : pd.DataFrame
Actions, in SPADL format.
use_interpolation : bool
Indicates whether to use bilinear interpolation when inferring xT
values. Note that this requires Scipy to be installed (pip install
scipy).
Returns
-------
np.ndarray
The xT value for each action.
"""
if not use_interpolation:
l = self.l
w = self.w
grid = self.xT
else:
# Use interpolation to create a
# more fine-grained 1050 x 680 grid
interp = self.interpolator()
l = int(spadlconfig.field_length * 10)
w = int(spadlconfig.field_width * 10)
xs = np.linspace(0, spadlconfig.field_length, l)
ys = np.linspace(0, spadlconfig.field_width, w)
grid = interp(xs, ys)
startxc, startyc = _get_cell_indexes(actions.start_x, actions.start_y, l, w)
endxc, endyc = _get_cell_indexes(actions.end_x, actions.end_y, l, w)
xT_start = grid[w - 1 - startyc, startxc]
xT_end = grid[w - 1 - endyc, endxc]
return xT_end - xT_start
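# A minimal usage sketch (assuming ``actions`` is a DataFrame of SPADL actions;
# the file name below is purely illustrative):
#
#   actions = pd.read_hdf("spadl_actions.h5", "actions")  # hypothetical input
#   xt_model = ExpectedThreat(l=16, w=12).fit(actions)
#   moves = get_successful_move_actions(actions)
#   xt_values = xt_model.predict(moves)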
| mit |
mlperf/training_results_v0.7 | Fujitsu/benchmarks/resnet/implementations/implementation_open/mxnet/3rdparty/tvm/nnvm/tutorials/from_mxnet.py | 2 | 5160 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
.. _tutorial-from-mxnet:
Compile MXNet Models
====================
**Author**: `Joshua Z. Zhang <https://zhreshold.github.io/>`_
This article is an introductory tutorial to deploy mxnet models with NNVM.
To begin, the mxnet module must be installed.
A quick solution is
.. code-block:: bash
pip install mxnet --user
or refer to the official installation guide:
https://mxnet.incubator.apache.org/versions/master/install/index.html
"""
# some standard imports
import mxnet as mx
import numpy as np
import nnvm
import tvm
from tvm.contrib.download import download_testdata
######################################################################
# Download Resnet18 model from Gluon Model Zoo
# ---------------------------------------------
# In this section, we download a pretrained imagenet model and classify an image.
from mxnet.gluon.model_zoo.vision import get_model
from PIL import Image
from matplotlib import pyplot as plt
block = get_model('resnet18_v1', pretrained=True)
img_url = 'https://github.com/dmlc/mxnet.js/blob/master/data/cat.png?raw=true'
img_name = 'cat.png'
synset_url = ''.join(['https://gist.githubusercontent.com/zhreshold/',
'4d0b62f3d01426887599d4f7ede23ee5/raw/',
'596b27d23537e5a1b5751d2b0481ef172f58b539/',
'imagenet1000_clsid_to_human.txt'])
synset_name = 'imagenet1000_clsid_to_human.txt'
img_path = download_testdata(img_url, img_name, module='data')
synset_path = download_testdata(synset_url, synset_name, module='data')
with open(synset_path) as f:
synset = eval(f.read())
image = Image.open(img_path).resize((224, 224))
plt.imshow(image)
plt.show()
def transform_image(image):
image = np.array(image) - np.array([123., 117., 104.])
image /= np.array([58.395, 57.12, 57.375])
image = image.transpose((2, 0, 1))
image = image[np.newaxis, :]
return image
x = transform_image(image)
print('x', x.shape)
######################################################################
# Compile the Graph
# -----------------
# Now we would like to port the Gluon model to a portable computational graph.
# It's as easy as several lines.
# We support MXNet static graph(symbol) and HybridBlock in mxnet.gluon
sym, params = nnvm.frontend.from_mxnet(block)
# we want a probability so add a softmax operator
sym = nnvm.sym.softmax(sym)
######################################################################
# now compile the graph
import nnvm.compiler
target = 'cuda'
shape_dict = {'data': x.shape}
with nnvm.compiler.build_config(opt_level=3):
graph, lib, params = nnvm.compiler.build(sym, target, shape_dict, params=params)
######################################################################
# Execute the portable graph on TVM
# ---------------------------------
# Now, we would like to reproduce the same forward computation using TVM.
from tvm.contrib import graph_runtime
ctx = tvm.gpu(0)
dtype = 'float32'
m = graph_runtime.create(graph, lib, ctx)
# set inputs
m.set_input('data', tvm.nd.array(x.astype(dtype)))
m.set_input(**params)
# execute
m.run()
# get outputs
tvm_output = m.get_output(0)
top1 = np.argmax(tvm_output.asnumpy()[0])
print('TVM prediction top-1:', top1, synset[top1])
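######################################################################
# As a quick sanity check we can also look at the five most likely classes;
# this small sketch only reuses the objects defined above.
top5 = np.argsort(tvm_output.asnumpy()[0])[::-1][:5]
for rank, idx in enumerate(top5, 1):
    print('TVM prediction top-%d:' % rank, idx, synset[idx])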
######################################################################
# Use MXNet symbol with pretrained weights
# ----------------------------------------
# MXNet often use `arg_params` and `aux_params` to store network parameters
# separately, here we show how to use these weights with existing API
def block2symbol(block):
data = mx.sym.Variable('data')
sym = block(data)
args = {}
auxs = {}
for k, v in block.collect_params().items():
args[k] = mx.nd.array(v.data().asnumpy())
return sym, args, auxs
mx_sym, args, auxs = block2symbol(block)
# usually we would save/load it as checkpoint
mx.model.save_checkpoint('resnet18_v1', 0, mx_sym, args, auxs)
# there are 'resnet18_v1-0000.params' and 'resnet18_v1-symbol.json' on disk
######################################################################
# for a normal mxnet model, we start from here
mx_sym, args, auxs = mx.model.load_checkpoint('resnet18_v1', 0)
# now we use the same API to get NNVM compatible symbol
nnvm_sym, nnvm_params = nnvm.frontend.from_mxnet(mx_sym, args, auxs)
# repeat the same steps to run this model using TVM
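######################################################################
# for example, the compilation step is the same as before (a sketch that
# reuses ``target`` and ``shape_dict`` defined above)
with nnvm.compiler.build_config(opt_level=3):
    graph, lib, params = nnvm.compiler.build(
        nnvm_sym, target, shape_dict, params=nnvm_params)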
| apache-2.0 |
legacysurvey/legacypipe | py/obiwan/decals_sim_unit_test.py | 2 | 6467 | """
unit test script for functions in decals_sim.py
-- creates a tim object and a sims stamp
-- allows the user to play with these on the command line and confirm that masking and invvar behave as expected, fluxes are correct, etc.
RUN:
python legacyanalysis/decals_sim_test_wone_tim.py
or
ipython
%run legacyanalysis/decals_sim_test_wone_tim.py
USE:
run with ipython, then you can play with the tim and stamp objects on the command line!
"""
from __future__ import division, print_function
import matplotlib
matplotlib.use('Agg')
import os
import sys
#from argparse import ArgumentParser
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from astropy.table import Table, Column, vstack
import galsim
import photutils
###
#from tractor.psfex import PsfEx, PixelizedPsfEx
#from tractor import Tractor
from tractor.basics import (NanoMaggies, PointSource, GaussianMixtureEllipsePSF,PixelizedPSF, RaDecPos)
#from astrometry.util.fits import fits_table
#from legacypipe.survey import wcs_for_brick
###
from legacyanalysis.decals_sim import SimDecals,get_metadata_other,get_ith_simcat
def get_one_tim(brickwcs=None, metacat=None, simcat=None, output_dir=None):
'''return metacat,simcat, one tim object
metacat,simcat -- from decals_sim
tim object -- for same ra,dec range of metacat,simcat'''
assert(brickwcs is not None and simcat is not None)
survey = SimDecals(metacat=metacat, simcat=simcat, output_dir=output_dir)
#brick = survey.get_brick_by_name(brickname)
#targetwcs = wcs_for_brick(brick) # W=W, H=H, pixscale=pixscale)
ccds = survey.ccds_touching_wcs(d['brickwcs'], ccdrad=None)
if ccds is None:
raise NothingToDoError('No CCDs touching brick')
print(len(ccds), 'CCDs touching target WCS')
# Sort images by band -- this also eliminates images whose
# *image.filter* string is not in *bands*.
print('Unique filters:', np.unique(ccds.filter))
bands='grz'
ccds.cut(np.hstack([np.flatnonzero(ccds.filter == band) for band in bands]))
print('Cut on filter:', len(ccds), 'CCDs remain.')
print('Cutting out non-photometric CCDs...')
I = survey.photometric_ccds(ccds)
print(len(I), 'of', len(ccds), 'CCDs are photometric')
ccds.cut(I)
#just first ccd
ccd= ccds[0]
# tim.data is image+sims, tim.sims_image is sims
im = survey.get_image_object(ccd)
kwargs = dict(pixPsf=True, splinesky=True)
tim = im.get_tractor_image(**kwargs)
return tim
#def get_metacat(brickname,objtype,nobj,chunksize,nchunk,zoom,rmag_range):
# '''following decals_sim'''
# metacols = [
# ('BRICKNAME', 'S10'),
# ('OBJTYPE', 'S10'),
# ('NOBJ', 'i4'),
# ('CHUNKSIZE', 'i2'),
# ('NCHUNK', 'i2'),
# ('ZOOM', 'i4', (4,)),
# ('SEED', 'S20'),
# ('RMAG_RANGE', 'f4', (2,))]
# metacat = Table(np.zeros(1, dtype=metacols))
#
# metacat['BRICKNAME'] = brickname
# metacat['OBJTYPE'] = objtype
# metacat['NOBJ'] = nobj
# metacat['CHUNKSIZE'] = chunksize
# metacat['NCHUNK'] = nchunk
# metacat['ZOOM'] = zoom
# metacat['RMAG_RANGE'] = rmag_range
# return metacat
def plot_tim(tim):
'''basic plotting func'''
fig = plt.figure(figsize=(5,10))
ax = fig.gca()
ax.get_xaxis().get_major_formatter().set_useOffset(False)
ax.imshow(tim.getImage(), **tim.ima)
ax.axis('off')
fig.savefig('./test.png',bbox_inches='tight')
def check_poisson_noise(stamp,ivarstamp,objstamp):
    '''each pixel of (stamp + noise image) - (stamp image) should be Gaussian distributed with std dev = sqrt(pixel value in stamp)'''
diff=np.zeros((stamp.array.shape[0],stamp.array.shape[1],1000))
for cnt in range(diff.shape[-1]):
stamp_copy= stamp.copy()
ivarstamp_copy= ivarstamp.copy()
stamp_copy, ivarstamp_copy = objstamp.addnoise(stamp_copy, ivarstamp_copy)
diff[:,:,cnt]= stamp_copy.array-stamp.array
one_std= np.sqrt( np.sqrt(stamp.array**2))
for x in np.arange(stamp.array.shape[0])[::4]:
for y in np.arange(stamp.array.shape[1])[::4]:
junk= plt.hist(diff[x,y,:],range=(-2*one_std[x,y],2*one_std[x,y]))
plt.savefig('x%d_y%d_hist.png')
plt.close()
#def main():
# Loook for data on Edison SCRATCH
os.environ['LEGACY_SURVEY_DIR']='/scratch1/scratchdirs/kaylanb/desi/dr3_brick_2523p355'
os.environ['DUST_DIR']='/scratch1/scratchdirs/kaylanb/desi/dr3_brick_2523p355/dust/v0_0'
# Decals Sim
d= get_metadata_other()
get_ith_simcat(1, d=d)
tim= get_one_tim(brickwcs=d['brickwcs'],metacat=d['metacat'], simcat=d['simcat'], \
output_dir=d['simcat_dir'])
# simcat X,Y may be outside image if data did not fill brick
xlim,ylim=tim.data.shape
keep=np.all((d['simcat']['X'] <= xlim-1,d['simcat']['Y'] <= ylim-1),axis=0)
# Aperture flux
#nobj,seed = 500,N
#metacat= get_metacat(args.brickname,'STAR',nobj,500,1,(0,3600,0,3600),(18, 26))
#simcat = build_simcat(nobj, args.brickname, brickwcs, metacat, seed)
#stamp_builder = BuildStamp(tim, gain=ccd.arawgain, seed=seed)
ap_flux=np.zeros(len(d['simcat'][keep]))-1
ap_size=7. #arcsec
pixsc=0.262 #decam
for i,obj in enumerate(d['simcat'][keep]):
aper=photutils.CircularAperture((obj['X'],obj['Y']),ap_size/pixsc)
p = photutils.aperture_photometry(tim.sims_image, aper) # error=np.zeros(stamp.array.shape)
ap_flux[i]= p['aperture_sum']
#PROBLEM: ap_flux all zeros
#stamp
#unit test after this or run from ipython to play with on command line
####
# check_poisson_noise()
####
#in decals_sim.py have a test option that if turned on makes 3panel yellow box plots, using code like below
#tim.sims_image= sims_image.array
#tim.sims_inverr= np.sqrt(sims_ivar.array)
#tim.sims_xy= tim.sims_xy.astype(int)
#tim.data = image.array + sims_image.array
#tim.inverr = np.sqrt(invvar.array + sims_ivar.array)
#plot image,image regions where have sims, just sims as 3 plot panel with yellow boxes
#basename= plots.get_basename(self.imgfn)
#plots.image_v_stamp([tim.data,tim.data-tim.sims_image,tim.sims_image], \
# xy_lim= tim.sims_xy, name=os.path.join(self.survey.output_dir,"image_v_stamp_%s.png" % basename))
#plots.image_v_stamp([np.power(tim.inverr,-1),np.power(tim.sims_inverr,-1)], \
# xy_lim= tim.sims_xy, titles=['image_std','sims_std'],\
#name=os.path.join(self.survey.output_dir,"std_%s.png" % basename))
#print('exiting early')
#sys.exit()
#if __name__ == "__main__":
# main()
| bsd-3-clause |
artmusic0/theano-learning.part02 | Training_data 4.0/rd_file_resize_rand_gz.py | 1 | 4144 | # -*- coding: utf-8 -*-
"""
Created on Thu Dec 24 04:03:19 2015
@author: winpython
"""
from matplotlib.pyplot import imshow
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
import cPickle, pickle
import gzip
thelist = np.array(['8_7', '11_1', '2_8', '13_4', '18_5', '7_1', '0_3', '1_0', '19_7', '3_3', '5_0',
'7_5', '7_3', '12_7', '6_4', '10_0', '10_1', '5_1', '10_8', '12_9', '8_2', '19_8',
'4_5', '14_9', '7_9', '19_2', '18_3', '15_1', '3_1', '6_1', '14_2', '7_4', '17_0',
'19_0', '5_4', '14_8', '15_5', '15_8', '7_6', '16_2', '8_3', '1_8', '13_8', '3_8',
'8_1', '11_8', '11_3', '17_5', '8_0', '8_6', '12_0', '18_2', '17_2', '17_8', '9_9',
'4_1', '11_0', '18_6', '13_3', '19_3', '10_5', '4_6', '5_3', '16_1', '12_3', '15_6',
'7_7', '17_4', '1_1', '4_7', '10_3', '3_6', '1_5', '11_4', '16_8', '9_3', '3_7',
'8_9', '13_9', '5_9', '11_9', '4_3', '0_2', '19_9', '2_0', '0_0', '10_9', '13_0',
'0_8', '8_5', '13_5', '8_8', '19_6', '12_1', '0_1', '4_8', '9_6', '0_4', '9_4',
'6_2', '19_5', '1_3', '17_3', '4_0', '19_4', '0_6', '9_0', '2_5', '14_5', '12_2',
'15_3', '18_4', '1_7', '11_5', '1_2', '1_4', '12_6', '18_8', '15_2', '16_7', '12_5',
'16_5', '10_2', '14_6', '5_8', '4_4', '15_4', '13_6', '16_4', '3_4', '19_1', '14_4',
'4_9', '6_8', '0_9', '1_6', '15_0', '5_7', '14_7', '2_3', '5_6', '14_0', '2_4',
'10_6', '17_6', '11_7', '13_2', '6_3', '0_5', '2_1', '3_2', '11_2', '2_9', '14_3',
'16_3', '17_9', '5_2', '18_1', '12_8', '6_5', '9_7', '9_8', '9_1', '6_6', '11_6',
'7_2', '8_4', '9_2', '5_5', '18_7', '16_0', '3_5', '14_1', '2_7', '13_7', '4_2',
'6_9', '3_0', '13_1', '1_9', '18_9', '7_8', '17_7', '16_6', '17_1', '9_5', '3_9',
'0_7', '18_0', '6_0', '6_7', '2_6', '15_9', '15_7', '10_4', '10_7', '16_9', '7_0',
'12_4', '2_2'])
final_output = np.zeros((200,147456),dtype=np.float32)
final_label = np.array([8, 11, 2, 13, 18, 7, 0, 1, 19, 3, 5, 7, 7, 12, 6, 10, 10, 5, 10, 12, 8, 19, 4, 14, 7, 19, 18, 15, 3, 6, 14, 7, 17, 19, 5, 14, 15, 15, 7, 16, 8, 1, 13, 3, 8, 11, 11, 17, 8, 8, 12, 18, 17, 17, 9, 4, 11, 18, 13, 19, 10, 4, 5, 16, 12, 15, 7, 17, 1, 4, 10, 3, 1, 11, 16, 9, 3, 8, 13, 5, 11, 4, 0, 19, 2, 0, 10, 13, 0, 8, 13, 8, 19, 12, 0, 4, 9, 0, 9, 6, 19, 1, 17, 4, 19, 0, 9, 2, 14, 12, 15, 18, 1, 11, 1, 1, 12, 18, 15, 16, 12, 16, 10, 14, 5, 4, 15, 13, 16, 3, 19, 14, 4, 6, 0, 1, 15, 5, 14, 2, 5, 14, 2, 10, 17, 11, 13, 6, 0, 2, 3, 11, 2, 14, 16, 17, 5, 18, 12, 6, 9, 9, 9, 6, 11, 7, 8, 9, 5, 18, 16, 3, 14, 2, 13, 4, 6, 3, 13, 1, 18, 7, 17, 16, 17, 9, 3, 0, 18, 6, 6, 2, 15, 15, 10, 10, 16, 7, 12, 2],dtype=np.int64)
for i in range(200):
print "reading", i, "..."
pil_im = Image.open( "training_font_transparent/" + thelist[i] + ".jpg" ).convert('L')
#imshow(np.asarray(pil_im)) # before resize
pil_im = pil_im.resize((512, 288), Image.BILINEAR )
pil_im = np.array(pil_im)
fig = plt.figure()
plotwindow = fig.add_subplot()
plt.imshow(pil_im, cmap='gray')
plt.show()
#print("test")
#print(pil_im)
note = 0
for j in range(288):
for k in range(512):
            final_output[i][note] = ((255 - pil_im[j][k]) / 255.)
note += 1
print " "
print "Finished Picture..."
print "Starting label"
print "Finished Labeling..."
print "Starting cpickle"
outputandlabel = final_output, final_label
f = gzip.open("training_data_200v4.pkl.gz", 'wb')
cPickle.dump(outputandlabel, f)
f.close()
print "Finished cPickle..."
print "\ ! congradulation ! /"
#f = open("pic1.txt", "r")
'''
imshow(np.asarray(pil_im)) # before resize
pil_im = pil_im.resize((28, 28), Image.BILINEAR )
pil_im = np.array(pil_im)
#print(np.array(pil_im))
#imshow(np.asarray(pil_im))
fig = plt.figure()
plotwindow = fig.add_subplot()
plt.imshow(pil_im, cmap='gray')
plt.show()
print("test")
print(pil_im)
''' | gpl-3.0 |
fredhusser/scikit-learn | sklearn/metrics/cluster/tests/test_bicluster.py | 394 | 1770 | """Testing for bicluster metrics module"""
import numpy as np
from sklearn.utils.testing import assert_equal, assert_almost_equal
from sklearn.metrics.cluster.bicluster import _jaccard
from sklearn.metrics import consensus_score
def test_jaccard():
a1 = np.array([True, True, False, False])
a2 = np.array([True, True, True, True])
a3 = np.array([False, True, True, False])
a4 = np.array([False, False, True, True])
assert_equal(_jaccard(a1, a1, a1, a1), 1)
assert_equal(_jaccard(a1, a1, a2, a2), 0.25)
assert_equal(_jaccard(a1, a1, a3, a3), 1.0 / 7)
assert_equal(_jaccard(a1, a1, a4, a4), 0)
def test_consensus_score():
a = [[True, True, False, False],
[False, False, True, True]]
b = a[::-1]
assert_equal(consensus_score((a, a), (a, a)), 1)
assert_equal(consensus_score((a, a), (b, b)), 1)
assert_equal(consensus_score((a, b), (a, b)), 1)
assert_equal(consensus_score((a, b), (b, a)), 1)
assert_equal(consensus_score((a, a), (b, a)), 0)
assert_equal(consensus_score((a, a), (a, b)), 0)
assert_equal(consensus_score((b, b), (a, b)), 0)
assert_equal(consensus_score((b, b), (b, a)), 0)
def test_consensus_score_issue2445():
''' Different number of biclusters in A and B'''
a_rows = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
a_cols = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
idx = [0, 2]
s = consensus_score((a_rows, a_cols), (a_rows[idx], a_cols[idx]))
# B contains 2 of the 3 biclusters in A, so score should be 2/3
assert_almost_equal(s, 2.0/3.0)
| bsd-3-clause |
pravsripad/mne-python | tutorials/evoked/plot_eeg_erp.py | 4 | 10599 | """
.. _tut_erp:
EEG processing and Event Related Potentials (ERPs)
==================================================
"""
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.channels import combine_channels
###############################################################################
# Setup for reading the raw data
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
raw = mne.io.read_raw_fif(raw_fname)
###############################################################################
# Let's restrict the data to the EEG channels
raw.pick_types(meg=False, eeg=True, eog=True).load_data()
# This particular dataset already has an average reference projection added
# that we now want to remove for the sake of this example.
raw.set_eeg_reference([])
###############################################################################
# By looking at the measurement info you will see that we now have
# 59 EEG channels and 1 EOG channel
print(raw.info)
###############################################################################
# In practice it's quite common to have some EEG channels that are actually
# EOG channels. To change a channel type you can use the
# :func:`mne.io.Raw.set_channel_types` method. For example
# to treat an EOG channel as EEG you can change its type using
raw.set_channel_types(mapping={'EOG 061': 'eeg'})
print(raw.info)
###############################################################################
# And to change the name of the EOG channel
raw.rename_channels(mapping={'EOG 061': 'EOG'})
###############################################################################
# Let's reset the EOG channel back to EOG type.
raw.set_channel_types(mapping={'EOG': 'eog'})
###############################################################################
# The EEG channels in the sample dataset already have locations.
# These locations are available in the 'loc' field of each channel description.
# For the first channel we get
print(raw.info['chs'][0]['loc'])
###############################################################################
# And it's actually possible to plot the channel locations using
# :func:`mne.io.Raw.plot_sensors`.
# In the case where your data don't have locations you can use one of the
# standard :class:`Montages <mne.channels.DigMontage>` shipped with MNE.
# See :ref:`plot_montage` and :ref:`tut-eeg-fsaverage-source-modeling`.
raw.plot_sensors()
raw.plot_sensors('3d') # in 3D
###############################################################################
# Setting EEG reference
# ---------------------
#
# Let's first inspect our Raw object with its original reference that was
# applied during the recording of the data.
# We define Epochs and compute an ERP for the left auditory condition.
reject = dict(eeg=180e-6, eog=150e-6)
event_id, tmin, tmax = {'left/auditory': 1}, -0.2, 0.5
events = mne.read_events(event_fname)
epochs_params = dict(events=events, event_id=event_id, tmin=tmin, tmax=tmax,
reject=reject)
evoked_no_ref = mne.Epochs(raw, **epochs_params).average()
title = 'EEG Original reference'
evoked_no_ref.plot(titles=dict(eeg=title), time_unit='s')
evoked_no_ref.plot_topomap(times=[0.1], size=3., title=title, time_unit='s')
###############################################################################
# **Common average reference (car)**: We add back the average reference
# projection that we removed at the beginning of this example (right after
# loading the data).
raw_car, _ = mne.set_eeg_reference(raw, 'average', projection=True)
evoked_car = mne.Epochs(raw_car, **epochs_params).average()
del raw_car # save memory
title = 'EEG Average reference'
evoked_car.plot(titles=dict(eeg=title), time_unit='s')
evoked_car.plot_topomap(times=[0.1], size=3., title=title, time_unit='s')
###############################################################################
# **Custom reference**: Use the mean of channels EEG 001 and EEG 002 as
# a reference
raw_custom, _ = mne.set_eeg_reference(raw, ['EEG 001', 'EEG 002'])
evoked_custom = mne.Epochs(raw_custom, **epochs_params).average()
del raw_custom # save memory
title = 'EEG Custom reference'
evoked_custom.plot(titles=dict(eeg=title), time_unit='s')
evoked_custom.plot_topomap(times=[0.1], size=3., title=title, time_unit='s')
###############################################################################
# Global field power (GFP)
# ------------------------
#
# Global field power :footcite:`Lehmann1980,Lehmann1984,Murray2008` is,
# generally speaking, a measure of agreement of the signals picked up by all
# sensors across the entire scalp: if all sensors have the same value at a
# given time point, the GFP will be zero at that time point; if the signals
# differ, the GFP will be non-zero at that time point. GFP
# peaks may reflect "interesting" brain activity, warranting further
# investigation. Mathematically, the GFP is the population standard
# deviation across all sensors, calculated separately for every time point.
#
# You can plot the GFP using `evoked.plot(gfp=True) <mne.Evoked.plot>`. The GFP
# trace will be black if ``spatial_colors=True`` and green otherwise. The EEG
# reference will not affect the GFP:
for evk in (evoked_car, evoked_no_ref):
evk.plot(gfp=True, spatial_colors=True, ylim=dict(eeg=[-10, 10]))
###############################################################################
# To plot the GFP by itself you can pass ``gfp='only'`` (this makes it easier
# to read off the GFP data values, because the scale is aligned):
evoked_car.plot(gfp='only')
###############################################################################
# As stated above, the GFP is the population standard deviation of the signal
# across channels. To compute it manually, we can leverage
# the fact that `evoked.data <mne.Evoked.data>` is a NumPy array:
gfp = evoked_car.data.std(axis=0, ddof=0)
# Reproducing the plot style from above:
fig, ax = plt.subplots()
ax.plot(evoked_car.times, gfp * 1e6, color='lime')
ax.fill_between(evoked_car.times, gfp * 1e6, color='lime', alpha=0.2)
ax.set(xlabel='Time (s)', ylabel='GFP (µV)', title='EEG')
###############################################################################
# Evoked response averaged across channels by ROI
# -----------------------------------------------
#
# It is possible to average channels by region of interest (for example left
# and right) when studying the response to this left auditory stimulus. Here we
# use our Raw object on which the average reference projection has been added
# back.
evoked = mne.Epochs(raw, **epochs_params).average()
left_idx = mne.pick_channels(evoked.info['ch_names'],
['EEG 017', 'EEG 018', 'EEG 025', 'EEG 026'])
right_idx = mne.pick_channels(evoked.info['ch_names'],
['EEG 023', 'EEG 024', 'EEG 034', 'EEG 035'])
roi_dict = dict(Left=left_idx, Right=right_idx)
evoked_combined = combine_channels(evoked, roi_dict, method='mean')
title = 'Evoked response averaged by side'
evoked_combined.plot(titles=dict(eeg=title), time_unit='s')
###############################################################################
# Evoked arithmetic (e.g. differences)
# ------------------------------------
#
# Trial subsets from Epochs can be selected using 'tags' separated by '/'.
# Evoked objects support basic arithmetic.
# First, we create an Epochs object containing 4 conditions.
event_id = {'left/auditory': 1, 'right/auditory': 2,
'left/visual': 3, 'right/visual': 4}
epochs_params = dict(events=events, event_id=event_id, tmin=tmin, tmax=tmax,
reject=reject)
epochs = mne.Epochs(raw, **epochs_params)
print(epochs)
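# Because conditions are tagged with '/'-separated labels, a partial tag pools
# every matching condition. For example (shown only as a sketch of the
# selection syntax, not used further below):
# epochs["auditory"]  # pools the 'left/auditory' and 'right/auditory' trials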
###############################################################################
# Next, we create averages of stimulation-left vs stimulation-right trials.
# We can use negative weights in `mne.combine_evoked` to construct difference
# ERPs.
left, right = epochs["left"].average(), epochs["right"].average()
# create and plot difference ERP
joint_kwargs = dict(ts_args=dict(time_unit='s'),
topomap_args=dict(time_unit='s'))
mne.combine_evoked([left, right], weights=[1, -1]).plot_joint(**joint_kwargs)
###############################################################################
# This is an equal-weighting difference. If you have imbalanced trial numbers,
# you could also consider either equalizing the number of events per
# condition (using
# `epochs.equalize_event_counts <mne.Epochs.equalize_event_counts>`) or
# using weights proportional to the number of trials averaged together to create
# each `~mne.Evoked` (by passing ``weights='nave'`` to `~mne.combine_evoked`).
# As an example, first, we create individual ERPs for each condition.
aud_l = epochs["auditory/left"].average()
aud_r = epochs["auditory/right"].average()
vis_l = epochs["visual/left"].average()
vis_r = epochs["visual/right"].average()
all_evokeds = [aud_l, aud_r, vis_l, vis_r]
print(all_evokeds)
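# A sketch of the ``weights='nave'`` alternative mentioned above, kept
# commented out here (it relies on `mne.combine_evoked` accepting that
# keyword value, as described in the text):
# combined_nave = mne.combine_evoked([aud_l, aud_r], weights='nave')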
###############################################################################
# This can be simplified with a Python list comprehension:
all_evokeds = [epochs[cond].average() for cond in sorted(event_id.keys())]
print(all_evokeds)
# Then, we can construct and plot an unweighted average of left vs. right
# trials this way, too:
mne.combine_evoked(
all_evokeds, weights=[0.5, 0.5, -0.5, -0.5]).plot_joint(**joint_kwargs)
###############################################################################
# Often, it makes sense to store Evoked objects in a dictionary or a list -
# either different conditions, or different subjects.
# If they are stored in a list, they can be easily averaged, for example,
# for a grand average across subjects (or conditions).
grand_average = mne.grand_average(all_evokeds)
# And they can be written to disk like any other evoked data, e.g.:
# mne.write_evokeds('tmp-ave.fif', all_evokeds)
# If Evokeds objects are stored in a dictionary, they can be retrieved by name.
all_evokeds = dict((cond, epochs[cond].average()) for cond in event_id)
print(all_evokeds['left/auditory'])
# Besides explicit access, this can be used, for example, to set titles.
for cond in all_evokeds:
all_evokeds[cond].plot_joint(title=cond, **joint_kwargs)
##############################################################################
# References
# ----------
# .. footbibliography::
| bsd-3-clause |
wateraccounting/wa | Functions/Two/Calc_NDM.py | 1 | 4771 | # -*- coding: utf-8 -*-
"""
Authors: Tim Hessels
UNESCO-IHE 2017
Contact: [email protected]
Repository: https://github.com/wateraccounting/wa
Module: Function/Two
"""
# import general python modules
import os
import gdal
import numpy as np
import pandas as pd
import glob
def NPP_GPP_Based(Dir_Basin, Data_Path_GPP, Data_Path_NPP, Startdate, Enddate):
"""
    This function calculates the monthly NDM based on the yearly NPP and monthly GPP.
Parameters
----------
Dir_Basin : str
Path to all the output data of the Basin
Data_Path_GPP : str
Path from the Dir_Basin to the GPP data
Data_Path_NPP : str
Path from the Dir_Basin to the NPP data
Startdate : str
Contains the start date of the model 'yyyy-mm-dd'
Enddate : str
Contains the end date of the model 'yyyy-mm-dd'
Returns
-------
Data_Path_NDM : str
Path from the Dir_Basin to the normalized dry matter data
"""
# import WA+ modules
import wa.General.data_conversions as DC
import wa.General.raster_conversions as RC
# Define output folder for Normalized Dry Matter
Data_Path_NDM = os.path.join(Dir_Basin, "NDM")
if not os.path.exists(Data_Path_NDM):
os.mkdir(Data_Path_NDM)
# Define monthly time steps that will be created
Dates = pd.date_range(Startdate, Enddate, freq = 'MS')
# Define the years that will be calculated
Year_Start = int(Startdate[0:4])
Year_End = int(Enddate[0:4])
Years = range(Year_Start, Year_End+1)
# Loop over the years
for year in Years:
# Change working directory to the NPP folder
os.chdir(Data_Path_NPP)
# Open yearly NPP data
yearly_NPP_File = glob.glob('*yearly*%d.01.01.tif' %int(year))[0]
Yearly_NPP = RC.Open_tiff_array(yearly_NPP_File)
# Get the No Data Value of the NPP file
dest = gdal.Open(yearly_NPP_File)
NDV = dest.GetRasterBand(1).GetNoDataValue()
# Set the No Data Value to Nan
Yearly_NPP[Yearly_NPP == NDV] = np.nan
# Change working directory to the GPP folder
os.chdir(Data_Path_GPP)
# Find all the monthly files of that year
monthly_GPP_Files = glob.glob('*monthly*%d.*.01.tif' %int(year))
        # Check that there are 12 monthly files, otherwise something is wrong and report an error
        if not len(monthly_GPP_Files) == 12:
            print('ERROR: Some monthly GPP Files are missing')
# Get the projection information of the GPP inputs
geo_out, proj, size_X, size_Y = RC.Open_array_info(monthly_GPP_Files[0])
geo_out_NPP, proj_NPP, size_X_NPP, size_Y_NPP = RC.Open_array_info(os.path.join(Data_Path_NPP,yearly_NPP_File))
if int(proj.split('"')[-2]) == 4326:
proj = "WGS84"
# Get the No Data Value of the GPP files
dest = gdal.Open(monthly_GPP_Files[0])
NDV = dest.GetRasterBand(1).GetNoDataValue()
        # Create an empty numpy array
Yearly_GPP = np.zeros([size_Y, size_X])
        # Calculate the total yearly GPP
for monthly_GPP_File in monthly_GPP_Files:
# Open array
Data = RC.Open_tiff_array(monthly_GPP_File)
# Remove nan values
Data[Data == NDV] = np.nan
# Add data to yearly sum
Yearly_GPP += Data
        # Check that the NPP and GPP arrays have the same size, otherwise resize NPP
        if not (size_X_NPP == size_X and size_Y_NPP == size_Y):
Yearly_NPP = RC.resize_array_example(Yearly_NPP, Yearly_GPP)
# Loop over the monthly dates
for Date in Dates:
# If the Date is in the same year as the yearly NPP and GPP
if Date.year == year:
# Create empty GPP array
monthly_GPP = np.ones([size_Y, size_X]) * np.nan
# Get current month
month = Date.month
# Get the GPP file of the current year and month
monthly_GPP_File = glob.glob('*monthly_%d.%02d.01.tif' %(int(year), int(month)))[0]
monthly_GPP = RC.Open_tiff_array(monthly_GPP_File)
monthly_GPP[monthly_GPP == NDV] = np.nan
# Calculate the NDM based on the monthly and yearly NPP and GPP (fraction of GPP)
Monthly_NDM = Yearly_NPP * monthly_GPP / Yearly_GPP * (30./12.) *10000 # kg/ha
# Define output name
output_name = os.path.join(Data_Path_NDM, 'NDM_MOD17_kg_ha-1_monthly_%d.%02d.01.tif' %(int(year), int(month)))
# Save the NDM as tiff file
DC.Save_as_tiff(output_name, Monthly_NDM, geo_out, proj)
return(Data_Path_NDM)
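# A minimal usage sketch (not part of the original module; the basin layout,
# paths and dates below are hypothetical):
# Dir_Basin = r'C:\WA_Basins\Example_Basin'
# Data_Path_NDM = NPP_GPP_Based(Dir_Basin,
#                               os.path.join(Dir_Basin, 'GPP'),
#                               os.path.join(Dir_Basin, 'NPP'),
#                               '2010-01-01', '2010-12-31')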
| apache-2.0 |
madan96/sympy | sympy/physics/quantum/circuitplot.py | 28 | 12934 | """Matplotlib based plotting of quantum circuits.
Todo:
* Optimize printing of large circuits.
* Get this to work with single gates.
* Do a better job checking the form of circuits to make sure it is a Mul of
Gates.
* Get multi-target gates plotting.
* Get initial and final states to plot.
* Get measurements to plot. Might need to rethink measurement as a gate
issue.
* Get scale and figsize to be handled in a better way.
* Write some tests/examples!
"""
from __future__ import print_function, division
from sympy import Mul
from sympy.core.compatibility import range
from sympy.external import import_module
from sympy.physics.quantum.gate import Gate, OneQubitGate, CGate, CGateS
from sympy.core.core import BasicMeta
from sympy.core.assumptions import ManagedProperties
__all__ = [
'CircuitPlot',
'circuit_plot',
'labeller',
'Mz',
'Mx',
'CreateOneQubitGate',
'CreateCGate',
]
np = import_module('numpy')
matplotlib = import_module(
'matplotlib', __import__kwargs={'fromlist': ['pyplot']},
catch=(RuntimeError,)) # This is raised in environments that have no display.
if not np or not matplotlib:
class CircuitPlot(object):
def __init__(*args, **kwargs):
raise ImportError('numpy or matplotlib not available.')
def circuit_plot(*args, **kwargs):
raise ImportError('numpy or matplotlib not available.')
else:
pyplot = matplotlib.pyplot
Line2D = matplotlib.lines.Line2D
Circle = matplotlib.patches.Circle
#from matplotlib import rc
#rc('text',usetex=True)
class CircuitPlot(object):
"""A class for managing a circuit plot."""
scale = 1.0
fontsize = 20.0
linewidth = 1.0
control_radius = 0.05
not_radius = 0.15
swap_delta = 0.05
labels = []
inits = {}
label_buffer = 0.5
def __init__(self, c, nqubits, **kwargs):
self.circuit = c
self.ngates = len(self.circuit.args)
self.nqubits = nqubits
self.update(kwargs)
self._create_grid()
self._create_figure()
self._plot_wires()
self._plot_gates()
self._finish()
def update(self, kwargs):
"""Load the kwargs into the instance dict."""
self.__dict__.update(kwargs)
def _create_grid(self):
"""Create the grid of wires."""
scale = self.scale
wire_grid = np.arange(0.0, self.nqubits*scale, scale, dtype=float)
gate_grid = np.arange(0.0, self.ngates*scale, scale, dtype=float)
self._wire_grid = wire_grid
self._gate_grid = gate_grid
def _create_figure(self):
"""Create the main matplotlib figure."""
self._figure = pyplot.figure(
figsize=(self.ngates*self.scale, self.nqubits*self.scale),
facecolor='w',
edgecolor='w'
)
ax = self._figure.add_subplot(
1, 1, 1,
frameon=True
)
ax.set_axis_off()
offset = 0.5*self.scale
ax.set_xlim(self._gate_grid[0] - offset, self._gate_grid[-1] + offset)
ax.set_ylim(self._wire_grid[0] - offset, self._wire_grid[-1] + offset)
ax.set_aspect('equal')
self._axes = ax
def _plot_wires(self):
"""Plot the wires of the circuit diagram."""
xstart = self._gate_grid[0]
xstop = self._gate_grid[-1]
xdata = (xstart - self.scale, xstop + self.scale)
for i in range(self.nqubits):
ydata = (self._wire_grid[i], self._wire_grid[i])
line = Line2D(
xdata, ydata,
color='k',
lw=self.linewidth
)
self._axes.add_line(line)
if self.labels:
init_label_buffer = 0
if self.inits.get(self.labels[i]): init_label_buffer = 0.25
self._axes.text(
xdata[0]-self.label_buffer-init_label_buffer,ydata[0],
render_label(self.labels[i],self.inits),
size=self.fontsize,
color='k',ha='center',va='center')
self._plot_measured_wires()
def _plot_measured_wires(self):
ismeasured = self._measurements()
xstop = self._gate_grid[-1]
dy = 0.04 # amount to shift wires when doubled
# Plot doubled wires after they are measured
for im in ismeasured:
xdata = (self._gate_grid[ismeasured[im]],xstop+self.scale)
ydata = (self._wire_grid[im]+dy,self._wire_grid[im]+dy)
line = Line2D(
xdata, ydata,
color='k',
lw=self.linewidth
)
self._axes.add_line(line)
# Also double any controlled lines off these wires
for i,g in enumerate(self._gates()):
if isinstance(g, CGate) or isinstance(g, CGateS):
wires = g.controls + g.targets
for wire in wires:
if wire in ismeasured and \
self._gate_grid[i] > self._gate_grid[ismeasured[wire]]:
ydata = min(wires), max(wires)
xdata = self._gate_grid[i]-dy, self._gate_grid[i]-dy
line = Line2D(
xdata, ydata,
color='k',
lw=self.linewidth
)
self._axes.add_line(line)
def _gates(self):
"""Create a list of all gates in the circuit plot."""
gates = []
if isinstance(self.circuit, Mul):
for g in reversed(self.circuit.args):
if isinstance(g, Gate):
gates.append(g)
elif isinstance(self.circuit, Gate):
gates.append(self.circuit)
return gates
def _plot_gates(self):
"""Iterate through the gates and plot each of them."""
for i, gate in enumerate(self._gates()):
gate.plot_gate(self, i)
def _measurements(self):
"""Return a dict {i:j} where i is the index of the wire that has
been measured, and j is the gate where the wire is measured.
"""
ismeasured = {}
for i,g in enumerate(self._gates()):
if getattr(g,'measurement',False):
for target in g.targets:
if target in ismeasured:
if ismeasured[target] > i:
ismeasured[target] = i
else:
ismeasured[target] = i
return ismeasured
def _finish(self):
# Disable clipping to make panning work well for large circuits.
for o in self._figure.findobj():
o.set_clip_on(False)
def one_qubit_box(self, t, gate_idx, wire_idx):
"""Draw a box for a single qubit gate."""
x = self._gate_grid[gate_idx]
y = self._wire_grid[wire_idx]
self._axes.text(
x, y, t,
color='k',
ha='center',
va='center',
bbox=dict(ec='k', fc='w', fill=True, lw=self.linewidth),
size=self.fontsize
)
def two_qubit_box(self, t, gate_idx, wire_idx):
"""Draw a box for a two qubit gate. Doesn't work yet.
"""
x = self._gate_grid[gate_idx]
y = self._wire_grid[wire_idx]+0.5
print(self._gate_grid)
print(self._wire_grid)
obj = self._axes.text(
x, y, t,
color='k',
ha='center',
va='center',
bbox=dict(ec='k', fc='w', fill=True, lw=self.linewidth),
size=self.fontsize
)
def control_line(self, gate_idx, min_wire, max_wire):
"""Draw a vertical control line."""
xdata = (self._gate_grid[gate_idx], self._gate_grid[gate_idx])
ydata = (self._wire_grid[min_wire], self._wire_grid[max_wire])
line = Line2D(
xdata, ydata,
color='k',
lw=self.linewidth
)
self._axes.add_line(line)
def control_point(self, gate_idx, wire_idx):
"""Draw a control point."""
x = self._gate_grid[gate_idx]
y = self._wire_grid[wire_idx]
radius = self.control_radius
c = Circle(
(x, y),
radius*self.scale,
ec='k',
fc='k',
fill=True,
lw=self.linewidth
)
self._axes.add_patch(c)
def not_point(self, gate_idx, wire_idx):
"""Draw a NOT gates as the circle with plus in the middle."""
x = self._gate_grid[gate_idx]
y = self._wire_grid[wire_idx]
radius = self.not_radius
c = Circle(
(x, y),
radius,
ec='k',
fc='w',
fill=False,
lw=self.linewidth
)
self._axes.add_patch(c)
l = Line2D(
(x, x), (y - radius, y + radius),
color='k',
lw=self.linewidth
)
self._axes.add_line(l)
def swap_point(self, gate_idx, wire_idx):
"""Draw a swap point as a cross."""
x = self._gate_grid[gate_idx]
y = self._wire_grid[wire_idx]
d = self.swap_delta
l1 = Line2D(
(x - d, x + d),
(y - d, y + d),
color='k',
lw=self.linewidth
)
l2 = Line2D(
(x - d, x + d),
(y + d, y - d),
color='k',
lw=self.linewidth
)
self._axes.add_line(l1)
self._axes.add_line(l2)
def circuit_plot(c, nqubits, **kwargs):
"""Draw the circuit diagram for the circuit with nqubits.
Parameters
==========
c : circuit
The circuit to plot. Should be a product of Gate instances.
nqubits : int
The number of qubits to include in the circuit. Must be at least
as big as the largest `min_qubits`` of the gates.
"""
return CircuitPlot(c, nqubits, **kwargs)
def render_label(label, inits={}):
"""Slightly more flexible way to render labels.
>>> from sympy.physics.quantum.circuitplot import render_label
>>> render_label('q0')
'$|q0\\\\rangle$'
>>> render_label('q0', {'q0':'0'})
'$|q0\\\\rangle=|0\\\\rangle$'
"""
init = inits.get(label)
if init:
return r'$|%s\rangle=|%s\rangle$' % (label, init)
return r'$|%s\rangle$' % label
def labeller(n, symbol='q'):
"""Autogenerate labels for wires of quantum circuits.
Parameters
==========
n : int
number of qubits in the circuit
symbol : string
A character string to precede all gate labels. E.g. 'q_0', 'q_1', etc.
>>> from sympy.physics.quantum.circuitplot import labeller
>>> labeller(2)
['q_1', 'q_0']
>>> labeller(3,'j')
['j_2', 'j_1', 'j_0']
"""
return ['%s_%d' % (symbol,n-i-1) for i in range(n)]
class Mz(OneQubitGate):
"""Mock-up of a z measurement gate.
This is in circuitplot rather than gate.py because it's not a real
gate, it just draws one.
"""
measurement = True
gate_name='Mz'
gate_name_latex=u'M_z'
class Mx(OneQubitGate):
"""Mock-up of an x measurement gate.
This is in circuitplot rather than gate.py because it's not a real
gate, it just draws one.
"""
measurement = True
gate_name='Mx'
gate_name_latex=u'M_x'
class CreateOneQubitGate(ManagedProperties):
def __new__(mcl, name, latexname=None):
if not latexname:
latexname = name
return BasicMeta.__new__(mcl, name + "Gate", (OneQubitGate,),
{'gate_name': name, 'gate_name_latex': latexname})
def CreateCGate(name, latexname=None):
"""Use a lexical closure to make a controlled gate.
"""
if not latexname:
latexname = name
onequbitgate = CreateOneQubitGate(name, latexname)
def ControlledGate(ctrls,target):
return CGate(tuple(ctrls),onequbitgate(target))
return ControlledGate
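# Usage sketch (illustrative only; it assumes CNOT and H are available from
# sympy.physics.quantum.gate and that a working matplotlib backend is present):
# from sympy.physics.quantum.gate import CNOT, H
# circuit_plot(CNOT(1, 0)*H(1), nqubits=2, labels=labeller(2))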
| bsd-3-clause |
maxlikely/scikit-learn | examples/plot_hmm_stock_analysis.py | 12 | 2783 | """
==========================
Gaussian HMM of stock data
==========================
This script shows how to use Gaussian HMM.
It uses stock price data, which can be obtained from yahoo finance.
For more information on how to get stock prices with matplotlib, please refer
to date_demo1.py of matplotlib.
"""
from __future__ import print_function
import datetime
import numpy as np
import pylab as pl
from matplotlib.finance import quotes_historical_yahoo
from matplotlib.dates import YearLocator, MonthLocator, DateFormatter
from sklearn.hmm import GaussianHMM
print(__doc__)
###############################################################################
# Downloading the data
date1 = datetime.date(1995, 1, 1) # start date
date2 = datetime.date(2012, 1, 6) # end date
# get quotes from yahoo finance
quotes = quotes_historical_yahoo("INTC", date1, date2)
if len(quotes) == 0:
raise SystemExit
# unpack quotes
dates = np.array([q[0] for q in quotes], dtype=int)
close_v = np.array([q[2] for q in quotes])
volume = np.array([q[5] for q in quotes])[1:]
# take diff of close value
# this makes len(diff) = len(close_v) - 1
# therefore, other quantities also need to be shifted
diff = close_v[1:] - close_v[:-1]
dates = dates[1:]
close_v = close_v[1:]
# pack diff and volume for training
X = np.column_stack([diff, volume])
###############################################################################
# Run Gaussian HMM
print("fitting to HMM and decoding ...", end='')
n_components = 5
# make an HMM instance and execute fit
model = GaussianHMM(n_components, covariance_type="diag", n_iter=1000)
model.fit([X])
# predict the optimal sequence of internal hidden state
hidden_states = model.predict(X)
print("done\n")
###############################################################################
# print trained parameters and plot
print("Transition matrix")
print(model.transmat_)
print()
print("means and vars of each hidden state")
for i in range(n_components):
print("%dth hidden state" % i)
print("mean = ", model.means_[i])
print("var = ", np.diag(model.covars_[i]))
print()
years = YearLocator() # every year
months = MonthLocator() # every month
yearsFmt = DateFormatter('%Y')
fig = pl.figure()
ax = fig.add_subplot(111)
for i in range(n_components):
# use fancy indexing to plot data in each state
idx = (hidden_states == i)
ax.plot_date(dates[idx], close_v[idx], 'o', label="%dth hidden state" % i)
ax.legend()
# format the ticks
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(yearsFmt)
ax.xaxis.set_minor_locator(months)
ax.autoscale_view()
# format the coords message box
ax.fmt_xdata = DateFormatter('%Y-%m-%d')
ax.fmt_ydata = lambda x: '$%1.2f' % x
ax.grid(True)
fig.autofmt_xdate()
pl.show()
| bsd-3-clause |
google/wikiloop-analysis | cross-edits-analysis/author_analytics.py | 1 | 2978 | '''
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Date: 6/11/2020
Author: Haoran Fei
Script to perform author-specific analytics, as outlined in part II of Preliminary Data
Analysis Planning.
'''
import sys
import getopt
#import pandas as pd
import matplotlib.pyplot as plt
import engine
import os
def main(argv):
'''Main routine to load files, compute aggregate statistics, per-author statistics and
sliding window analysis.'''
author_analysis_engine = engine.Engine()
author_analysis_engine.get_command_line_input(argv)
author_analysis_engine.set_key("author", "author")
author_analysis_engine.open_log_file()
author_analysis_engine.display_aggregate_stats()
#author_analysis_engine.iterate_per_key(author_analysis_engine.display_per_group_stats)
#author_analysis_engine.iterate_per_key(author_analysis_engine.plot_evolution_across_time)
author_analysis_engine.iterate_per_key(author_analysis_engine.sliding_window_analysis)
authors_with_non_zero_scores = []
means = dict()
medians = dict()
columns = author_analysis_engine.columns_to_count
for column in columns:
means[column] = []
medians[column] = []
def compute_mean_and_median_non_zero(group, group_key, index):
# Get the edits with non-zero ores score for time-series analysis
non_zero_authors = group.loc[group["ores_damaging"] != 0].copy()
non_zero_count = non_zero_authors.shape[0]
if non_zero_count != 0:
authors_with_non_zero_scores.append(group_key)
for column in columns:
means[column].append(group[column].mean())
medians[column].append(group[column].median())
author_analysis_engine.iterate_per_key(compute_mean_and_median_non_zero)
# Distribution of mean and median scores across authors
fig, axes = plt.subplots(2, len(columns))
fig.set_size_inches(37, 21)
for i in range(len(columns)):
axes[0][i].hist(means[columns[i]], bins=50)
axes[0][i].set_title("Mean of {} across all authors".format(columns[i]))
axes[1][i].hist(medians[columns[i]], bins=50)
axes[1][i].set_title("Median of {} across all authors".format(columns[i]))
plt.savefig("./graphs/aggregate/Mean_median_all_authors_all_columns_no_zero.png")
plt.close()
author_analysis_engine.cleanup()
if __name__ == "__main__":
main(sys.argv[1:]) | apache-2.0 |
robcarver17/systematictradingexamples | plots_for_perhaps/correlatedreturns.py | 1 | 2880 |
import Image
from random import gauss
import numpy as np
from matplotlib.pyplot import plot, show, xticks, xlabel, ylabel, legend, yscale, title, savefig, rcParams, figure, hist, scatter
import matplotlib.pylab as plt
from itertools import cycle
import pickle
import pandas as pd
lines = ["--","-","-."]
linecycler = cycle(lines)
def twocorrelatedseries(no_periods, period_mean, period_mean2, period_vol, corr):
means = [period_mean, period_mean2]
stds = [period_vol]*2
covs = [[stds[0]**2 , stds[0]*stds[1]*corr],
[stds[0]*stds[1]*corr, stds[1]**2]]
m = np.random.multivariate_normal(means, covs, no_periods).T
data1=m[0]
data2=m[1]
return np.mean(data1) - np.mean(data2)
## Distribution of the difference in mean returns between two correlated return series
months_in_year=12
annual_vol=0.150
monthly_vol=annual_vol/(months_in_year**.5)
annual_SR=0.66
annual_SR2=0.46
diffSR=annual_SR - annual_SR2
annual_return=annual_vol*annual_SR
annual_return2=annual_vol*annual_SR2
monthly_mean=annual_return/months_in_year
monthly_mean2=annual_return2/months_in_year
## Make sure these match!
no_years=10
no_periods=months_in_year*no_years
monte_carlos=500000
corr=0.85
rollmeandiff=[twocorrelatedseries(no_periods, monthly_mean, monthly_mean2, monthly_vol, corr=corr) for ii in range(monte_carlos)]
rollanndiff=[x*12 for x in rollmeandiff]
rollannSR=[x/annual_vol for x in rollanndiff]
def linehist(x, color="blue", linestyle="-", bins=10, linewidth=1):
y,binEdges =np.histogram(x, bins=bins)
bincenters = 0.5*(binEdges[1:]+binEdges[:-1])
plot(bincenters,y,'-', color=color, linestyle=linestyle, linewidth=linewidth)
linehist(rollanndiff, bins=50, linewidth=2)
frame=plt.gca()
frame.get_yaxis().set_visible(False)
frame.set_xlim([-0.07, 0.13])
frame.set_xticks([-0.05, 0.00, 0.05,.1])
frame.set_ylim([0,50000])
frame.annotate("Expected improvement in returns", xy=(0.03, 38000),xytext=(0.05, 45000.0), arrowprops=dict(facecolor='black', shrink=0.05), size=18)
frame.annotate("Breakeven in costs", xy=(0.01, 28000),xytext=(-0.05, 40000.0), arrowprops=dict(facecolor='black', shrink=0.05), size=18)
plt.axvline(0.01, linestyle="--")
plt.axvline(0.03, linestyle="--")
#xlabel("Difference in annual % returns between managers")
rcParams.update({'font.size': 18})
def file_process(filename):
fig = plt.gcf()
fig.set_size_inches(18.5,10.5)
fig.savefig("/home/rob/%s.png" % filename,dpi=300)
fig.savefig("/home/rob/%sLOWRES.png" % filename,dpi=50)
Image.open("/home/rob/%s.png" % filename).convert('L').save("/home/rob/%s.jpg" % filename)
Image.open("/home/rob/%sLOWRES.png" % filename).convert('L').save("/home/rob/%sLOWRES.jpg" % filename)
file_process("correlateddifferences")
show()
print(sum([1.0 for x in rollanndiff if x<0.01])/len(rollanndiff))
print(np.std(rollanndiff)) | gpl-2.0 |
lcameron05/PCWG | pcwg/core/turbine.py | 2 | 29788 | import math
import interpolators
import scipy.interpolate
import numpy as np
import pandas as pd
from ..core.status import Status
from empirical_turbulence import AugmentedTurbulenceCorrection
class Relaxation(object):
def __init__(self, correction):
self.correction = correction
def relax(self, wind_speed, turbulence):
return self.correction * turbulence
class NoRelaxation(object):
def relax(self, wind_speed,turbulence):
# suppress unused parameter message in PyCharm
_ = wind_speed
return turbulence
class PowerCurve(object):
def __init__(self,
rotor_geometry,
reference_density,
data_frame,
wind_speed_column,
turbulence_column,
power_column,
count_column=None,
rated_power=None,
name='Undefined',
interpolation_mode='Cubic Spline',
zero_ti_pc_required=False,
x_limits=None,
sub_power=None,
relaxation=NoRelaxation()):
self.name = name
self.interpolation_mode = interpolation_mode
self.reference_density = reference_density
self.data_frame = data_frame
self.wind_speed_column = wind_speed_column
self.turbulence_column = turbulence_column
self.power_column = power_column
self.count_column = count_column
self.x_limits = x_limits
self.sub_power = sub_power
self.rotor_geometry = rotor_geometry
if self.count_column is not None:
self.hours = self.data_frame[count_column].sum()*1.0/6.0
else:
self.hours = None
wind_data = data_frame[self.wind_speed_column]
power_data = data_frame[self.power_column]
self.first_wind_speed = min(wind_data)
self.cut_in_wind_speed = self.calculate_cut_in_wind_speed()
self.cut_out_wind_speed = self.calculate_cut_out_wind_speed()
self.wind_speed_points, self.power_points = self.extract_points(wind_data, power_data)
self.turbulence_function = self.create_one_dimensional_function(self.wind_speed_column,
self.turbulence_column,
supress_negative=True)
self.available_power = AvailablePower(self.rotor_geometry, self.reference_density)
Status.add("calculating power function ({0})".format(self.interpolation_mode), verbosity=3)
self.power_function = self.create_power_function(self.wind_speed_points, self.power_points)
Status.add("power function calculated ({0})".format(type(self.power_function)), verbosity=3)
self.rated_power = self.get_rated_power(rated_power, data_frame[self.power_column])
self._reverted_relaxation = None
self._reverted_simulated_power = None
self._reverted_zero_turbulence_power_curve = None
self.relaxation = relaxation
self.zero_ti_pc_required = zero_ti_pc_required
@property
def zero_ti_pc_required(self):
return self._zero_ti_pc_required
@zero_ti_pc_required.setter
def zero_ti_pc_required(self, value):
if hasattr(self, '_zero_ti_pc_required'):
update = (self._zero_ti_pc_required != value)
else:
update = True
if update:
if value and (self.reference_density is None):
raise Exception("Zero Turbulence Curve cannot be calculated"
" if turbine does not have a well defined density")
self._zero_ti_pc_required = value
self.update_zero_ti()
def get_raw_levels(self):
padded_levels = (self.data_frame['Is Extrapolation'] == True)
return self.data_frame[~padded_levels]
def revert_zero_ti(self):
if self._reverted_zero_turbulence_power_curve is None:
raise Exception('Cannot revert zero turbulence power curve')
self.relaxation = self._reverted_relaxation
self.simulatedPower = self._reverted_simulated_power
self.zeroTurbulencePowerCurve = self._reverted_zero_turbulence_power_curve
self._reverted_relaxation = None
self._reverted_simulated_power = None
self._reverted_zero_turbulence_power_curve = None
def update_zero_ti(self, relaxation=None):
self._reverted_relaxation = self.relaxation
if hasattr(self, 'simulatedPower'):
self._reverted_simulated_power = self.simulatedPower
if hasattr(self, 'zeroTurbulencePowerCurve'):
self._reverted_zero_turbulence_power_curve = self.zeroTurbulencePowerCurve
Status.add("Zero TI Required: {0}".format(self.zero_ti_pc_required), verbosity=3)
if relaxation is not None:
self.relaxation = relaxation
if self.zero_ti_pc_required:
Status.add("Calculating zero turbulence curve for {0} Power Curve".format(self.name), verbosity=3)
try:
self.calculate_zero_turbulence_power_curve()
Status.add("Calculation of zero turbulence curve for {0}"
" Power Curve successful".format(self.name), verbosity=3)
            except Exception as error:
err_msg = "Calculation of zero turbulence curve for {0}" \
" Power Curve unsuccessful: {1}".format(self.name, error)
raise Exception(err_msg)
else:
self.zeroTurbulencePowerCurve = None
self.simulatedPower = None
Status.add("Turbine Created Successfully", verbosity=3)
def get_level(self, wind_speed, tolerance=0.00001):
for i in range(len(self.wind_speed_points)):
diff = abs(self.wind_speed_points[i] - wind_speed)
if diff < tolerance:
return self.power_points[i]
raise Exception("Cannot find level: {0}".format(wind_speed))
def calculate_zero_turbulence_power_curve(self):
integration_range = IntegrationRange(0.0, 100.0, 0.1)
wind_speeds = []
powers = []
turbulence_values = []
for index in self.data_frame.index:
wind_speed = self.data_frame.loc[index, self.wind_speed_column]
power = self.data_frame.loc[index, self.power_column]
turbulence = self.data_frame.loc[index, self.turbulence_column]
if not np.isnan(wind_speed) and \
not np.isnan(power) and \
not np.isnan(turbulence) and \
wind_speed >= 0.0 and \
power >= 0.0 and \
turbulence > 0:
wind_speeds.append(wind_speed)
turbulence_values.append(turbulence)
powers.append(power)
self.zeroTurbulencePowerCurve = ZeroTurbulencePowerCurve(wind_speeds,
powers,
turbulence_values,
integration_range,
self.available_power,
self.reference_density,
self.relaxation)
self.simulatedPower = SimulatedPower(self.zeroTurbulencePowerCurve, integration_range)
def get_rated_power(self, rated_power, power_curve_levels):
if rated_power is None:
return power_curve_levels.max()
else:
return rated_power
def get_threshold_wind_speed(self):
return float(interpolators.LinearPowerCurveInterpolator(self.power_points, self.wind_speed_points,
self.rated_power)(0.85 * self.rated_power) * 1.5)
def get_turbulence_levels(self, power_curve_levels, turbulence_levels, fixed_turbulence):
if fixed_turbulence is not None:
turbulence_levels = pd.Series(index=power_curve_levels.index)
for level in power_curve_levels.index:
turbulence_levels[level] = fixed_turbulence
else:
turbulence_levels = turbulence_levels
return turbulence_levels
def create_one_dimensional_function(self, x_col, y_col, supress_negative=True):
x, y = [], []
for index in self.data_frame.index:
x_value = self.data_frame.loc[index, x_col]
y_value = self.data_frame.loc[index, y_col]
if (not np.isnan(x_value)) and (not np.isnan(y_value)):
if (not supress_negative) or y_value > 0:
x.append(x_value)
y.append(y_value)
return interpolators.LinearTurbulenceInterpolator(x, y)
def extract_points(self, x_data, y_data):
if x_data is None:
x_data = pd.Series(y_data.index, index=y_data.index)
x, y = [], []
Status.add("Preparing input points", verbosity=3)
for i in y_data.index:
if i in x_data.index and not np.isnan(x_data[i]):
x_val = x_data[i]
else:
x_val = i
y_val = y_data[i]
if (not np.isnan(x_val)) and (not np.isnan(y_val)):
x.append(x_val)
y.append(y_val)
Status.add("{0} {1} {2}".format(i, x[-1], y[-1]), verbosity=3)
return x, y
def create_power_function(self, x, y):
Status.add("Creating interpolator", verbosity=3)
if self.interpolation_mode == 'Linear':
return interpolators.LinearPowerCurveInterpolator(x, y, self.cut_out_wind_speed)
elif self.interpolation_mode == 'Cubic' or self.interpolation_mode == 'Cubic Spline':
return interpolators.CubicSplinePowerCurveInterpolator(x, y, self.cut_out_wind_speed)
elif self.interpolation_mode == 'Cubic Hermite':
return interpolators.CubicHermitePowerCurveInterpolator(x, y, self.cut_out_wind_speed)
elif self.interpolation_mode == 'Marmander' or self.interpolation_mode == 'Marmander (Cubic Spline)':
return interpolators.MarmanderPowerCurveInterpolatorCubicSpline(x,
y,
self.cut_out_wind_speed,
x_limits=self.x_limits,
sub_power=self.sub_power)
elif self.interpolation_mode == 'Marmander (Cubic Hermite)':
return interpolators.MarmanderPowerCurveInterpolatorCubicHermite(x,
y,
self.cut_out_wind_speed,
x_limits=self.x_limits,
sub_power=self.sub_power)
else:
raise Exception('Unknown interpolation mode: {0}'.format(self.interpolation_mode))
def power(self, wind_speed, turbulence=None, augment_turbulence_correction=False, normalised_wind_speed=None):
if augment_turbulence_correction and normalised_wind_speed is None:
raise Exception('normalised_wind_speed cannot be None if augment_turbulence_correction=True')
reference_power = self.power_function(wind_speed)
if turbulence is None:
power = reference_power
else:
reference_turbulence = self.reference_turbulence(wind_speed)
simulated_power_site = self.simulatedPower.power(wind_speed,
self.relaxation.relax(wind_speed,
turbulence))
simulated_power_reference = self.simulatedPower.power(wind_speed,
self.relaxation.relax(wind_speed,
reference_turbulence))
correction = simulated_power_site - simulated_power_reference
power = reference_power + correction
if augment_turbulence_correction:
deviation = self.augment_turbulence_correction(normalised_wind_speed,
turbulence,
reference_turbulence)
power *= (1.0 + deviation)
power = max([0.0, power])
power = min([self.rated_power, power])
return power
def augment_turbulence_correction(self, normalised_wind_speed, turbulence, reference_turbulence):
empirical = AugmentedTurbulenceCorrection()
return empirical.calculate(normalised_wind_speed, turbulence, reference_turbulence)
def reference_turbulence(self, wind_speed):
if wind_speed < self.first_wind_speed:
return self.turbulence_function(self.first_wind_speed)
elif wind_speed > self.cut_out_wind_speed:
return self.turbulence_function(self.cut_out_wind_speed)
else:
return self.turbulence_function(wind_speed)
def calculate_cut_in_wind_speed(self):
return min(self.non_zero_levels())
def calculate_cut_out_wind_speed(self):
return max(self.non_zero_levels())
def non_zero_levels(self):
levels = []
for index in self.data_frame.index:
power = self.data_frame.loc[index, self.power_column]
speed = self.data_frame.loc[index, self.wind_speed_column]
if not np.isnan(power) and power > 0.0:
levels.append(speed)
return levels
def __str__(self):
value = "Wind Speed\tPower\n"
for wind_speed in self.wind_speed_points:
value += "%0.2f\t%0.2f\n" % (wind_speed, self.power(wind_speed))
return value
class RotorGeometry:
def __init__(self, diameter, hub_height, tilt=None):
if diameter is None:
raise Exception('Diameter is not set')
if hub_height is None:
raise Exception('Hub Height is not set')
self.diameter = diameter
self.radius = diameter / 2
self.area = math.pi * self.radius ** 2
self.hub_height = hub_height
self.lower_tip = self.hub_height - self.radius
self.upper_tip = self.hub_height + self.radius
self.tilt = tilt
def within_rotor(self, height):
return (height >= self.lower_tip) and (height <= self.upper_tip)
class IntegrationProbabilities:
def __init__(self, wind_speeds, wind_speed_step):
# speed optimised normal distribution
self.wind_speeds = wind_speeds
self.a = wind_speed_step / math.sqrt(2.0 * math.pi)
def probabilities(self, wind_speed_mean, wind_speed_std__dev):
if wind_speed_std__dev == 0:
return np.nan
one_over_standard_deviation = 1.0 / wind_speed_std__dev
one_over_standard_deviation_sq = one_over_standard_deviation * one_over_standard_deviation
b = self.a * one_over_standard_deviation
c = -0.5 * one_over_standard_deviation_sq
wind_speed_minus_means = (self.wind_speeds - wind_speed_mean)
wind_speed_minus_mean_sq = wind_speed_minus_means * wind_speed_minus_means
d = c * wind_speed_minus_mean_sq
return b * np.exp(d)
class IntegrationRange:
def __init__(self, minimum_wind_speed, maximum_wind_speed, wind_speed_step):
self.minimum_wind_speed = minimum_wind_speed
self.maximum_wind_speed = maximum_wind_speed
self.wind_speed_step = wind_speed_step
self.wind_speeds = np.arange(minimum_wind_speed, maximum_wind_speed, wind_speed_step)
self.integrationProbabilities = IntegrationProbabilities(self.wind_speeds, self.wind_speed_step)
def probabilities(self, wind_speed_mean, wind_speed_std_dev):
return self.integrationProbabilities.probabilities(wind_speed_mean, wind_speed_std_dev)
class AvailablePower(object):
def __init__(self, rotor_geometry, density):
self.area = rotor_geometry.area
self.density = density
def power(self, wind_speed):
return 0.5 * self.density * self.area * wind_speed * wind_speed * wind_speed / 1000.0
def power_coefficient(self, wind_speed, actual_power):
power = self.power(wind_speed)
if power > 0:
return actual_power / self.power(wind_speed)
else:
return 0.0
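# Illustrative sketch (not part of PCWG itself): the geometry and available
# power helpers above can be exercised on their own, e.g.
# rotor = RotorGeometry(diameter=90.0, hub_height=80.0)
# available = AvailablePower(rotor, density=1.225)
# available.power(8.0)  # kW available in the wind at 8 m/s for this rotor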
class ZeroTurbulencePowerCurve(object):
def __init__(self,
reference_wind_speeds,
reference_powers,
reference_turbulence_values,
integration_range,
available_power,
density,
relaxation):
self.integration_range = integration_range
self.initial_zero_turbulence_power_curve = InitialZeroTurbulencePowerCurve(reference_wind_speeds,
reference_powers,
reference_turbulence_values,
integration_range,
available_power,
density,
relaxation)
simulated_reference_power_curve = SimulatedPowerCurve(reference_wind_speeds,
self.initial_zero_turbulence_power_curve,
reference_turbulence_values,
integration_range,
relaxation)
self.wind_speeds = reference_wind_speeds
self.powers = []
self.min_wind_speed = None
self.last_wind_speed = None
self.last_power = None
for i in range(len(self.wind_speeds)):
correct_to_zero_turbulence = (-simulated_reference_power_curve.powers[i]
+ self.initial_zero_turbulence_power_curve.powers[i])
power = reference_powers[i] + correct_to_zero_turbulence
if reference_powers[i] > 0:
if self.last_wind_speed is None or self.wind_speeds[i] > self.last_wind_speed:
self.last_wind_speed = self.wind_speeds[i]
self.last_power = power
self.powers.append(power)
self.powerFunction = scipy.interpolate.interp1d(self.wind_speeds, self.powers)
self.zero_ti_rated_power = self.initial_zero_turbulence_power_curve.rated_power
self.zero_ti_rated_wind_speed = self.initial_zero_turbulence_power_curve.rated_wind_speed
self.zero_ti_cut_in_wind_speed = self.initial_zero_turbulence_power_curve.cut_in_wind_speed
self.min_wind_speed = min(self.wind_speeds)
self.df_power_levels = pd.DataFrame(self.powers, index=self.wind_speeds, columns=['Power'])
def power(self, wind_speed):
if wind_speed < self.min_wind_speed:
return 0.0
elif wind_speed > self.last_wind_speed:
return self.last_power
else:
return self.powerFunction(wind_speed)
class InitialZeroTurbulencePowerCurve(object):
def __init__(self,
reference_wind_speeds,
reference_powers,
reference_turbulence_values,
integration_range,
available_power,
density,
relaxation):
self.max_iterations = 5
self.density = density
self.integration_range = integration_range
self.available_power = available_power
self.reference_wind_speeds = reference_wind_speeds
self.reference_powers = reference_powers
self.reference_turbulence_values = reference_turbulence_values
self.relaxation = relaxation
self.reference_power_curve_stats = IterationPowerCurveStats(reference_wind_speeds,
reference_powers,
available_power)
self.selected_stats = self.solve(self.reference_power_curve_stats)
selected_iteration = InitialZeroTurbulencePowerCurveIteration(reference_wind_speeds,
self.available_power,
self.selected_stats.rated_power,
self.selected_stats.cut_in_wind_speed,
self.selected_stats.cp_max,
self.density)
self.rated_wind_speed = selected_iteration.rated_wind_speed
self.rated_power = selected_iteration.rated_power
self.cut_in_wind_speed = selected_iteration.cut_in_wind_speed
self.wind_speeds = selected_iteration.wind_speeds
self.powers = selected_iteration.powers
self.power = selected_iteration.power
def solve(self, previous_iteration_stats, iteration_count=1):
if iteration_count > self.max_iterations:
raise Exception("Failed to solve initial zero turbulence curve in permitted number of iterations")
previous_rated_power = previous_iteration_stats.rated_power
previous_cut_in_wind_speed = previous_iteration_stats.cut_in_wind_speed
previous_cp_max = previous_iteration_stats.cp_max
iteration_zero_turbulence_curve = InitialZeroTurbulencePowerCurveIteration(self.integration_range.wind_speeds,
self.available_power,
previous_rated_power,
previous_cut_in_wind_speed,
previous_cp_max,
self.density)
iteration_simulated_curve = SimulatedPowerCurve(self.reference_wind_speeds,
iteration_zero_turbulence_curve,
self.reference_turbulence_values,
self.integration_range,
self.relaxation)
iteration_simulated_curve_stats = IterationPowerCurveStats(iteration_simulated_curve.wind_speeds,
iteration_simulated_curve.powers,
self.available_power)
convergence_check = IterationPowerCurveConvergenceCheck(self.reference_power_curve_stats,
iteration_simulated_curve_stats)
if convergence_check.isConverged:
return previous_iteration_stats
else:
incremented_stats = IncrementedPowerCurveStats(previous_iteration_stats, convergence_check)
return self.solve(incremented_stats, iteration_count + 1)
class IterationPowerCurveConvergenceCheck(object):
def __init__(self, reference_stats, iteration_stats):
self.threshold_power_diff = reference_stats.rated_power * 0.001
self.threshold_cut_in_wind_speed_diff = 0.5
self.threshold_cp_max_diff = 0.01
self.rated_power_diff = iteration_stats.rated_power - reference_stats.rated_power
self.cut_in_diff = iteration_stats.cut_in_wind_speed - reference_stats.cut_in_wind_speed
self.cp_max_diff = iteration_stats.cp_max - reference_stats.cp_max
self.rated_power_converged = abs(self.rated_power_diff) < self.threshold_power_diff
self.cut_in_converged = abs(self.cut_in_diff) <= self.threshold_cut_in_wind_speed_diff
self.cp_max_converged = abs(self.cp_max_diff) <= self.threshold_cp_max_diff
self.isConverged = self.rated_power_converged and self.cut_in_converged and self.cp_max_converged
class IncrementedPowerCurveStats(object):
def __init__(self, previous_iteration_stats, convergence_check):
if convergence_check.rated_power_converged:
self.rated_power = previous_iteration_stats.rated_power
else:
self.rated_power = previous_iteration_stats.rated_power - convergence_check.rated_power_diff
if convergence_check.cut_in_converged:
self.cut_in_wind_speed = previous_iteration_stats.cut_in_wind_speed
else:
self.cut_in_wind_speed = previous_iteration_stats.cut_in_wind_speed - convergence_check.cut_in_diff
if convergence_check.cp_max_converged:
self.cp_max = previous_iteration_stats.cp_max
else:
self.cp_max = previous_iteration_stats.cp_max - convergence_check.cp_max_diff
class InitialZeroTurbulencePowerCurveIteration(object):
def __init__(self, wind_speeds, available_power, rated_power, cut_in_wind_speed, cp_max, density):
self.wind_speeds = wind_speeds
self.powers = []
self.rated_wind_speed = ((2.0 * rated_power * 1000.0) /
(density * cp_max * available_power.area)) ** (1.0 / 3.0)
self.rated_power = rated_power
self.cut_in_wind_speed = cut_in_wind_speed
self.cp_max = cp_max
self.availablePower = available_power
for wind_speed in self.wind_speeds:
self.powers.append(self.power(wind_speed))
def power(self, wind_speed):
if wind_speed > self.cut_in_wind_speed:
if wind_speed < self.rated_wind_speed:
return self.availablePower.power(wind_speed) * self.cp_max
else:
return self.rated_power
else:
return 0.0
class IterationPowerCurveStats(object):
def __init__(self, wind_speeds, powers, available_power):
self.rated_power = max(powers)
threshold_power = self.rated_power * 0.001
operating_wind_speeds = []
cps = []
for i in range(len(wind_speeds)):
wind_speed = wind_speeds[i]
power = powers[i]
cps.append(available_power.power_coefficient(wind_speed, power))
if power >= threshold_power:
operating_wind_speeds.append(wind_speed)
self.cp_max = max(cps)
if len(operating_wind_speeds) > 0:
self.cut_in_wind_speed = min(operating_wind_speeds)
else:
self.cut_in_wind_speed = 0.0
class SimulatedPower(object):
def __init__(self, zero_turbulence_power_curve, integration_range):
self.zero_turbulence_power_curve = zero_turbulence_power_curve
self.integration_range = integration_range
integration_powers = []
for wind_speed in np.nditer(self.integration_range.wind_speeds):
integration_powers.append(self.zero_turbulence_power_curve.power(wind_speed))
self.integrationPowers = np.array(integration_powers)
def power(self, wind_speed, turbulence):
if wind_speed > 0:
standard_deviation = wind_speed * turbulence
integration_probabilities = self.integration_range.probabilities(wind_speed, standard_deviation)
return np.sum(integration_probabilities * self.integrationPowers) / np.sum(integration_probabilities)
else:
return 0.0
class SimulatedPowerCurve(object):
def __init__(self, wind_speeds, zero_turbulence_power_curve, turbulence_values, integration_range, relaxation):
self.simulated_power = SimulatedPower(zero_turbulence_power_curve, integration_range)
self.relaxation = relaxation
self.wind_speeds = wind_speeds
self.turbulence_values = turbulence_values
self.powers = []
for i in range(len(wind_speeds)):
wind_speed = wind_speeds[i]
turbulence = self.relaxation.relax(wind_speed,
turbulence_values[i])
power = self.simulated_power.power(wind_speed, turbulence)
self.powers.append(power)
| mit |
ronnyandersson/zignal | zignal/audio.py | 1 | 46119 | '''
Created on Dec 31, 2013
@author: Ronny Andersson ([email protected])
@copyright: (c) 2013 Ronny Andersson
@license: MIT
'''
# Standard library
import logging
import os
# Third party
import matplotlib.pyplot as plt
import numpy as np
import samplerate
import scipy.io.wavfile
import scipy.signal
# ==================================================================================================
# Classes
# ==================================================================================================
class Audio(object):
def __init__(self, channels=0, fs=96000, nofsamples=0, duration=None,
initialdata=None, dtype=np.float64):
"""Base class for audio processing. Samples are stored as a numpy array.
We can create an instance by specifying a channel count and one of either
a duration or a sample count parameter. The other way of creating an
instance is by providing an already existing numpy array containing the
audio samples.
The shape of the audio samples are always (Nsamples_per_channel, Nchannels).
"""
self._logger = logging.getLogger(__name__)
# We sometimes divide by the sample rate to get time values
assert fs > 0, "sample rate cannot be zero or negative"
self.fs = fs # sample rate should always be specified in the constructor
self.nofsamples = None # number of samples per channel
self.duration = None # duration (length) in seconds
self.ch = None # number of channels
self._comment = ''
if initialdata is None:
# if we are not given any initial samples we create an empty array of
# zeros for the audio samples.
assert isinstance(channels, int)
assert not(nofsamples != 0 and duration is not None), \
"choose either samples or duration"
self.ch = channels
if duration is not None:
self.nofsamples = int(duration*self.fs)
self.duration = duration
else:
self.nofsamples = nofsamples
self._set_duration()
# create space for the samples
self.samples = np.zeros((self.nofsamples, self.ch), dtype=dtype)
else:
# An array of initial samples are given, use this to extract
# channel count and durations.
assert isinstance(initialdata, np.ndarray), \
'Only numpy arrays are allowed as initial data'
assert channels == 0, \
"parameter 'channels' is redundant if initial data is specified"
assert nofsamples == 0, \
"parameter 'nofsamples' is redundant if initial data is specified"
assert duration is None, \
"parameter 'duration' is redundant if initial data is specified"
# copy the data to avoid unexpected data corruption
self.samples = initialdata.copy()
if self.samples.ndim == 1:
# if the array is
# array([ 1., 1., 1.])
# we expand it to
# array([[ 1.],
# [ 1.],
# [ 1.]])
#
self.samples = np.expand_dims(self.samples, axis=1)
assert self.samples.ndim == 2, 'shape must be (Nsamples, Nchannels)'
self.nofsamples, self.ch = self.samples.shape
# initial data is assumed to have more samples than channels
assert self.nofsamples > self.ch, 'shape must be (Nsamples, Nchannels)'
self._set_duration()
assert self.nofsamples is not None
assert self.duration is not None
assert self.ch is not None
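    # The two construction styles described in the docstring, as a sketch
    # (the values used here are arbitrary examples):
    # a = Audio(channels=2, fs=48000, duration=1.0)          # empty, zero-filled samples
    # b = Audio(fs=48000, initialdata=np.zeros((4800, 2)))   # wrap existing samples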
def __str__(self):
s = '=======================================\n'
s += 'classname : %s\n' % self.__class__.__name__
s += 'sample rate : %.1f [Hz]\n' % self.fs
s += 'channels : %i\n' % self.ch
s += 'duration : %.3f [s]\n' % self.duration
s += 'datatype : %s\n' % self.samples.dtype
s += 'samples per ch : %i\n' % self.nofsamples
s += 'data size : %.3f [Mb]\n' % (self.samples.nbytes/(1024*1024))
s += 'has comment : %s\n' % ('yes' if len(self._comment) != 0 else 'no')
if self.ch != 0:
# += '-----------------:---------------------\n'
s += 'peak : %s\n' % np.array_str(self.peak()[0],
precision=4, suppress_small=True)
s += 'RMS : %s\n' % np.array_str(self.rms(),
precision=4, suppress_small=True)
s += 'crestfactor : %s\n' % np.array_str(self.crest_factor(),
precision=4, suppress_small=True)
s += '-----------------:---------------------\n'
return s
def __len__(self):
return self.nofsamples
def _set_duration(self):
"""internal method
If we have modified the samples variable (by padding with zeros
for example) we need to re-calculate the duration
"""
self.duration = self.nofsamples/self.fs
def _set_samples(self, idx=0, samples=None):
"""internal method
NOTE: idx != channel
idx is always zero indexed since it refers to the numpy array. Channels
are always indexed from one since this is the natural way of identifying
channel numbers.
"""
assert isinstance(samples, np.ndarray)
assert len(samples) == self.nofsamples
self.samples[:, idx] = samples
def copy(self):
"""deep:ish copy"""
return Audio(fs=self.fs, initialdata=self.samples)
def pretty_string_samples(self, idx_start=0, idx_end=20, precision=4, header=False):
s = ''
if header:
t = ' '
u = 'ch'
for i in range(self.ch):
t += '-------:'
u += ' %2i :' % (i+1)
t += '\n'
u += '\n'
s += t # --> -------:-------:-------:
s += u # --> ch 1 : 2 : 3 :
s += t # --> -------:-------:-------:
s += np.array_str(self.samples[idx_start:idx_end, :],
max_line_width=260, # we can print 32 channels before linewrap
precision=precision,
suppress_small=True)
if (idx_end-idx_start) < self.nofsamples:
s = s[:-1] # strip the right ']' character
s += '\n ...,\n'
lastlines = np.array_str(self.samples[-3:, :],
max_line_width=260,
precision=precision,
suppress_small=True)
s += ' %s\n' % lastlines[1:] # strip first '['
return s
def pad(self, nofsamples=0):
"""Zero pad *at the end* of the current audio data.
increases duration by samples/fs
"""
assert nofsamples >= 0, "Can't append negative number of samples"
zeros = np.zeros((nofsamples, self.ch), dtype=self.samples.dtype)
self.samples = np.append(self.samples, zeros, axis=0)
self.nofsamples = len(self.samples)
self._set_duration()
def iter_chunks(self, chunksize=1024):
"""
Splits the audio samples into chunks, to iterate over in block-based
processing. There are no restrictions on the chunk size, but in
practical implementations it is usually a power of two. This is not
a requirement here.
Chunks are sometimes called blocks, this is the same. So chunksize is
the same as blocksize.
"""
chunks = len(self.samples) // chunksize
missing = len(self.samples) % chunksize
self._logger.debug("chunksize : %i", chunksize)
self._logger.debug("data shape : %s", self.samples.shape)
self._logger.debug("chunks pre pad : %i", chunks)
self._logger.debug("missing (modulo) : %i", missing)
# If the data doesn't add up to a full chunk we need to pad with zeros but
# first we need to calculate how many samples are missing for a full chunk.
if missing:
missing_samples = chunksize - missing
self._logger.debug("missing (samples): %s", missing_samples)
# Pad with zeros, assuming that all channels are equally long in the
# first place. A new array is created since the original data should
# be kept unchanged. This means that this iterator is not very memory
# efficient. This can be avoided if the data is padded to add up to
# a multiple of the chunksize before this method is called. If this is
# acceptable (changing the original data) then this iterator is very
# memory efficient since only a new view of the original data is
# created (if possible, not guaranteed)
padded = np.concatenate([self.samples, np.zeros((missing_samples, self.ch))])
self._logger.debug("padded shape : %s", padded.shape)
else:
# Here the audio samples adds up to a multiple of the chunksize
self._logger.debug("*** No padding is needed")
padded = self.samples
padded_chunks = len(padded) // chunksize
self._logger.info("chunks (total) : %i", padded_chunks)
reshape = padded.reshape((padded_chunks, chunksize, self.ch))
# Now finally iterate over all the chunks
for i in range(padded_chunks):
curr_start = chunksize*i
curr_stop = curr_start + chunksize - 1
self._logger.debug("current slice : %10i %10i", curr_start, curr_stop)
yield reshape[i]
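    # (illustrative only) typical block-based use of iter_chunks():
    # a = Audio(channels=2, fs=48000, nofsamples=3000)
    # for chunk in a.iter_chunks(chunksize=1024):
    #     pass  # each chunk has shape (1024, 2); the last one is zero padded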
def is_empty(self):
"""Check if all samples in all channels are zero, then file is empty."""
return np.all(self.samples == 0)
def is_probably_empty(self, limit=-80):
"""Check if the absolute peak is below <limit> dB"""
peak, idx = self.peak()
peak = np.abs(peak)
self._logger.debug("abs(peak) is %s dB at %s sec",
np.array_str(lin2db(peak), precision=4, suppress_small=True),
np.array_str(idx/self.fs, precision=3, suppress_small=True),
)
return np.all(peak <= db2lin(limit))
def trim(self, start=None, end=None):
"""Trim samples **IN PLACE** """
self.samples = self.samples[start:end]
self.nofsamples = len(self.samples)
self._set_duration()
def trim_sec(self, start=None, end=None):
"""Trim (in seconds) **IN PLACE** """
self.trim(int(start*self.fs), int(end*self.fs))
def _fade(self, millisec, direction):
"""Internal method.
Fade in/out is essentially the same exept the slope (and position) of the
ramp. Currently only a linear ramp is implemented.
"""
assert np.issubdtype(self.samples.dtype, np.floating), \
"only floating point processing implemented"
assert millisec >= 0, "Got a time machine?"
assert direction in ("in", "out")
fade_seconds = millisec/1000
assert self.duration > fade_seconds, "fade cannot be longer than the length of the audio"
sample_count = int(np.ceil(fade_seconds*self.fs))
self._logger.debug("fade %s sample count: %i" % (direction, sample_count))
# generate the ramp
if direction == "out":
# ramp down
ramp = np.linspace(1, 0, num=sample_count, endpoint=True)
else:
# ramp up
ramp = np.linspace(0, 1, num=sample_count, endpoint=True)
ones = np.ones(len(self)-len(ramp))
# glue the ones and the ramp together
if direction == "out":
gains = np.append(ones, ramp, axis=0)
else:
gains = np.append(ramp, ones, axis=0)
# expand the dimension so we get a one-channel array of samples,
# as in (samples, channels)
gains = np.expand_dims(gains, axis=1)
assert len(gains) == len(self)
# repeat the gain vector so we get as many gain channels as audio channels
gains = np.repeat(gains, self.ch, axis=1)
assert gains.shape == self.samples.shape
# apply gains
self.samples = self.samples * gains
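# Worked example (illustrative, not from the original source): at fs=48000
# a 10 ms fade gives sample_count = ceil(0.010 * 48000) = 480, so a 480
# sample linear ramp is applied while the remaining gains stay at 1.0.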
def fade_in(self, millisec=10):
"""Fade in over 'millisec' seconds. Applies on *all* channels"""
self._fade(millisec, "in")
def fade_out(self, millisec=30):
"""Fade out over 'millisec' seconds. Applies on *all* channels"""
self._fade(millisec, "out")
def delay(self, n, channel=1):
"""Delay channel x by n samples"""
self.samples[:, channel-1] = \
np.pad(self.samples[:, channel-1], (n, 0), mode="constant")[:-n]
def get_time(self):
"""Return a vector of time values, starting with t0=0. Useful when plotting."""
return np.linspace(0, self.duration, num=self.nofsamples, endpoint=False)
def get_channel(self, channel):
assert channel != 0, "channel count starts at 1"
assert channel <= self.ch, \
"channel %i does not exist, %i channels available" % (channel, self.ch)
return Audio(fs=self.fs, initialdata=self.samples[:, channel-1])
def comment(self, comment=None):
"""Modify or return a string comment."""
assert isinstance(comment, (str, type(None))), "A comment is a string"
if comment is not None:
self._comment = comment
return self._comment
def to_mono(self):
"""Mix down to mono, reduces the channel count to 1. """
# FIXME: this only works on floats, not ints
# sum all samples, do the actual mix
samples_mono = np.sum(self.samples, axis=1)
# return a new instance since any subclass data is lost
mono = Audio(fs=self.fs, initialdata=samples_mono)
# Two correlated signals will have a combined gain of 2, so we need to
# reduce the gain to not overflow. We reduce the gain by 1 over the
# number of channels.
# 1/1 = 1.00 --> 0 [dB]
# 1/2 = 0.50 --> -6.02... [dB]
# 1/3 = 0.33... --> -9.54... [dB]
# 1/4 = 0.25 --> -12.04... [dB]
gain = lin2db(1/self.ch)
self._logger.debug("Total gain reduction: %.3f [dB]", gain)
mono.gain(gain)
return mono
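# Hypothetical usage sketch (not part of the original source): the mix is
# attenuated by lin2db(1/2) ~= -6.02 dB, so two fully correlated full scale
# channels still fit in [-1.0, 1.0].
#   >>> x = Sinetones(400, 400, fs=48000, duration=1.0)
#   >>> mono = x.to_mono() # returns a new single channel Audio instance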
def append(self, *args):
"""Add (append) channels *to the right* of the current audio data.
does zeropadding
increases channel count
"""
for i, other in enumerate(args):
assert isinstance(other, Audio), "only Audio() instances can be used"
self._logger.debug(
"** iteration %02i --> appending %s" % ((i+1), other.__class__.__name__))
assert self.fs == other.fs, "Sample rates must match (%s != %s)" % (self.fs, other.fs)
assert self.samples.dtype == other.samples.dtype, \
"Data types must match (%s != %s)" % (self.samples.dtype, other.samples.dtype)
max_nofsamples = max(self.nofsamples, other.nofsamples)
missingsamples = abs(self.nofsamples - other.nofsamples)
self._logger.debug("max nof samples: %i" % max_nofsamples)
self._logger.debug(
"appending %i new channel(s) and %i samples" % (other.ch, missingsamples))
if self.nofsamples > other.nofsamples:
self._logger.debug("self.nofsamples > other.nofsamples")
tmp = np.append(other.samples,
np.zeros(((missingsamples), other.ch), dtype=other.samples.dtype),
axis=0)
self.samples = np.append(self.samples, tmp, axis=1)
elif self.nofsamples < other.nofsamples:
self._logger.debug("self.nofsamples < other.nofsamples")
tmp = np.append(self.samples,
np.zeros(((missingsamples), self.ch), dtype=self.samples.dtype),
axis=0)
self.samples = np.append(tmp, other.samples, axis=1)
else:
self._logger.debug("self.nofsamples == other.nofsamples")
self.samples = np.append(self.samples, other.samples, axis=1)
self.ch = self.ch+other.ch
self.nofsamples = max_nofsamples
self._set_duration()
def concat(self, *args):
"""Concatenate (append) samples *after* the current audio data.
example:
x1 = 1234
x2 = 5678
x1.concat(x2) --> 12345678
"""
for i, other in enumerate(args):
assert isinstance(other, Audio), "only Audio() instances can be used"
self._logger.debug(
"** iteration %02i --> appending %s" % ((i+1), other.__class__.__name__))
assert self.fs == other.fs, "Sample rates must match (%s != %s)" % (self.fs, other.fs)
assert self.samples.dtype == other.samples.dtype, \
"Data types must match (%s != %s)" % (self.samples.dtype, other.samples.dtype)
assert self.ch == other.ch, "channel count must match"
self.samples = np.append(self.samples, other.samples, axis=0)
self.nofsamples = len(self.samples)
self._set_duration()
def gain(self, *args):
"""Apply gain to the audio samples. Always specify gain values in dB.
Converts **IN PLACE**
"""
self._logger.debug('gains: %s' % str(args))
dt = self.samples.dtype
lin = db2lin(args)
# apply the (linear) gain
self.samples = lin*self.samples
# make sure that the data type is retained
self.samples = self.samples.astype(dt)
def rms(self):
"""Calculate the RMS (Root Mean Square) value of the audio
data. Returns the RMS value for each individual channel
"""
if not (self.samples == 0).all():
if np.issubdtype(self.samples.dtype, np.floating):
rms = np.sqrt(np.mean(np.power(self.samples, 2), axis=0))
else:
# use a bigger datatype for ints since we most likely will
# overflow when calculating to the power of 2
bigger = np.asarray(self.samples, dtype=np.int64)
rms = np.sqrt(np.mean(np.power(bigger, 2), axis=0))
elif len(self.samples) == 0:
# no samples are set but channels are configured
rms = np.zeros(self.ch)
rms[:] = float('nan')
else:
rms = np.zeros(self.ch)
return rms
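# Hypothetical sketch (not part of the original source): a full scale sine
# has an RMS of 1/sqrt(2) ~= 0.7071 per channel.
#   >>> x = Sinetone(f0=1000, fs=48000, duration=1.0, gaindb=0)
#   >>> x.rms() # approximately array([0.7071])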
def peak(self):
"""Calculate peak sample value (with sign)"""
if len(self.samples) != 0:
if np.issubdtype(self.samples.dtype, np.floating):
idx = np.absolute(self.samples).argmax(axis=0)
else:
# We have to be careful when checking two's complement since the absolute value
# of the smallest possible value can't be represented without overflowing. For
# example: signed 16bit has range [-32768, 32767] so abs(-32768) cannot be
# represented in signed 16 bits --> use a bigger datatype
bigger = np.asarray(self.samples, dtype=np.int64)
idx = np.absolute(bigger).argmax(axis=0)
peak = np.array([self.samples[row, col] for col, row in enumerate(idx)])
else:
# no samples are set but channels are configured
idx = np.zeros(self.ch, dtype=np.int64)
peak = np.zeros(self.ch)
peak[:] = float('nan')
return peak, idx
def crest_factor(self):
"""Calculate the Crest Factor (peak over RMS) value of the
audio. Returns the crest factor value for each channel.
Some common crest factor values:
sine : 1.414...
MLS : 1 (if no emphasis filter is applied)
impulse : very high. The value gets higher the longer
the length of the audio data.
square : 1 (ideal square)
zeros : NaN (we cannot calculate 0/0)
"""
rms = self.rms()
assert len(rms) != 0
with np.errstate(invalid='ignore'):
# if the rms is zero we will get division errors. Ignore them.
if len(self.samples) != 0:
crest = np.abs(self.samples).max(axis=0)/rms
else:
# no samples are set but channels are configured
crest = np.zeros(self.ch)
crest[:] = float('nan')
return crest
def convert_to_integer(self, targetbits=16):
"""Scale floating point values between [-1.0, 1.0] to the equivalent
signed integer value. Converts **IN PLACE**
Note: 24 bit signed integers and 8 bit unsigned integers currently unsupported.
"""
assert targetbits in (8, 16, 32, 64)
assert self.samples.dtype in (np.int8, np.int16, np.int32, np.int64,
np.float32, np.float64)
dt = {
8 : 'int8',
16 : 'int16',
32 : 'int32',
64 : 'int64',
}
sourcebits = self.samples.itemsize * 8
if self.samples.dtype in (np.float32, np.float64):
self._logger.debug(
"source is %02i bits (float), target is %2i bits (integer)"
% (sourcebits, targetbits))
self.samples = np.array(self.samples*(2**(targetbits-1)-1),
dtype=dt.get(targetbits))
else:
self._logger.debug(
"source is %02i bits (integer), target is %2i bits (integer)"
% (sourcebits, targetbits))
raise NotImplementedError("TODO: implement scale int->int")
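# Worked example (illustrative, not from the original source): converting
# float64 samples to 16 bit integers multiplies by 2**15 - 1 = 32767, so a
# full scale value of 1.0 becomes 32767 and -1.0 becomes -32767.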
def convert_to_float(self, targetbits=64):
"""Scale integer values to equivalent floating point values
between [-1.0, 1.0]. Converts **IN PLACE**
"""
assert targetbits in (32, 64)
assert self.samples.dtype in (np.int8, np.int16, np.int32, np.int64,
np.float32, np.float64)
dt = {32 : 'float32',
64 : 'float64'}
sourcebits = self.samples.itemsize * 8
if self.samples.dtype in (np.int8, np.int16, np.int32, np.int64):
self._logger.debug(
"source is %02i bits (integer), target is %2i bits (float)"
% (sourcebits, targetbits))
self.samples = np.array(self.samples/(2**(sourcebits-1)), dtype=dt.get(targetbits))
else:
self._logger.debug(
"source is %02i bits (float), target is %2i bits (float)"
% (sourcebits, targetbits))
self.samples = np.array(self.samples, dtype=dt.get(targetbits))
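# Worked example (illustrative, not from the original source): 16 bit
# integer samples are divided by 2**15 = 32768, so 32767 maps to roughly
# 0.99997 and -32768 maps to exactly -1.0.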
def write_wav_file(self, filename=None):
"""Save audio data to .wav file."""
assert filename is not None, "Specify a filename, for example 'filename=audio.wav'"
self._logger.debug("writing file %s" % filename)
if self.samples.dtype == np.float64:
self._logger.warn("datatype is %s" % self.samples.dtype)
try:
scipy.io.wavfile.write(filename, int(self.fs), self.samples)
except: # noqa: E722
self._logger.exception("Could not write file: '%s'" % filename)
def plot(self, ch=1, plotname=None, plotrange=(None, None), **kwargs):
"""Plot the audio data on a time domain plot.
example:
x1 = Sinetone(f0=0.2, fs=10, nofsamples=50)
x1.plot(linestyle='--', marker='x', color='r', label='sine at 0.2Hz')
"""
if ch != 'all':
assert ch-1 < self.ch, "channel does not exist"
if plotrange[0] is None:
plotrange = (0, plotrange[1])
if plotrange[1] is None:
plotrange = (plotrange[0], self.duration)
assert plotrange[0] >= 0 and plotrange[1] <= self.duration, "plotrange is out of bounds"
assert plotrange[0] <= plotrange[1], "malformed plotrange"
# Any fractional samples are truncated here
samplerange = (int(plotrange[0]*self.fs), int(plotrange[1]*self.fs))
timerange = np.linspace(
plotrange[0], plotrange[1], num=samplerange[1]-samplerange[0], endpoint=False)
plt.figure(1)
plt.title("%s" % self.__class__.__name__)
if ch != 'all':
plt.plot(timerange, self.samples[samplerange[0]:samplerange[1], ch-1], **kwargs)
else:
plt.plot(timerange, self.samples[samplerange[0]:samplerange[1], :], **kwargs)
plt.xlabel('Time [s]')
plt.ylabel('Amplitude [linear]')
if 'label' in kwargs:
plt.legend(loc='best')
plt.grid(True)
if plotname is None:
plt.show()
else:
plt.savefig(plotname)
plt.close(1)
def plot_fft(self, plotname=None, window='hann', normalise=True, **kwargs):
"""Make a plot (in the frequency domain) of all channels"""
ymin = kwargs.get('ymin', -160) # dB
freq, mag = self.fft(window=window, normalise=normalise)
fig_id = 1
plt.figure(fig_id)
#plt.semilogx(freq, mag, **kwargs) # plots all channels directly
for ch in range(self.ch):
plt.semilogx(freq, mag[:, ch], label='ch%2i' % (ch+1))
plt.xlim(left=1) # we're not interested in freqs. below 1 Hz
plt.ylim(bottom=ymin)
plt.xlabel('Frequency [Hz]')
plt.ylabel('Magnitude [dB]')
plt.legend(loc='best')
plt.grid(True)
if plotname is None:
plt.show()
else:
plt.savefig(plotname)
plt.close(fig_id)
def fft(self, window='hann', normalise=True):
"""Calculate the FFT of all channels. Returns data up to fs/2"""
fftsize = self.nofsamples
# Avoid Mersenne Primes
if fftsize in [(2**13)-1, (2**17)-1, (2**19)-1, (2**31)-1]:
self._logger.warn("FFT size is a Mersenne Prime, increasing size by 1")
fftsize = fftsize+1
self._logger.debug("fftsize: %i" % fftsize)
self._logger.debug("window : %s" % str(window))
win = scipy.signal.windows.get_window(window, Nx=self.nofsamples) # not fftsize!
win = np.expand_dims(win, axis=1)
y = self.samples*win
Y = np.fft.fft(y, n=fftsize, axis=0)
if normalise:
Y = Y/fftsize
mag = lin2db(np.abs(Y))
frq = np.fft.fftfreq(fftsize, 1/self.fs)
frq = frq[:int(fftsize/2)]
mag = mag[:int(fftsize/2)]
return frq, mag
def dither(self, bits=16, distribution='TPDF'):
raise NotImplementedError('TODO')
assert distribution == 'TPDF', \
"Only the Triangular Probability Density Function is implemented"
# Triangular Probability Density Function
#noise = np.random.triangular(-1, 0, 1, self.samples.shape)
def decimate(self, N):
"""
Throw away samples. Keep every Nth sample.
Converts **IN PLACE**
This is half of a downsampler. The other half would be to make sure
that the sampling theorem isn't violated for the new sample rate. So
the appropriate low pass filtering has to be done on the audio before
this method is called, otherwise aliasing might occur.
"""
self._logger.debug("decimate by factor N: %i", N)
# Use the slice operator. Efficient and fast. Will silently ignore the
# last incomplete chunk; for example 100 decimated by 3 leaves 1
# sample at the end that cannot be used.
self.samples = self.samples[::N]
self._logger.debug("decimated: %s", self.samples.shape)
self.nofsamples = len(self.samples)
self.set_sample_rate(self.fs/N)
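# Hypothetical usage sketch (not part of the original source): decimating
# white noise sampled at 96 kHz by N=2 keeps every other sample and the
# reported sample rate becomes 48 kHz; low pass filter below 24 kHz first.
#   >>> x = Noise(channels=1, fs=96000, duration=1.0)
#   >>> x.decimate(2) # x.fs is now 48000 and x.nofsamples is halved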
def resample(self, targetrate=8000, converter_type="sinc_best"):
"""Use the python bindings for the Secret Rabbit Code library
(aka libsamplerate) to perform sample rate conversion.
Converts **IN PLACE**
https://pypi.org/project/samplerate/
http://www.mega-nerd.com/SRC/index.html
"""
# From API docs:
# src_ratio : Equal to output_sample_rate / input_sample_rate.
ratio = targetrate / self.fs
self._logger.debug(
"source: %.2f destination: %.2f ratio %f",
self.fs, targetrate, ratio)
# We can use the simple API here since we always operate on the whole
# audio clip. See http://www.mega-nerd.com/SRC/api_simple.html
self.samples = samplerate.resample(
self.samples, ratio, converter_type=converter_type)
# The number of samples has changed; that is the whole point of
# this operation. Get the new values and calculate the new duration,
# hopefully that shouldn't change too much.
self.nofsamples, self.ch = self.samples.shape
self.fs = targetrate
self._set_duration()
def set_sample_rate(self, new_fs):
"""Change the sample rate fs *without* up/down sample conversion.
This would be the same as changing the playback speed. All data is
left intact and only the time parameters (fs and duration) are
changed.
"""
ratio = new_fs/self.fs
self.fs = new_fs
self._logger.debug('ratio: %.3f' % ratio)
self._set_duration()
return ratio
def normalise(self):
"""Normalise samples so that the new range is
[-1.0, 1.0] for floats
Converts **IN PLACE**
TODO: verify
[-2^n, 2^n-1] for ints
"""
peaks, unused_idx = self.peak()
self._logger.debug("raw peaks: %s" % peaks)
max_abs = np.max(np.absolute(peaks))
self._logger.debug("max_abs: %s" % max_abs)
self.samples = self.samples/max_abs
peaks, unused_idx = self.peak()
self._logger.debug("new peaks: %s" % peaks)
# ==================================================================================================
# Audio sub-classes
# ==================================================================================================
class Sinetone(Audio):
def __init__(self, f0=997, fs=96000, duration=None, gaindb=0, nofsamples=0, phasedeg=0):
"""Generate a sine tone"""
assert f0 < fs/2, "Sampling theorem is violated"
Audio.__init__(self, channels=1, fs=fs, nofsamples=nofsamples, duration=duration)
self.f0 = f0
self.phasedeg = phasedeg
self._set_samples(idx=0, samples=self._sine_gen(f0, phasedeg))
self.gain(gaindb)
def _sine_gen(self, freq, pha):
return np.sin(2*np.pi*freq*self.get_time()+np.deg2rad(pha))
def __repr__(self):
assert self.ch == 1, "If a channel has been appended we don't know anything about its data"
s = 'Sinetone(f0=%r, fs=%r, nofsamples=%r, gaindb=%r, phasedeg=%r)' \
% (self.f0, self.fs, self.nofsamples,
lin2db(abs(float(self.peak()[0]))), # only one channel here.
self.phasedeg)
return s
def __str__(self):
s = Audio.__str__(self)
s += 'frequency : %.1f [Hz]\n' % self.f0
s += 'phase : %.1f [deg]\n' % self.phasedeg
s += '-----------------:---------------------\n'
return s
def set_sample_rate(self, new_fs):
ratio = Audio.set_sample_rate(self, new_fs)
self.f0 = ratio * self.f0
class Sinetones(Sinetone):
def __init__(self, *args, **kwargs):
"""Generate multiple sinetones. This is a quick way to generate multichannel audio.
Each positional argument generates a sinetone at that channel. Setting the frequency
to 0 guarantees that the channel is muted (contains samples with the value 0).
Keywords accepted are similar to the ones used in the Sinetone() class.
Example:
>>> x = Sinetones(200, 500, 900, fs=24000, duration=1.5, gaindb=-6, phasedeg=0)
>>> print(x)
=======================================
classname : Sinetones
sample rate : 24000.0 [Hz]
channels : 3
duration : 1.500 [s]
datatype : float64
samples per ch : 36000
data size : 0.824 [Mb]
has comment : no
peak : [ 0.5012 0.5012 -0.5012]
RMS : [ 0.3544 0.3544 0.3544]
crestfactor : [ 1.4142 1.4142 1.4142]
-----------------:---------------------
phase (all ch) : 0.0 [deg]
:
channel 1 : 200.0 [Hz]
channel 2 : 500.0 [Hz]
channel 3 : 900.0 [Hz]
-----------------:---------------------
>>>
The gaindb argument can be an iterable of the same length as the number of frequencies
specified. In this case a gain can be applied individually for each channel.
>>> x = Sinetones(1000, 2000, duration=1, gaindb=(-6, -20))
A list can be used as the argument for the frequencies. Use the * notation to expand
the list:
>>> import numpy as np
>>> f = np.zeros(8)
>>> f[3] = 700
>>> f[7] = 2000
>>> x = Sinetones(*f, duration=1)
>>> print(x)
=======================================
classname : Sinetones
sample rate : 96000.0 [Hz]
channels : 8
duration : 1.000 [s]
datatype : float64
samples per ch : 96000
data size : 5.859 [Mb]
has comment : no
peak : [ 0. 0. 0. -1. 0. 0. 0. 1.]
RMS : [ 0. 0. 0. 0.7071 0. 0. 0. 0.7071]
crestfactor : [ nan nan nan 1.4142 nan nan nan 1.4142]
-----------------:---------------------
phase (all ch) : 0.0 [deg]
:
channel 1 :
channel 2 :
channel 3 :
channel 4 : 700.0 [Hz]
channel 5 :
channel 6 :
channel 7 :
channel 8 : 2000.0 [Hz]
-----------------:---------------------
>>>
The argument phasedeg applies to all channels.
"""
fs = kwargs.pop('fs', 96000)
duration = kwargs.pop('duration', None)
nofsamples = kwargs.pop('nofsamples', 0)
self._gaindb = kwargs.pop('gaindb', 0)
self.phasedeg = kwargs.pop('phasedeg', 0)
self.frequencies = args
for frequency in self.frequencies:
assert frequency < fs/2, "Sampling theorem is violated for frequency %.1f" % frequency
if not isinstance(self._gaindb, int):
assert len(self._gaindb) == len(self.frequencies), \
"set as many gains as channels used: %i != %i" % (len(self._gaindb),
len(self.frequencies))
Audio.__init__(self, channels=len(self.frequencies), fs=fs, nofsamples=nofsamples,
duration=duration)
for i, frequency in enumerate(self.frequencies):
if frequency != 0:
self._set_samples(idx=i, samples=self._sine_gen(frequency, self.phasedeg))
else:
pass # channel is silence
self.gain(self._gaindb)
def __repr__(self):
s = 'Sinetones(*%r, fs=%r, nofsamples=%r, gaindb=%r, phasedeg=%r)' \
% (list(self.frequencies), self.fs, self.nofsamples, self._gaindb, self.phasedeg)
return s
def __str__(self):
s = Audio.__str__(self)
s += 'phase (all ch) : %.1f [deg]\n' % self.phasedeg
s += ' :\n'
for i, frequency in enumerate(self.frequencies):
if frequency != 0:
s += 'channel %2i : %.1f [Hz]\n' % (i+1, frequency)
else:
s += 'channel %2i :\n' % (i+1)
s += '-----------------:---------------------\n'
return s
def set_sample_rate(self, new_fs):
ratio = Audio.set_sample_rate(self, new_fs)
self.frequencies = [ratio*f for f in self.frequencies]
class SquareWave(Audio):
def __init__(self, f0=997, fs=96000, duration=None, gaindb=0, nofsamples=0,
phasedeg=0, dutycycle=0.5):
"""Generate an ideal squarewave."""
assert f0 < fs/2, "Sampling theorem is violated"
assert dutycycle < 1 and dutycycle > 0
Audio.__init__(self, channels=1, fs=fs, nofsamples=nofsamples, duration=duration)
self.f0 = f0
self.phasedeg = phasedeg
self.dutycycle = dutycycle
samples = scipy.signal.square(2*np.pi*f0*self.get_time()+np.deg2rad(phasedeg),
duty=dutycycle)
self._set_samples(idx=0, samples=samples)
self.gain(gaindb)
def __repr__(self):
assert self.ch == 1, "If a channel has been appended we don't know anything about its data"
s = 'SquareWave(f0=%r, fs=%r, gaindb=%r, nofsamples=%r, phasedeg=%r, dutycycle=%r)' \
% (self.f0, self.fs,
lin2db(abs(float(self.peak()[0]))), # only one channel here.
self.nofsamples, self.phasedeg, self.dutycycle)
return s
def __str__(self):
s = Audio.__str__(self)
s += 'frequency : %.1f [Hz]\n' % self.f0
s += 'phase : %.1f [deg]\n' % self.phasedeg
s += 'duty cycle : %.3f (%4.1f%%)\n' % (self.dutycycle, self.dutycycle*100)
s += '-----------------:---------------------\n'
return s
def set_sample_rate(self, new_fs):
ratio = Audio.set_sample_rate(self, new_fs)
self.f0 = ratio * self.f0
class FourierSeries(Sinetone):
def __init__(self, f0=997, fs=96000, duration=None, gaindb=0, nofsamples=0,
phasedeg=0, harmonics=7,):
"""Construct a square wave by adding odd harmonics with decreasing
amplitude, i.e. Fourier Series.
"""
Sinetone.__init__(self, f0=f0, phasedeg=phasedeg, fs=fs, nofsamples=nofsamples,
duration=duration, gaindb=0)
assert harmonics >= 0
self.harmonics = harmonics
self._logger.debug("fundamental f0: %.1f" % f0)
for n in range(3, 2*(self.harmonics+1), 2):
if n <= 15:
self._logger.debug("adding harmonic n: %2i with amplitude 1/%i" % (n, n))
if n == 17:
self._logger.debug("adding %i more harmonics..." % (self.harmonics-(n-3)//2))
#self.samples[:,0] += np.sin(2*np.pi*(n*f0)*self.get_time()+np.deg2rad(phasedeg*n))/n
self.samples[:, 0] += (1/n)*self._sine_gen(n*f0, n*phasedeg)
self.gain(gaindb)
def __repr__(self):
assert self.ch == 1, "If a channel has been appended we don't know anything about its data"
s = 'FourierSeries(f0=%r, fs=%r, gaindb=%r, nofsamples=%r, phasedeg=%r, harmonics=%r)' \
% (self.f0, self.fs,
lin2db(abs(float(self.peak()[0]))), # only one channel here.
self.nofsamples, self.phasedeg, self.harmonics)
return s
def __str__(self):
s = Sinetone.__str__(self)
s = s.rstrip('-----------------:---------------------\n')
s += '\n'
s += 'harmonics : %i \n' % self.harmonics
s += '-----------------:---------------------\n'
return s
class Noise(Audio):
colours = ('white', 'pink', 'brown', 'blue', 'violet', 'grey')
def __init__(self, channels=1, fs=96000, duration=None, gaindb=-10, nofsamples=0,
colour='white'):
"""Generate uncorrelated noise.
white : flat power spectral density
pink : -3dB per octave
brown(ian) : -6dB per octave
blue : +3dB per octave
violet : +6dB per octave
grey : equal loudness
"""
assert colour in Noise.colours, "choose the colour of the noise: %s" % str(Noise.colours)
Audio.__init__(self, channels=channels, fs=fs, nofsamples=nofsamples, duration=duration)
# the distribution in np.random.uniform is half open, i.e. -1.0 is
# included but 1.0 is not. Possible fix: use integers instead, then
# scale to floats. Might not work, since the integers will be
# represented using twos complement and we then have an asymmetrical
# range anyhow.
self._colour = colour
# first generate uniformly distributed noise, i.e. white noise. Then filter
# to get the required shape.
for ch in range(channels):
self._set_samples(idx=ch,
samples=np.random.uniform(low=-1.0, high=1.0, size=self.nofsamples))
if self._colour == 'pink':
# -3dB per octave
self._logger.debug("filtering to get pink noise")
# http://dsp.stackexchange.com/q/322/6999
B = [0.049922035, -0.095993537, 0.050612699, -0.004408786]
A = [1, -2.494956002, 2.017265875, -0.522189400]
self.samples = scipy.signal.lfilter(B, A, self.samples, axis=0)
elif self._colour == 'brown':
# -6dB per octave
raise NotImplementedError('TODO')
elif self._colour == 'blue':
# +3dB per octave
raise NotImplementedError('TODO')
elif self._colour == 'violet':
# +6dB per octave
raise NotImplementedError('TODO')
elif self._colour == 'grey':
# equal loudness
raise NotImplementedError('TODO')
self.gain(gaindb)
def __str__(self):
s = Audio.__str__(self)
s += 'colour : %s\n' % self._colour
s += '-----------------:---------------------\n'
return s
class WavFile(Audio):
def __init__(self, filename=None, scale2float=True):
"""Read a .wav file from disk"""
assert filename is not None, "Specify a filename"
self.filename = filename
fs, samples = scipy.io.wavfile.read(filename)
if samples.ndim == 1:
samples = np.expand_dims(samples, axis=1)
Audio.__init__(self, fs=fs, initialdata=samples)
del samples # just to make sure
if scale2float:
self.convert_to_float(targetbits=64)
def __str__(self):
s = Audio.__str__(self)
s += 'filename : %s\n' % os.path.basename(self.filename)
s += '-----------------:---------------------\n'
return s
# ==================================================================================================
# Functions
# ==================================================================================================
def lin2db(lin):
with np.errstate(divide='ignore'):
# linear value 0 is common (as -inf dB) so we ignore any division warnings
db = 20*np.log10(lin)
return db
def pow2db(power):
with np.errstate(divide='ignore'):
# ignore any division warnings
db = 10*np.log10(power)
return db
def db2lin(db):
lin = np.power(10, np.array(db)/20)
return lin
def db2pow(db):
power = np.power(10, np.array(db)/10)
return power
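# Hypothetical doctest style sketch (not part of the original source) for
# the dB helpers above:
#   >>> lin2db(0.5) # approximately -6.02
#   >>> db2lin(-20) # 0.1
#   >>> db2pow(-10) # 0.1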
def speed_of_sound(temperature=20, medium='air'):
"""The speed of sound is depending on the medium and the temperature. For air at
a temperature of 20 degree Celcius the speed of sound is approximately 343 [m/s]
"""
assert medium in ['air', ], "TODO: water, iron"
c = float('nan')
if medium == 'air':
c = 331.3*np.sqrt(1+temperature/273.15)
return c
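# Worked example (illustrative): at 20 degrees Celsius the formula above
# gives 331.3*sqrt(1 + 20/273.15) ~= 343.2 [m/s], which matches the default
# speed used by wavelength() below.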
def wavelength(frequency, speed=343.2):
"""Calculate the wavelength l of frequency f given the speed (of sound)"""
length = speed/frequency
return length
def rad2hz(w0, fs=96000):
"""Calculate a normalised rotational frequency so that w0=2*pi --> f0=fs
w0
f0 = fs * ------
2*pi
"""
return fs*np.array(w0)/(2*np.pi)
def hz2rad(f0, fs=96000):
"""Calculate a normalised angular frequency so that f0=fs --> w0=2*pi
1
w0 = ----- * 2*pi*f0
fs
"""
return (1/fs)*2*np.pi*np.array(f0)
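# Hypothetical sketch (not part of the original source) relating the two
# helpers above: half the sample rate corresponds to pi radians per sample.
#   >>> w0 = hz2rad(48000, fs=96000) # approximately pi
#   >>> rad2hz(w0, fs=96000) # back to approximately 48000 Hz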
__all__ = [
# classes
'Audio',
'Sinetone',
'Sinetones',
'SquareWave',
'FourierSeries',
'Noise',
'WavFile',
# functions
'lin2db',
'pow2db',
'db2lin',
'db2pow',
'speed_of_sound',
'wavelength',
'rad2hz',
'hz2rad',
]
if __name__ == '__main__':
logging.basicConfig(
format='%(levelname)-7s: %(module)s.%(funcName)-15s %(message)s',
level='DEBUG',
)
logging.getLogger("matplotlib").setLevel(logging.INFO)
print('-- Done --')
| mit |
ahuang11/ahh | setup.py | 1 | 1228 | from setuptools import setup, find_packages
import sys
import glob
sys.path.append('builder/')
from conf import source_version
__author__ = '[email protected]'
__copyright__ = 'Andrew Huang'
setup(name='ahh',
license='MIT',
version=source_version,
description='Functions that I can easily reference, and maybe you too!',
packages=find_packages(exclude=['cartopy', 'basemap']),
install_requires=[
'matplotlib',
'numpy',
'pandas',
'xarray',
'netCDF4',
'bokeh',
'scipy',
],
author='Andrew Huang',
author_email='[email protected]',
url='https://github.com/ahuang11/ahh',
keywords=['data', 'visualization',
'analysis', 'streamline',
'andrew', 'huang', 'helps'],
include_package_data=True,
classifiers=[
'Development Status :: 4 - Beta',
'Topic :: Scientific/Engineering :: Visualization',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
],
)
| mit |
CalebBell/fluids | docs/plots/contraction_conical_plot.py | 1 | 1411 | import matplotlib.pyplot as plt
import numpy as np
from fluids.fittings import contraction_conical_methods, contraction_conical
styles = ['--', '-.', '-', ':', '.', ',', 'o', 'v', '^', '<', '>', '1', '2', '3', '4']
D_ratios = np.linspace(1-1e-9, .01, 100)
angles = np.array([[2, 4, 8, 10],
[15, 20, 25, 30],
[45, 60, 90, 120],
[135, 150, 165, 180]])
f, axarr = plt.subplots(4, 4)
for angle, axes in zip(angles.ravel(), axarr.ravel()):
for method, style in zip(contraction_conical_methods, styles):
Ks = [contraction_conical(Di1=1, Di2=Di, Re=1E6, angle=angle, method=method) for Di in D_ratios]
Ds2 = D_ratios**2
axes.plot(Ds2, Ks, label=method) # + ', angle = ' + str(angle)
#axes.legend()
axes.set_title(r'$%g^\circ$ Angle' %angle)
#axes.set_xlabel('Area ratio')
#axes.set_ylabel('K')
for item in ([axes.title, axes.xaxis.label, axes.yaxis.label] +
axes.get_xticklabels() + axes.get_yticklabels()):
item.set_fontsize(6.5)
ttl = axes.title.set_position([.5, .93])
plt.subplots_adjust(wspace=.35, hspace=.35)
f.suptitle('Comparison of available methods for conical pipe contractions\n Area ratio (x) vs. Loss coefficient (y)')
plt.legend(loc='upper center', bbox_to_anchor=(1.65, 4.7))
plt.subplots_adjust(right=0.82)
#plt.show()
| mit |
soulmachine/scikit-learn | sklearn/utils/tests/test_random.py | 20 | 3872 | from __future__ import division
import numpy as np
from scipy.misc import comb as combinations
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.testing import (
assert_raises,
assert_equal,
assert_true)
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown")
def test_sample_without_replacement_algorithms():
methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")
for m in methods:
def sample_without_replacement_method(n_population, n_samples,
random_state=None):
return sample_without_replacement(n_population, n_samples,
method=m,
random_state=random_state)
check_edge_case_of_sample_int(sample_without_replacement_method)
check_sample_int(sample_without_replacement_method)
check_sample_int_distribution(sample_without_replacement_method)
def check_edge_case_of_sample_int(sample_without_replacement):
# n_population < n_samples
assert_raises(ValueError, sample_without_replacement, 0, 1)
assert_raises(ValueError, sample_without_replacement, 1, 2)
# n_population == n_samples
assert_equal(sample_without_replacement(0, 0).shape, (0, ))
assert_equal(sample_without_replacement(1, 1).shape, (1, ))
# n_population >= n_samples
assert_equal(sample_without_replacement(5, 0).shape, (0, ))
assert_equal(sample_without_replacement(5, 1).shape, (1, ))
# n_population < 0 or n_samples < 0
assert_raises(ValueError, sample_without_replacement, -1, 5)
assert_raises(ValueError, sample_without_replacement, 5, -1)
def check_sample_int(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# the sample is of the correct length and contains only unique items
n_population = 100
for n_samples in range(n_population + 1):
s = sample_without_replacement(n_population, n_samples)
assert_equal(len(s), n_samples)
unique = np.unique(s)
assert_equal(np.size(unique), n_samples)
assert_true(np.all(unique < n_population))
# test edge case n_population == n_samples == 0
assert_equal(np.size(sample_without_replacement(0, 0)), 0)
def check_sample_int_distribution(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# sample generates all possible permutations
n_population = 10
# a large number of trials prevents false negatives without slowing normal
# case
n_trials = 10000
for n_samples in range(n_population):
# Counting the number of combinations is not as good as counting the
# the number of permutations. However, it works with sampling algorithm
# that does not provide a random permutation of the subset of integer.
n_expected = combinations(n_population, n_samples, exact=True)
output = {}
for i in range(n_trials):
output[frozenset(sample_without_replacement(n_population,
n_samples))] = None
if len(output) == n_expected:
break
else:
raise AssertionError(
"number of combinations != number of expected (%s != %s)" %
(len(output), n_expected))
| bsd-3-clause |
0x0all/scikit-learn | sklearn/linear_model/tests/test_ridge.py | 2 | 22917 | import numpy as np
import scipy.sparse as sp
from scipy import linalg
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.metrics import mean_squared_error
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.ridge import ridge_regression
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.ridge import _RidgeGCV
from sklearn.linear_model.ridge import RidgeCV
from sklearn.linear_model.ridge import RidgeClassifier
from sklearn.linear_model.ridge import RidgeClassifierCV
from sklearn.linear_model.ridge import _solve_cholesky
from sklearn.linear_model.ridge import _solve_cholesky_kernel
from sklearn.cross_validation import KFold
diabetes = datasets.load_diabetes()
X_diabetes, y_diabetes = diabetes.data, diabetes.target
ind = np.arange(X_diabetes.shape[0])
rng = np.random.RandomState(0)
rng.shuffle(ind)
ind = ind[:200]
X_diabetes, y_diabetes = X_diabetes[ind], y_diabetes[ind]
iris = datasets.load_iris()
X_iris = sp.csr_matrix(iris.data)
y_iris = iris.target
DENSE_FILTER = lambda X: X
SPARSE_FILTER = lambda X: sp.csr_matrix(X)
def test_ridge():
"""Ridge regression convergence test using score
TODO: for this test to be robust, we should use a dataset instead
of np.random.
"""
rng = np.random.RandomState(0)
alpha = 1.0
for solver in ("svd", "sparse_cg", "cholesky", "lsqr"):
# With more samples than features
n_samples, n_features = 6, 5
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (X.shape[1], ))
assert_greater(ridge.score(X, y), 0.47)
if solver == "cholesky":
# Currently the only solver to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.47)
# With more features than samples
n_samples, n_features = 5, 10
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), .9)
if solver == "cholesky":
# Currently the only solver to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.9)
def test_primal_dual_relationship():
y = y_diabetes.reshape(-1, 1)
coef = _solve_cholesky(X_diabetes, y, alpha=[1e-2])
K = np.dot(X_diabetes, X_diabetes.T)
dual_coef = _solve_cholesky_kernel(K, y, alpha=[1e-2])
coef2 = np.dot(X_diabetes.T, dual_coef).T
assert_array_almost_equal(coef, coef2)
def test_ridge_singular():
# test on a singular matrix
rng = np.random.RandomState(0)
n_samples, n_features = 6, 6
y = rng.randn(n_samples // 2)
y = np.concatenate((y, y))
X = rng.randn(n_samples // 2, n_features)
X = np.concatenate((X, X), axis=0)
ridge = Ridge(alpha=0)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), 0.9)
def test_ridge_sample_weights():
rng = np.random.RandomState(0)
for solver in ("cholesky", ):
for n_samples, n_features in ((6, 5), (5, 10)):
for alpha in (1.0, 1e-2):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1 + rng.rand(n_samples)
coefs = ridge_regression(X, y,
alpha=alpha,
sample_weight=sample_weight,
solver=solver)
# Sample weight can be implemented via a simple rescaling
# for the square loss.
coefs2 = ridge_regression(
X * np.sqrt(sample_weight)[:, np.newaxis],
y * np.sqrt(sample_weight),
alpha=alpha, solver=solver)
assert_array_almost_equal(coefs, coefs2)
# Test for fit_intercept = True
est = Ridge(alpha=alpha, solver=solver)
est.fit(X, y, sample_weight=sample_weight)
# Check using Newton's Method
# Quadratic function should be solved in a single step.
# Initialize
sample_weight = np.sqrt(sample_weight)
X_weighted = sample_weight[:, np.newaxis] * (
np.column_stack((np.ones(n_samples), X)))
y_weighted = y * sample_weight
# Gradient is (X*coef-y)*X + alpha*coef_[1:]
# Remove coef since it is initialized to zero.
grad = -np.dot(y_weighted, X_weighted)
# Hessian is (X.T*X) + alpha*I except that the first
# diagonal element should be zero, since there is no
# penalization of intercept.
diag = alpha * np.ones(n_features + 1)
diag[0] = 0.
hess = np.dot(X_weighted.T, X_weighted)
hess.flat[::n_features + 2] += diag
coef_ = - np.dot(linalg.inv(hess), grad)
assert_almost_equal(coef_[0], est.intercept_)
assert_array_almost_equal(coef_[1:], est.coef_)
def test_ridge_shapes():
"""Test shape of coef_ and intercept_
"""
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y1 = y[:, np.newaxis]
Y = np.c_[y, 1 + y]
ridge = Ridge()
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (n_features,))
assert_equal(ridge.intercept_.shape, ())
ridge.fit(X, Y1)
assert_equal(ridge.coef_.shape, (1, n_features))
assert_equal(ridge.intercept_.shape, (1, ))
ridge.fit(X, Y)
assert_equal(ridge.coef_.shape, (2, n_features))
assert_equal(ridge.intercept_.shape, (2, ))
def test_ridge_intercept():
"""Test intercept with multiple targets GH issue #708
"""
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y = np.c_[y, 1. + y]
ridge = Ridge()
ridge.fit(X, y)
intercept = ridge.intercept_
ridge.fit(X, Y)
assert_almost_equal(ridge.intercept_[0], intercept)
assert_almost_equal(ridge.intercept_[1], intercept + 1.)
def test_toy_ridge_object():
"""Test BayesianRegression ridge classifier
TODO: test also n_samples > n_features
"""
X = np.array([[1], [2]])
Y = np.array([1, 2])
clf = Ridge(alpha=0.0)
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_almost_equal(clf.predict(X_test), [1., 2, 3, 4])
assert_equal(len(clf.coef_.shape), 1)
assert_equal(type(clf.intercept_), np.float64)
Y = np.vstack((Y, Y)).T
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_equal(len(clf.coef_.shape), 2)
assert_equal(type(clf.intercept_), np.ndarray)
def test_ridge_vs_lstsq():
"""On alpha=0., Ridge and OLS yield the same solution."""
rng = np.random.RandomState(0)
# we need more samples than features
n_samples, n_features = 5, 4
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=0., fit_intercept=False)
ols = LinearRegression(fit_intercept=False)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
def test_ridge_individual_penalties():
"""Tests the ridge object using individual penalties"""
rng = np.random.RandomState(42)
n_samples, n_features, n_targets = 20, 10, 5
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples, n_targets)
penalties = np.arange(n_targets)
coef_cholesky = np.array([
Ridge(alpha=alpha, solver="cholesky").fit(X, target).coef_
for alpha, target in zip(penalties, y.T)])
coefs_indiv_pen = [
Ridge(alpha=penalties, solver=solver, tol=1e-6).fit(X, y).coef_
for solver in ['svd', 'sparse_cg', 'lsqr', 'cholesky']]
for coef_indiv_pen in coefs_indiv_pen:
assert_array_almost_equal(coef_cholesky, coef_indiv_pen)
# Test error is raised when number of targets and penalties do not match.
ridge = Ridge(alpha=penalties[:3])
assert_raises(ValueError, ridge.fit, X, y)
def _test_ridge_loo(filter_):
# test that can work with both dense or sparse matrices
n_samples = X_diabetes.shape[0]
ret = []
ridge_gcv = _RidgeGCV(fit_intercept=False)
ridge = Ridge(alpha=1.0, fit_intercept=False)
# generalized cross-validation (efficient leave-one-out)
decomp = ridge_gcv._pre_compute(X_diabetes, y_diabetes)
errors, c = ridge_gcv._errors(1.0, y_diabetes, *decomp)
values, c = ridge_gcv._values(1.0, y_diabetes, *decomp)
# brute-force leave-one-out: remove one example at a time
errors2 = []
values2 = []
for i in range(n_samples):
sel = np.arange(n_samples) != i
X_new = X_diabetes[sel]
y_new = y_diabetes[sel]
ridge.fit(X_new, y_new)
value = ridge.predict([X_diabetes[i]])[0]
error = (y_diabetes[i] - value) ** 2
errors2.append(error)
values2.append(value)
# check that efficient and brute-force LOO give same results
assert_almost_equal(errors, errors2)
assert_almost_equal(values, values2)
# generalized cross-validation (efficient leave-one-out,
# SVD variation)
decomp = ridge_gcv._pre_compute_svd(X_diabetes, y_diabetes)
errors3, c = ridge_gcv._errors_svd(ridge.alpha, y_diabetes, *decomp)
values3, c = ridge_gcv._values_svd(ridge.alpha, y_diabetes, *decomp)
# check that efficient and SVD efficient LOO give same results
assert_almost_equal(errors, errors3)
assert_almost_equal(values, values3)
# check best alpha
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
alpha_ = ridge_gcv.alpha_
ret.append(alpha_)
# check that we get same best alpha with custom loss_func
f = ignore_warnings
scoring = make_scorer(mean_squared_error, greater_is_better=False)
ridge_gcv2 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv2.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv2.alpha_, alpha_)
# check that we get same best alpha with custom score_func
func = lambda x, y: -mean_squared_error(x, y)
scoring = make_scorer(func)
ridge_gcv3 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv3.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv3.alpha_, alpha_)
# check that we get same best alpha with a scorer
scorer = get_scorer('mean_squared_error')
ridge_gcv4 = RidgeCV(fit_intercept=False, scoring=scorer)
ridge_gcv4.fit(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv4.alpha_, alpha_)
# check that we get same best alpha with sample weights
ridge_gcv.fit(filter_(X_diabetes), y_diabetes,
sample_weight=np.ones(n_samples))
assert_equal(ridge_gcv.alpha_, alpha_)
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
ridge_gcv.fit(filter_(X_diabetes), Y)
Y_pred = ridge_gcv.predict(filter_(X_diabetes))
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge_gcv.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=5)
return ret
def _test_ridge_cv(filter_):
n_samples = X_diabetes.shape[0]
ridge_cv = RidgeCV()
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
cv = KFold(n_samples, 5)
ridge_cv.set_params(cv=cv)
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
def _test_ridge_diabetes(filter_):
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), y_diabetes)
return np.round(ridge.score(filter_(X_diabetes), y_diabetes), 5)
def _test_multi_ridge_diabetes(filter_):
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
n_features = X_diabetes.shape[1]
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), Y)
assert_equal(ridge.coef_.shape, (2, n_features))
Y_pred = ridge.predict(filter_(X_diabetes))
ridge.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=3)
def _test_ridge_classifiers(filter_):
n_classes = np.unique(y_iris).shape[0]
n_features = X_iris.shape[1]
for clf in (RidgeClassifier(), RidgeClassifierCV()):
clf.fit(filter_(X_iris), y_iris)
assert_equal(clf.coef_.shape, (n_classes, n_features))
y_pred = clf.predict(filter_(X_iris))
assert_greater(np.mean(y_iris == y_pred), .79)
n_samples = X_iris.shape[0]
cv = KFold(n_samples, 5)
clf = RidgeClassifierCV(cv=cv)
clf.fit(filter_(X_iris), y_iris)
y_pred = clf.predict(filter_(X_iris))
assert_true(np.mean(y_iris == y_pred) >= 0.8)
def _test_tolerance(filter_):
ridge = Ridge(tol=1e-5)
ridge.fit(filter_(X_diabetes), y_diabetes)
score = ridge.score(filter_(X_diabetes), y_diabetes)
ridge2 = Ridge(tol=1e-3)
ridge2.fit(filter_(X_diabetes), y_diabetes)
score2 = ridge2.score(filter_(X_diabetes), y_diabetes)
assert_true(score >= score2)
def test_dense_sparse():
for test_func in (_test_ridge_loo,
_test_ridge_cv,
_test_ridge_diabetes,
_test_multi_ridge_diabetes,
_test_ridge_classifiers,
_test_tolerance):
# test dense matrix
ret_dense = test_func(DENSE_FILTER)
# test sparse matrix
ret_sparse = test_func(SPARSE_FILTER)
# test that the outputs are the same
if ret_dense is not None and ret_sparse is not None:
assert_array_almost_equal(ret_dense, ret_sparse, decimal=3)
def test_ridge_cv_sparse_svd():
X = sp.csr_matrix(X_diabetes)
ridge = RidgeCV(gcv_mode="svd")
assert_raises(TypeError, ridge.fit, X)
def test_ridge_sparse_svd():
X = sp.csc_matrix(rng.rand(100, 10))
y = rng.rand(100)
ridge = Ridge(solver='svd')
assert_raises(TypeError, ridge.fit, X, y)
def test_class_weights():
"""
Test class weights.
"""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf = RidgeClassifier(class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
# check if class_weight = 'auto' can handle negative labels.
clf = RidgeClassifier(class_weight='auto')
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# class_weight = 'auto', and class_weight = None should return
# same values when y has equal number of all labels
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0]])
y = [1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
clfa = RidgeClassifier(class_weight='auto')
clfa.fit(X, y)
assert_equal(len(clfa.classes_), 2)
assert_array_almost_equal(clf.coef_, clfa.coef_)
assert_array_almost_equal(clf.intercept_, clfa.intercept_)
def test_class_weights_cv():
"""
Test class weights for cross validated ridge classifier.
"""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifierCV(class_weight=None, alphas=[.01, .1, 1])
clf.fit(X, y)
# we give a small weights to class 1
clf = RidgeClassifierCV(class_weight={1: 0.001}, alphas=[.01, .1, 1, 10])
clf.fit(X, y)
assert_array_equal(clf.predict([[-.2, 2]]), np.array([-1]))
def test_ridgecv_store_cv_values():
"""
Test _RidgeCV's store_cv_values attribute.
"""
rng = np.random.RandomState(42)
n_samples = 8
n_features = 5
x = rng.randn(n_samples, n_features)
alphas = [1e-1, 1e0, 1e1]
n_alphas = len(alphas)
r = RidgeCV(alphas=alphas, store_cv_values=True)
# with len(y.shape) == 1
y = rng.randn(n_samples)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_alphas))
# with len(y.shape) == 2
n_responses = 3
y = rng.randn(n_samples, n_responses)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_responses, n_alphas))
def test_ridge_sample_weights_in_feature_space():
"""Check that Cholesky solver in feature space applies sample_weights
correctly.
"""
rng = np.random.RandomState(42)
n_samples_list = [5, 6, 7] * 2
n_features_list = [7, 6, 5] * 2
n_targets_list = [1, 1, 1, 2, 2, 2]
noise = 1.
alpha = 2.
alpha = np.atleast_1d(alpha)
for n_samples, n_features, n_targets in zip(n_samples_list,
n_features_list,
n_targets_list):
X = rng.randn(n_samples, n_features)
beta = rng.randn(n_features, n_targets)
Y = X.dot(beta)
Y_noisy = Y + rng.randn(*Y.shape) * np.sqrt((Y ** 2).sum(0)) * noise
K = X.dot(X.T)
sample_weights = 1. + (rng.randn(n_samples) ** 2) * 10
coef_sample_space = _solve_cholesky_kernel(K, Y_noisy, alpha,
sample_weight=sample_weights)
coef_feature_space = _solve_cholesky(X, Y_noisy, alpha,
sample_weight=sample_weights)
assert_array_almost_equal(X.T.dot(coef_sample_space),
coef_feature_space.T)
def test_raises_value_error_if_sample_weights_greater_than_1d():
"""Sample weights must be either scalar or 1D"""
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights_OK = rng.randn(n_samples) ** 2 + 1
sample_weights_OK_1 = 1.
sample_weights_OK_2 = 2.
sample_weights_not_OK = sample_weights_OK[:, np.newaxis]
sample_weights_not_OK_2 = sample_weights_OK[np.newaxis, :]
ridge = Ridge(alpha=1)
# make sure the "OK" sample weights actually work
ridge.fit(X, y, sample_weights_OK)
ridge.fit(X, y, sample_weights_OK_1)
ridge.fit(X, y, sample_weights_OK_2)
def fit_ridge_not_ok():
ridge.fit(X, y, sample_weights_not_OK)
def fit_ridge_not_ok_2():
ridge.fit(X, y, sample_weights_not_OK_2)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok_2)
def test_sparse_design_with_sample_weights():
"""Sample weights must work with sparse matrices"""
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
sparse_matrix_converters = [sp.coo_matrix,
sp.csr_matrix,
sp.csc_matrix,
sp.lil_matrix,
sp.dok_matrix
]
sparse_ridge = Ridge(alpha=1., fit_intercept=False)
dense_ridge = Ridge(alpha=1., fit_intercept=False)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights = rng.randn(n_samples) ** 2 + 1
for sparse_converter in sparse_matrix_converters:
X_sparse = sparse_converter(X)
sparse_ridge.fit(X_sparse, y, sample_weight=sample_weights)
dense_ridge.fit(X, y, sample_weight=sample_weights)
assert_array_almost_equal(sparse_ridge.coef_, dense_ridge.coef_,
decimal=6)
def test_deprecation_warning_dense_cholesky():
"""Tests if DeprecationWarning is raised at instantiation of estimators
and when ridge_regression is called"""
warning_class = DeprecationWarning
warning_message = ("The name 'dense_cholesky' is deprecated."
" Using 'cholesky' instead")
X = np.ones([2, 3])
y = np.ones(2)
func1 = lambda: Ridge(solver='dense_cholesky').fit(X, y)
func2 = lambda: RidgeClassifier(solver='dense_cholesky').fit(X, y)
X = np.ones([3, 2])
y = np.zeros(3)
func3 = lambda: ridge_regression(X, y, alpha=1, solver='dense_cholesky')
for func in [func1, func2, func3]:
assert_warns_message(warning_class, warning_message, func)
def test_raises_value_error_if_solver_not_supported():
"""Tests whether a ValueError is raised if a non-identified solver
is passed to ridge_regression"""
wrong_solver = "This is not a solver (MagritteSolveCV QuantumBitcoin)"
exception = ValueError
message = "Solver %s not understood" % wrong_solver
def func():
X = np.eye(3)
y = np.ones(3)
ridge_regression(X, y, alpha=1., solver=wrong_solver)
assert_raise_message(exception, message, func)
def test_sparse_cg_max_iter():
reg = Ridge(solver="sparse_cg", max_iter=1)
reg.fit(X_diabetes, y_diabetes)
assert_equal(reg.coef_.shape[0], X_diabetes.shape[1])
| bsd-3-clause |
MatthieuBizien/scikit-learn | build_tools/cythonize.py | 42 | 6375 | #!/usr/bin/env python
""" cythonize
Cythonize pyx files into C files as needed.
Usage: cythonize [root_dir]
Default [root_dir] is 'sklearn'.
Checks pyx files to see if they have been changed relative to their
corresponding C files. If they have, then runs cython on these files to
recreate the C files.
The script detects changes in the pyx/pxd files using checksums
[or hashes] stored in a database file
Simple script to invoke Cython on all .pyx
files; while waiting for a proper build system. Uses file hashes to
figure out if rebuild is needed.
It is called by ./setup.py sdist so that sdist package can be installed without
cython
Originally written by Dag Sverre Seljebotn, and adapted from statsmodel 0.6.1
(Modified BSD 3-clause)
We copied it for scikit-learn.
Note: this script does not check any of the dependent C libraries; it only
operates on the Cython .pyx files or their corresponding Cython header (.pxd)
files.
"""
# Author: Arthur Mensch <[email protected]>
# Author: Raghav R V <[email protected]>
#
# License: BSD 3 clause
from __future__ import division, print_function, absolute_import
import os
import re
import sys
import hashlib
import subprocess
HASH_FILE = 'cythonize.dat'
DEFAULT_ROOT = 'sklearn'
# WindowsError is not defined on unix systems
try:
WindowsError
except NameError:
WindowsError = None
def cythonize(cython_file, gen_file):
try:
from Cython.Compiler.Version import version as cython_version
from distutils.version import LooseVersion
if LooseVersion(cython_version) < LooseVersion('0.21'):
raise Exception('Building scikit-learn requires Cython >= 0.21')
except ImportError:
pass
flags = ['--fast-fail']
if gen_file.endswith('.cpp'):
flags += ['--cplus']
try:
try:
rc = subprocess.call(['cython'] +
flags + ["-o", gen_file, cython_file])
if rc != 0:
raise Exception('Cythonizing %s failed' % cython_file)
except OSError:
# There are ways of installing Cython that don't result in a cython
# executable on the path, see scipy issue gh-2397.
rc = subprocess.call([sys.executable, '-c',
'import sys; from Cython.Compiler.Main '
'import setuptools_main as main;'
' sys.exit(main())'] + flags +
["-o", gen_file, cython_file])
if rc != 0:
raise Exception('Cythonizing %s failed' % cython_file)
except OSError:
raise OSError('Cython needs to be installed')
def load_hashes(filename):
"""Load the hashes dict from the hashfile"""
# { filename : (sha1 of header if available or 'NA',
# sha1 of input,
# sha1 of output) }
hashes = {}
try:
with open(filename, 'r') as cython_hash_file:
for hash_record in cython_hash_file:
(filename, header_hash,
cython_hash, gen_file_hash) = hash_record.split()
hashes[filename] = (header_hash, cython_hash, gen_file_hash)
except (KeyError, ValueError, AttributeError, IOError):
hashes = {}
return hashes
def save_hashes(hashes, filename):
"""Save the hashes dict to the hashfile"""
with open(filename, 'w') as cython_hash_file:
for key, value in hashes.items():
cython_hash_file.write("%s %s %s %s\n"
% (key, value[0], value[1], value[2]))
def sha1_of_file(filename):
h = hashlib.sha1()
with open(filename, "rb") as f:
h.update(f.read())
return h.hexdigest()
def clean_path(path):
"""Clean the path"""
path = path.replace(os.sep, '/')
if path.startswith('./'):
path = path[2:]
return path
def get_hash_tuple(header_path, cython_path, gen_file_path):
"""Get the hashes from the given files"""
header_hash = (sha1_of_file(header_path)
if os.path.exists(header_path) else 'NA')
from_hash = sha1_of_file(cython_path)
to_hash = (sha1_of_file(gen_file_path)
if os.path.exists(gen_file_path) else 'NA')
return header_hash, from_hash, to_hash
def cythonize_if_unchanged(path, cython_file, gen_file, hashes):
full_cython_path = os.path.join(path, cython_file)
full_header_path = full_cython_path.replace('.pyx', '.pxd')
full_gen_file_path = os.path.join(path, gen_file)
current_hash = get_hash_tuple(full_header_path, full_cython_path,
full_gen_file_path)
if current_hash == hashes.get(clean_path(full_cython_path)):
print('%s has not changed' % full_cython_path)
return
print('Processing %s' % full_cython_path)
cythonize(full_cython_path, full_gen_file_path)
# changed target file, recompute hash
current_hash = get_hash_tuple(full_header_path, full_cython_path,
full_gen_file_path)
# Update the hashes dict with the new hash
hashes[clean_path(full_cython_path)] = current_hash
def check_and_cythonize(root_dir):
print(root_dir)
hashes = load_hashes(HASH_FILE)
for cur_dir, dirs, files in os.walk(root_dir):
for filename in files:
if filename.endswith('.pyx'):
gen_file_ext = '.c'
# Cython files with libcpp imports should be compiled to cpp
with open(os.path.join(cur_dir, filename), 'rb') as f:
data = f.read()
m = re.search(b"libcpp", data, re.I | re.M)
if m:
gen_file_ext = ".cpp"
cython_file = filename
gen_file = filename.replace('.pyx', gen_file_ext)
cythonize_if_unchanged(cur_dir, cython_file, gen_file, hashes)
# Save hashes once per module. This prevents cythonizing prev.
# files again when debugging broken code in a single file
save_hashes(hashes, HASH_FILE)
def main(root_dir=DEFAULT_ROOT):
check_and_cythonize(root_dir)
if __name__ == '__main__':
try:
root_dir_arg = sys.argv[1]
except IndexError:
root_dir_arg = DEFAULT_ROOT
main(root_dir_arg)
| bsd-3-clause |
erdc/proteus | proteus/tests/POD/deim_utils.py | 1 | 5097 | #!/usr/bin/env python
"""
utility module for generating deim interpolants
"""
from __future__ import division
from builtins import range
from past.utils import old_div
import numpy as np
def read_from_hdf5(hdfFile,label,dof_map=None):
"""
    Grab the array stored in the node with the given label and return it.
    If dof_map is not None, it is used to index the values in the array and
    therefore determines the shape of the output array.
"""
assert hdfFile is not None, "requires hdf5 for heavy data"
vals = hdfFile.get_node(label).read()
if dof_map is not None:
dof = vals[dof_map]
else:
dof = vals
return dof
def read_snapshots(archive,nsnap,val_name):
"""
    Assumes nsnap snapshots of the array val_name are stored in the h5 file as
    /val_name'i' for i=0,...,nsnap-1.
    Loads these into a matrix (one snapshot per column) and returns it.
"""
label_base="/%s%d"
u = read_from_hdf5(archive.hdfFile,label_base % (val_name,0))
S = np.reshape(u,(u.shape[0],1))
for i in range(1,nsnap):
label=label_base % (val_name,i)
u = read_from_hdf5(archive.hdfFile,label)
u = np.reshape(u,(u.shape[0],1))
S = np.append(S,u,axis=1)
#
return S
def generate_svd_decomposition(archive,nsnap,val_name,outbase):
"""
    Assumes nsnap snapshots of the array val_name are stored in the h5 file as
    /val_name'i' for i=0,...,nsnap-1.
    Loads these into a matrix, performs an SVD, and writes the left singular
    vectors and singular values to outbase_SVD_basis and
    outbase_SVD_singular_values as plain-text files (via np.savetxt).
    Returns the U,s,V SVD decomposition of the snapshot matrix.
"""
S = read_snapshots(archive,nsnap,val_name)
U, s, V= np.linalg.svd(S,full_matrices=False)
np.savetxt(outbase+'_SVD_basis',U,delimiter=' ')
np.savetxt(outbase+'_SVD_singular_values',s,delimiter=' ')
return U,s,V
def calculate_deim_indices(Uin):
"""
input: Uin n x m array of basis vectors for nonlinear function snapshots
output: rho, m vector of indices \rho_i for extracting $\vec F$ values
"""
n,m=Uin.shape
rind = np.argmax(np.absolute(Uin[:,0]))
U=np.array(Uin[:,0])
rho=np.array([rind],'i')
#Pt = np.zeros((1,n),'d')
#P[0,rind]=1.0
for j in range(1,m):
u = Uin[:,j]
Up=U[rho]#Up= np.dot(Pt,U)
up=u[rho]#up= np.dot(Pt,u)
if j==1:
c=old_div(up,Up)
r=u-U*c
else:
c =np.linalg.solve(Up,up)
r=u-np.dot(U,c)
rind=np.argmax(np.absolute(r))
rho_new = np.zeros(j+1,'i');
rho_new[:-1]=rho; rho_new[-1]=rind; rho = rho_new
U_new=np.zeros((n,j+1),'d')
U_new[:,:-1]=U.reshape(n,j); U_new[:,-1]=u
U=U_new
#
return rho
def deim_alg(Uin,m):
"""
Basic procedure
- given $m$, dimension for $F$ reduced basis $\mathbf{U}_m$
- call DEIM algorithm to determine $\vec \rho$.
- build $\mathbf{P}$ from $\rho$ as
$$
\mathbf{P} = [\vec e_{\rho_1},\vec e_{\rho_2},\dots,\vec e_{\rho_m}]
$$
- invert $\mathbf{P}^T\mathbf{U}_m$
- return \rho and $\mathbf{P}_F=\mathbf{U}_m(\mathbf{P}^T\mathbf{U}_m)^{-1}$
"""
assert m <= Uin.shape[1]
Um = Uin[:,0:m]
rho = calculate_deim_indices(Um)
PtUm = Um[rho]
assert PtUm.shape == (m,m)
PtUmInv = np.linalg.inv(PtUm)
PF= np.dot(Um,PtUmInv)
return rho,PF
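# Illustrative usage sketch (not part of the original module). The snapshot
# matrix below is synthetic; in practice S would come from read_snapshots and
# its basis from generate_svd_decomposition.
def deim_usage_example(m=5):
    """Approximate a nonlinear-function snapshot from m sampled entries."""
    rng = np.random.RandomState(0)
    S = rng.rand(100, 20)                            # hypothetical snapshots, one per column
    U, s, V = np.linalg.svd(S, full_matrices=False)  # POD basis of the snapshots
    rho, PF = deim_alg(U, m)                         # DEIM indices and projection matrix
    F = S[:, 0]                                      # one nonlinear function evaluation
    F_deim = np.dot(PF, F[rho])                      # reconstruct from the m sampled entries
    return np.linalg.norm(F - F_deim)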
def visualize_zslice(variable,nnx,nny,iz,x=None,y=None,name=None):
"""
convenience function for plotting a slice
"""
istart = nnx*nny*iz
iend = nnx*nny*(iz+1)
v_slice= variable[istart:iend]
v_slice= v_slice.reshape(nnx,nny)
    if x is None:
        # default to an index grid with the same shape as the slice
        x = np.outer(np.arange(nnx), np.ones(nny))
    if y is None:
        y = np.outer(np.ones(nnx), np.arange(nny))
assert x.shape == v_slice.shape
assert y.shape == v_slice.shape
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
fig = plt.figure()
ax = fig.gca(projection='3d')
surf=ax.plot_surface(x,y,v_slice,rstride=1,cstride=1,cmap=cm.coolwarm,linewidth=0,antialiased=False)
plt.xlabel('x'); plt.ylabel('y')
if name is None:
name = 'deim_slice_z={0}.png'.format(iz)
plt.savefig(name)
return surf
def extract_sub_matrix_csr(rho,rowptr,colind,nnzval):
"""
manually extract the rows in the deim index vector rho from a csr matrix representation
returns a csr representation
"""
m = len(rho)
rowptr_sub = np.zeros(m+1,'i')
nnz_sub = 0
for k,I in enumerate(rho):#count number of nonzero entries
diff = rowptr[I+1]-rowptr[I]
rowptr_sub[k+1]=rowptr_sub[k]+diff
nnz_sub += diff
colind_sub = np.zeros(nnz_sub,'i'); nzval_sub=np.zeros(nnz_sub,'d')
    for k,KK in enumerate(rho):
        for jj,MM in enumerate(range(rowptr[KK],rowptr[KK+1])):
            colind_sub[rowptr_sub[k]+jj]=colind[MM]
            nzval_sub[rowptr_sub[k]+jj]=nnzval[MM]
#
return rowptr_sub,colind_sub,nzval_sub
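# Illustrative usage sketch (not part of the original module): extract the DEIM
# rows from a small CSR matrix built with scipy.sparse. The matrix values are
# made up for demonstration.
def extract_sub_matrix_example():
    from scipy.sparse import csr_matrix
    A = csr_matrix(np.array([[1., 0., 2.],
                             [0., 3., 0.],
                             [4., 0., 5.]]))
    rho = np.array([0, 2], 'i')  # keep rows 0 and 2
    rowptr_sub, colind_sub, nzval_sub = extract_sub_matrix_csr(rho, A.indptr,
                                                               A.indices, A.data)
    # rebuild the 2 x 3 submatrix [[1, 0, 2], [4, 0, 5]] from the extracted arrays
    return csr_matrix((nzval_sub, colind_sub, rowptr_sub),
                      shape=(len(rho), A.shape[1]))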
| mit |
wiheto/teneto | setup.py | 1 | 1293 | """General setup for module."""
from setuptools import setup, find_packages
VERSION_FILE = "teneto/_version.py"
with open(VERSION_FILE, "rt") as f:
    VERSION = f.read().split('"')[1]
setup(name='teneto',
version=VERSION,
python_requires='>3.5',
setup_requires=['pytest-runner'],
tests_require=['pytest'],
install_requires=[
'nilearn>=0.6.0',
'pybids>=0.11.1',
'statsmodels>=0.8.0',
'networkx>=2.0',
'python-louvain>=0.13',
'pandas>=0.21',
'scipy>=1.4.1',
'numpy>=1.19.5',
'templateflow>=0.6.2'],
description='Temporal network tools',
packages=find_packages(),
author='William Hedley Thompson',
author_email='[email protected]',
url='https://www.github.com/wiheto/teneto',
download_url='https://github.com/wiheto/teneto/archive/0.3.3.tar.gz',
package_data={'': ['./teneto/data']},
include_package_data=True,
entry_points={
'console_scripts': ['teneto = teneto.__main__:main']
},
long_description='Temporal network tools. \
A package for deriving, analysing and plotting temporal network representations. \
Additional tools for temporal network analysis with neuroimaging contexts.')
| gpl-3.0 |
Dapid/scipy | scipy/special/c_misc/struve_convergence.py | 76 | 3725 | """
Convergence regions of the expansions used in ``struve.c``
Note that for v >> z both functions tend rapidly to 0,
and for v << -z, they tend to infinity.
The floating-point functions over/underflow in the lower left and right
corners of the figure.
Figure legend
=============
Red region
Power series is close (1e-12) to the mpmath result
Blue region
Asymptotic series is close to the mpmath result
Green region
Bessel series is close to the mpmath result
Dotted colored lines
Boundaries of the regions
Solid colored lines
Boundaries estimated by the routine itself. These will be used
for determining which of the results to use.
Black dashed line
The line z = 0.7*|v| + 12
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import matplotlib.pyplot as plt
try:
import mpmath
except ImportError:
from sympy import mpmath
def err_metric(a, b, atol=1e-290):
m = abs(a - b) / (atol + abs(b))
m[np.isinf(b) & (a == b)] = 0
return m
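# Worked example (added for illustration): the metric is a relative difference
# that treats matching infinities as exact agreement, e.g.
#   err_metric(np.array([2.0, np.inf]), np.array([1.0, np.inf]))
# gives approximately [1.0, 0.0].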
def do_plot(is_h=True):
from scipy.special._ufuncs import \
_struve_power_series, _struve_asymp_large_z, _struve_bessel_series
vs = np.linspace(-1000, 1000, 91)
zs = np.sort(np.r_[1e-5, 1.0, np.linspace(0, 700, 91)[1:]])
rp = _struve_power_series(vs[:,None], zs[None,:], is_h)
ra = _struve_asymp_large_z(vs[:,None], zs[None,:], is_h)
rb = _struve_bessel_series(vs[:,None], zs[None,:], is_h)
mpmath.mp.dps = 50
if is_h:
sh = lambda v, z: float(mpmath.struveh(mpmath.mpf(v), mpmath.mpf(z)))
else:
sh = lambda v, z: float(mpmath.struvel(mpmath.mpf(v), mpmath.mpf(z)))
ex = np.vectorize(sh, otypes='d')(vs[:,None], zs[None,:])
err_a = err_metric(ra[0], ex) + 1e-300
err_p = err_metric(rp[0], ex) + 1e-300
err_b = err_metric(rb[0], ex) + 1e-300
err_est_a = abs(ra[1]/ra[0])
err_est_p = abs(rp[1]/rp[0])
err_est_b = abs(rb[1]/rb[0])
z_cutoff = 0.7*abs(vs) + 12
levels = [-1000, -12]
plt.cla()
plt.hold(1)
plt.contourf(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], alpha=0.1)
plt.contourf(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], alpha=0.1)
plt.contourf(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], alpha=0.1)
plt.contour(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], linestyles=[':', ':'])
plt.contour(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], linestyles=[':', ':'])
plt.contour(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], linestyles=[':', ':'])
lp = plt.contour(vs, zs, np.log10(err_est_p).T, levels=levels, colors=['r', 'r'], linestyles=['-', '-'])
la = plt.contour(vs, zs, np.log10(err_est_a).T, levels=levels, colors=['b', 'b'], linestyles=['-', '-'])
lb = plt.contour(vs, zs, np.log10(err_est_b).T, levels=levels, colors=['g', 'g'], linestyles=['-', '-'])
plt.clabel(lp, fmt={-1000: 'P', -12: 'P'})
plt.clabel(la, fmt={-1000: 'A', -12: 'A'})
plt.clabel(lb, fmt={-1000: 'B', -12: 'B'})
plt.plot(vs, z_cutoff, 'k--')
plt.xlim(vs.min(), vs.max())
plt.ylim(zs.min(), zs.max())
plt.xlabel('v')
plt.ylabel('z')
def main():
plt.clf()
plt.subplot(121)
do_plot(True)
plt.title('Struve H')
plt.subplot(122)
do_plot(False)
plt.title('Struve L')
plt.savefig('struve_convergence.png')
plt.show()
if __name__ == "__main__":
import os
import sys
if '--main' in sys.argv:
main()
else:
import subprocess
subprocess.call([sys.executable, os.path.join('..', '..', '..', 'runtests.py'),
'-g', '--python', __file__, '--main'])
| bsd-3-clause |
sfepy/sfepy | sfepy/mesh/bspline.py | 5 | 24238 | from __future__ import print_function
from __future__ import absolute_import
import sys
from six.moves import range
sys.path.append('.')
import numpy as nm
from sfepy.base.base import Struct
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
nm_f64_eps = nm.finfo(nm.float64).eps
def to_ndarray(a):
if a is None:
return None
else:
a = nm.asarray(a)
if len(a.shape) == 0:
a = a.reshape(1)
return a
class BSpline(Struct):
"""
B-spline curve representation
"""
def __init__(self, degree=3, is_cyclic=False, ncp=0):
"""
Initialize B-spline class.
Parameters
----------
degree : int
The degree of the spline function.
is_cyclic : bool
            Cyclic spline?
ncp : int
The number of control points.
"""
self.degree = degree
self.knot_type = None
self.is_cyclic = is_cyclic
self.ncp = ncp
self.knots = None
self.basis = None
self.curve_coors = None
self.cp_coors = None
self.approx_coors = None
self.t = None
def set_control_points(self, coors, cyclic_form=False):
"""
Set the B-spline control points.
Parameters
----------
coors : array
The coordinates of unique control points.
cyclic_form : bool
Are the control points in the cyclic form?
"""
coors = to_ndarray(coors)
if self.is_cyclic and not cyclic_form:
coors = nm.vstack((coors, coors[0:self.degree,:]))
self.cp_coors = coors
self.ncp = coors.shape[0]
def get_control_points(self):
"""
Get the B-spline control points.
Returns
-------
coors : array
The coordinates of control points.
"""
if self.is_cyclic:
return self.cp_coors[:-self.degree,:]
else:
return self.cp_coors
def set_param(self, t):
"""
Set the B-spline parametric vector.
Parameters
----------
t : array
The parameter vector of the B-spline.
"""
self.t = to_ndarray(t)
if self.knots is not None:
endval = self.knots[-(self.degree + 1)]
idxs = nm.where(self.t == endval)[0]
self.t[idxs] -= nm_f64_eps
def set_param_n(self, n=100, knot_range=(0.0, 1.0)):
"""
Generate the B-spline parametric vector using the number of steps.
Parameters
----------
        n : int
            The number of steps in the B-spline parametric vector.
        knot_range : tuple of float
            The start and end values of the parametric vector.
"""
self.t = nm.linspace(knot_range[0], knot_range[1], n)
self.t[-1] -= nm_f64_eps
@staticmethod
def basis_function_dg0(t, knots, n):
"""
Basis function: degree = 0
Parameters
----------
t : array
The parametric vector.
knots : array
The knot vector.
n : int
The number of intervals.
Returns
-------
bfun : array
The spline basis function evaluated for given values.
"""
nt = len(t)
bfun = nm.zeros((nt,n), dtype=nm.float64)
for ii in range(n):
idxs = nm.where(nm.logical_and(knots[ii] <= t,
t < knots[ii + 1]))[0]
bfun[idxs,ii] = 1.0
return bfun
@staticmethod
def basis_function_dg(degree, t, knots, n):
"""
B-spline basis functions.
Parameters
----------
degree : int
The degree of the spline function.
t : array
The parametric vector.
knots : array
The knot vector.
n : int
The number of intervals.
Returns
-------
bfun : array
The spline basis function evaluated for given values.
"""
if degree >= 1:
bfun_dgm1 = BSpline.basis_function_dg(degree - 1, t,
knots, n + 1)
nt = len(t)
bfun = nm.zeros((nt,n), dtype=nm.float64)
for ii in range(n):
c1 = t - knots[ii]
c2 = knots[ii + degree] - knots[ii]
if nm.abs(c2) > nm_f64_eps:
bfun[:,ii] = c1 / c2 * bfun_dgm1[:,ii]
c1 = knots[ii + degree + 1] - t
c2 = knots[ii + degree + 1] - knots[ii + 1]
if nm.abs(c2) > nm_f64_eps:
bfun[:,ii] += c1 / c2 * bfun_dgm1[:,ii + 1]
else:
bfun = BSpline.basis_function_dg0(t, knots, n)
return bfun
def make_knot_vector(self, knot_type='clamped', knot_data=None,
knot_range=(0.0, 1.0)):
"""
Create a knot vector of the requested type.
Parameters
----------
knot_type : str
The knot vector type: clamped/cyclic/userdef.
knot_data :
The extra knot data.
"""
if self.is_cyclic and 'cyclic' not in knot_type:
knot_type = 'cyclic'
ncp = self.ncp
dg = self.degree
n_knots = dg + ncp + 1
n_inter = n_knots - 2 * dg
aux = nm.linspace(knot_range[0], knot_range[1], n_inter)
if knot_type == '' or knot_type == 'cyclic':
dd = aux[1]
self.knots = nm.hstack((nm.arange(-dg, 0) * dd,
aux,
nm.arange(1, dg + 1) * dd + 1))
elif knot_type == 'clamped':
self.knots = nm.array([aux[0]]* dg + list(aux) + [aux[-1]]* dg,
dtype=nm.float64)
else:
raise NotImplementedError
self.knot_type = knot_type
def set_knot_vector(self, knots):
"""
Set the knot vector.
Parameters
----------
knots : array
The knot vector.
"""
self.knot_type = 'userdef'
self.knots = to_ndarray(knots)
def get_knot_vector(self):
"""
Return the knot vector.
Returns
-------
knots : array
The knot vector.
"""
return self.knots
def insert_knot(self, new):
"""
Insert a new knot into the knot vector.
Parameters
----------
new : float
The new knot value.
"""
kn = self.knots
dg = self.degree
ncp = self.ncp
cp = self.cp_coors
idx = nm.where(nm.logical_and(kn[:-1] <= new, new < kn[1:]))[0]
if len(idx) > 0:
multi = len(nm.where(kn == new)[0])
if multi < dg:
# new knot
newkn = nm.zeros((len(kn) + 1,), dtype=nm.float64)
newkn[:(idx + 1)] = kn[:(idx + 1)]
newkn[idx + 1] = new
newkn[(idx + 2):] = kn[(idx + 1):]
u1 = idx - dg + 1
u2 = idx + 1
# new control points
newcp = nm.zeros((ncp + 1, cp.shape[1]), dtype=nm.float64)
newcp[:u1,:] = cp[:u1,:]
newcp[u2:,:] = cp[(u2 - 1):,:]
for ii in range(u1, u2):
kn1 = kn[ii]
kn2 = kn[ii + dg]
dd = kn2 - kn1
newcp[ii,:] = (kn2 - new) / dd * cp[ii - 1] + \
(new - kn1) / dd * cp[ii]
self.knots = newkn
self.cp_coors = newcp
self.ncp = newcp.shape[0]
# evaluate B-spline base functions for new configuration
self.eval_basis()
else:
print('knot insertion failed: multiplicity = spline degree!')
else:
print('knot insertion failed: out of bounds!')
def eval_basis(self, t=None, return_val=False):
"""
        Evaluate the basis of the B-spline.
Parameters
----------
t : array
The parameter vector of the B-spline.
"""
if t is not None:
self.set_param(t)
if self.knots is None:
self.make_knot_vector()
if self.t is None:
self.set_param_n()
self.basis = self.basis_function_dg(self.degree, self.t,
self.knots, self.ncp)
if return_val:
return self.basis
def eval(self, t=None, cp_coors=None):
"""
        Evaluate the coordinates of the B-spline curve.
Parameters
----------
t : array
The parameter vector of the B-spline.
cp_coors : array
The coordinates of the control points.
"""
if cp_coors is not None:
self.set_control_points(cp_coors)
self.eval_basis(t)
self.curve_coors = nm.dot(self.basis, self.cp_coors)
return self.curve_coors
def draw(self, ret_ax=False, ax=None, color='r', cp_id=True):
"""
Draw B-spline curve.
Parameters
----------
ret_ax : bool
Return an axes object?
ax : axes object
The axes to which will be drawn.
color : str
Line color.
cp_id : bool
If True, label control points.
"""
if self.curve_coors is None:
self.eval()
cc = self.curve_coors
cp = self.cp_coors
ci = self.approx_coors
if ci is not None and self.is_cyclic:
ci = nm.vstack((ci, ci[0,:]))
if cc.shape[1] == 3:
if ax is None:
fig = plt.figure()
ax = Axes3D(fig)
ax.plot(cc[:,0], cc[:,1], cc[:,2], color + '-')
if cp_id:
ax.plot(cp[:,0], cp[:,1], cp[:,2], 'ko:', alpha=0.6)
if ci is not None:
ax.plot(ci[:,0], ci[:,1], ci[:,2], 'b--', alpha=0.6)
else:
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(cc[:,0], cc[:,1], color + '-')
if cp_id:
ax.plot(cp[:,0], cp[:,1], 'ko:', alpha=0.6)
for jj, icp in enumerate(self.cp_coors):
ax.text(icp[0], icp[1], 'N%d' % (jj + 1), fontsize=10)
if ci is not None:
ax.plot(ci[:,0], ci[:,1], 'b--', alpha=0.6)
ax.set_aspect('equal')
if ret_ax:
return ax
else:
plt.show()
def draw_basis(self):
"""
        Draw the B-spline basis functions.
"""
plt.figure()
plt.plot(self.t,self.basis)
plt.legend(['b%d' % (ii + 1) for ii in range(self.basis.shape[1])])
plt.show()
def approximate(self, coors, ncp=None, knot_type='clamped',
knots=None, alpha=0.5,
do_eval=False, do_param_correction=False):
"""
Approximate set of points by the B-spline curve.
Parameters
----------
coors : array
The coordinates of the approximated points.
ncp : int
The number of control points.
knot_type : str
The knot vector type.
knots : array
The knot vector.
alpha : float
The parameter vector distribution:
1.0 = chordal
0.5 = centripetal
do_eval : bool
Evaluate the curve coordinates?
do_param_correction : bool
Perform parametric corrections to improve the approximation?
"""
coors = to_ndarray(coors)
dg = self.degree
if ncp is not None:
if self.is_cyclic:
ncp += dg
self.ncp = ncp
self.make_knot_vector(knot_type)
if knots is not None:
self.knots = knots
self.knot_type = 'userdef'
ncp = len(knots) - dg - 1
self.ncp = ncp
# param vector
dist = nm.linalg.norm(coors[:-1,:] - coors[1:,:], axis=1)
dista = nm.power(dist, alpha)
self.t = nm.zeros((coors.shape[0],), dtype=nm.float64)
self.t[1:] += dista.cumsum() / dista.sum()
self.t[-1] -= nm_f64_eps
while True:
self.basis = self.basis_function_dg(dg, self.t,
self.knots, ncp)
A = nm.dot(self.basis.T, self.basis)
b = nm.dot(self.basis.T, coors)
# cyclic spline
if self.is_cyclic:
nred = ncp - dg
R = nm.zeros((ncp, nred), dtype=nm.float64)
for ii in range(nred):
R[ii,ii] = 1.0
for ii in range(self.degree):
R[nred + ii,ii] = 1.0
A = nm.dot(R.T, nm.dot(A, R))
b = nm.dot(R.T, b)
self.cp_coors = nm.dot(R, nm.dot(nm.linalg.inv(A), b))
else:
self.cp_coors = nm.dot(nm.linalg.inv(A), b)
self.approx_coors = coors
if not do_param_correction:
break
if do_eval:
self.curve_coors = nm.dot(self.basis, self.cp_coors)
def set_approx_points(self, coors):
"""
Set the coordinates of approximated points.
Parameters
----------
coors : array
The coordinates of approximated points.
"""
self.approx_coors = to_ndarray(coors)
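# Illustrative sketch (not part of the original module): evaluating the
# Cox-de Boor recursion directly through the static helper. The knot vector
# below is the clamped vector make_knot_vector() would build for degree=3,
# ncp=5; the parameter values are made up for demonstration.
def basis_partition_of_unity_demo():
    """Check that cubic basis functions on a clamped knot vector sum to one."""
    degree, ncp = 3, 5
    knots = nm.array([0., 0., 0., 0., 0.5, 1., 1., 1., 1.])  # ncp + degree + 1 knots
    t = nm.linspace(0., 1. - nm_f64_eps, 11)
    basis = BSpline.basis_function_dg(degree, t, knots, ncp)
    return nm.allclose(basis.sum(axis=1), 1.0)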
class BSplineSurf(Struct):
"""
B-spline surface representation
"""
def __init__(self, degree=(3,3), is_cyclic=(False, False)):
"""
Initialize B-spline class.
Parameters
----------
degree : tuple of int
The degree of the spline functions.
is_cyclic : tuple of bool
            Cyclic splines?
"""
self.splines = [None, None]
for ii in range(2):
self.splines[ii] = BSpline(degree[ii], is_cyclic=is_cyclic[ii])
self.surf_coors = None
self.cp_coors = None
self.approx_coors = None
def set_control_points(self, coors, cyclic_form=False):
"""
Set the B-spline control points.
Parameters
----------
coors : array
The coordinates of unique control points.
cyclic_form : bool
Are the control points in the cyclic form?
"""
coors = to_ndarray(coors)
if self.splines[0].is_cyclic and not cyclic_form:
coors = nm.vstack((coors, coors[0:self.splines[0].degree,:,:]))
if self.splines[1].is_cyclic and not cyclic_form:
coors = nm.hstack((coors, coors[:,0:self.splines[1].degree,:]))
self.cp_coors = coors
for ii in range(2):
self.splines[ii].ncp = coors.shape[ii]
def get_control_points(self):
"""
Get the B-spline surface control points.
Returns
-------
coors : array
The coordinates of control points.
"""
aux = self.cp_coors
if self.splines[0].is_cyclic:
aux = aux[:-self.splines[0].degree,:,:]
if self.splines[1].is_cyclic:
aux = aux[:,:-self.splines[1].degree,:]
return aux
def make_knot_vector(self, knot_type=('clamped', 'clamped'),
knot_data=(None, None)):
"""
Create a knot vector of the requested type.
Parameters
----------
knot_type : tuple of str
The knot vector types.
knot_data : tuple of ANY
The extra knot data.
"""
for ii in range(2):
self.splines[ii].make_knot_vector(knot_type[ii], knot_data[ii])
def set_param_n(self, n=(100, 100)):
"""
Generate the B-spline parametric vector using the number of steps.
Parameters
----------
        n : tuple of int
            The number of steps in the B-spline parametric vectors.
"""
for ii in range(2):
self.splines[ii].set_param_n(n[ii])
def set_approx_points(self, coors):
"""
Set the coordinates of approximated points.
Parameters
----------
coors : array
The coordinates of approximated points.
"""
self.approx_coors = to_ndarray(coors)
def eval(self, t=(None, None), cp_coors=None):
"""
        Evaluate the coordinates of the B-spline surface.
Parameters
----------
t : tuple of array
The parametric vector of the B-splines.
cp_coors : array
The coordinates of the control points.
"""
if cp_coors is not None:
self.set_control_points(cp_coors)
for ii in range(2):
self.splines[ii].eval_basis(t[ii])
nt = (len(self.splines[0].t), len(self.splines[1].t))
ncp = (self.splines[0].ncp, self.splines[1].ncp)
aux = nm.zeros((nt[0], ncp[1], 3), dtype=nm.float64)
for ii in range(ncp[1]):
aux[:,ii,:] = nm.dot(self.splines[0].basis, self.cp_coors[:,ii,:])
self.surf_coors = nm.zeros(nt + (3,), dtype=nm.float64)
for ii in range(nt[0]):
self.surf_coors[ii,:,:] = nm.dot(self.splines[1].basis, aux[ii,:,:])
return self.surf_coors
def draw(self, ret_ax=False, ax=None):
"""
Draw B-spline surface.
Parameters
----------
ret_ax : bool
Return an axes object?
ax : axes object
The axes to which will be drawn.
"""
if self.surf_coors is None:
self.eval()
fig = plt.figure()
ax = Axes3D(fig)
coors = self.surf_coors
cs = coors.shape
for ii in range(cs[0] - 1):
for jj in range(cs[1] - 1):
verts = nm.array([coors[ii,jj,:],
coors[ii,jj + 1,:],
coors[ii + 1,jj + 1,:],
coors[ii + 1,jj,:]])
quad = Poly3DCollection([verts],
facecolors='gray', edgecolor='k',
linewidths=0.2, alpha=0.5)
ax.add_collection3d(quad)
cp = self.cp_coors
for ii in range(cp.shape[1]):
ax.plot(cp[:,ii,0], cp[:,ii,1], cp[:,ii,2], 'ro--', linewidth=2.0)
for ii in range(cp.shape[0]):
ax.plot(cp[ii,:,0], cp[ii,:,1], cp[ii,:,2], 'ro--', linewidth=2.0)
ax.set_aspect('equal')
plt.show()
def approximate(self, coors, ncp, do_eval=False):
"""
Approximate set of points by the B-spline surface.
Parameters
----------
coors : array
The coordinates of the approximated points.
ncp : tuple of int
The number of control points.
"""
coors = to_ndarray(coors)
nsc = coors.shape[0:2]
aux = nm.zeros((nsc[0], ncp[1], 3), dtype=nm.float64)
spl1 = self.splines[1]
for ii in range(nsc[0]):
spl1.approximate(coors[ii,...], ncp[1])
aux[ii,...] = spl1.get_control_points()
self.cp_coors = nm.zeros((ncp[0], ncp[1], 3), dtype=nm.float64)
spl2 = self.splines[0]
for ii in range(ncp[1]):
spl2.approximate(aux[:,ii,:], ncp[0])
self.cp_coors[:,ii,:] = spl2.get_control_points()
self.approx_coors = coors
def write_surface_vtk(self, filename, float_format='%.6f'):
"""
Write the spline surface to VTK file.
Parameters
----------
filename: str
Name of the VTK file.
float_format: str
            Float formatting.
"""
coors = self.surf_coors
cs0, cs1 = coors.shape[0:2]
nquads = (cs0 - 1) * (cs1 - 1)
quads = nm.zeros((nquads, 4), dtype=nm.int64)
kk = 0
for ii in range(cs0 - 1):
offs = ii * cs1
for jj in range(cs1 - 1):
quads[kk] = nm.array([jj + offs,
jj + offs + cs1,
jj + 1 + offs + cs1,
jj + 1 + offs])
kk += 1
f = open(filename, 'w')
f.write('# vtk DataFile Version 2.0\n')
f.write('spline surface\nASCII\nDATASET POLYDATA\n')
ff3 = ' '.join([float_format] * 3) + '\n'
f.write('POINTS %d float\n' % (cs0 * cs1))
for ii in range(cs0):
offs = ii * cs1
for jj in range(cs1):
f.write(ff3 % tuple(coors[ii,jj,:]))
f.write('POLYGONS %d %d\n' % (nquads, nquads * 5))
for ii in quads:
f.write('4 %s\n' % (' '.join([str(jj) for jj in ii])))
f.close()
def write_control_polygon_vtk(self, filename, float_format='%.6f'):
"""
Write the control polygon to VTK file.
Parameters
----------
filename: str
Name of the VTK file.
float_format: str
            Float formatting.
"""
coors = self.cp_coors
cs0, cs1 = coors.shape[0:2]
lines = []
nlines = cs0 + cs1
nlpts = 0
for ii in range(cs0):
lines.append(nm.arange(cs1) + ii * cs1)
nlpts += cs1
for ii in range(cs1):
lines.append(nm.arange(cs0) * cs1 + ii)
nlpts += cs0
f = open(filename, 'w')
f.write('# vtk DataFile Version 2.0\n')
f.write('spline control polygon\nASCII\nDATASET POLYDATA\n')
ff3 = ' '.join([float_format] * 3) + '\n'
f.write('POINTS %d float\n' % (cs0 * cs1))
for ii in range(cs0):
for jj in range(cs1):
f.write(ff3 % tuple(coors[ii,jj,:]))
f.write('LINES %d %d\n' % (nlines, nlines + nlpts))
for ii in lines:
f.write('%d %s\n' % (len(ii), ' '.join([str(jj) for jj in ii])))
f.close()
def get_2d_points(is3d=False):
"""
Returns the set of points.
Parameters
----------
is3d : bool
3D coordinates?
"""
out = nm.array([(-0.87, 0.15, 0),
(-0.70, 0.54, 0),
(-0.32, 0.80, 0),
(0.15, 0.70, 0),
(0.37, 0.26, 0),
(0.70, -0.07, 0),
(0.67, -0.49, 0),
(0.07, -0.81, 0),
(-0.44, -0.72, 0),
(-0.80, -0.34, 0)])
if is3d:
return out
else:
return out[:,:2]
def approximation_example():
"""
    Example of using BSplineSurf to approximate a surface
    given by a set of points.
"""
# define sample points in 2D
spts0 = get_2d_points(is3d=True)
# define sample points in 3D
spts = nm.array([spts0,
spts0 * 0.7 + nm.array([0.1,0,0.5]),
spts0 * 0.8 + nm.array([0.2,0,1.5]),
spts0 * 1.2 + nm.array([0.4,0,2.5])])
cyclic=(False, True)
spl1 = BSplineSurf((3, 3), is_cyclic=cyclic)
spl1.approximate(spts, (4,8))
cp = spl1.get_control_points()
spls2 = BSplineSurf((3, 3), is_cyclic=cyclic)
spls2.set_control_points(cp)
spls2.make_knot_vector()
spls2.set_param_n((12, 24))
spls2.eval()
spls2.draw()
def simple_example():
"""
    Example of using the B-spline class.
"""
# define control points in 2D
cp = get_2d_points()
spl = BSpline(3, is_cyclic=True)
spl.set_control_points(cp)
spl.make_knot_vector()
spl.set_param_n(150)
spl.insert_knot(0.7)
spl.insert_knot(0.7)
spl.insert_knot(0.7)
spl.eval()
spl.draw()
def main(argv):
# simple_example()
approximation_example()
if __name__ == '__main__':
main(sys.argv)
| bsd-3-clause |
MalkIPP/openfisca-survey-manager | openfisca_survey_manager/utils.py | 2 | 9010 | # -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <[email protected]>
#
# Copyright (C) 2011, 2012, 2013, 2014, 2015 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import logging
from pandas import DataFrame, concat
from openfisca_survey_manager.surveys import Survey, SurveyCollection
log = logging.getLogger(__name__)
def clean_data_frame(data_frame):
object_column_names = list(data_frame.select_dtypes(include=["object"]).columns)
log.info(
"The following variables are to be cleaned or left as strings : \n {}".format(object_column_names)
)
for column_name in object_column_names:
if data_frame[column_name].isnull().all(): # drop empty columns
data_frame.drop(column_name, axis = 1, inplace = True)
continue
values = list(data_frame[column_name].value_counts().keys())
empty_string_present = "" in values
if empty_string_present:
values.remove("")
all_digits = all([value.isdigit() for value in values])
no_zero = all([value != 0 for value in values])
if all_digits and no_zero:
log.info(
"Replacing empty string with zero for variable {}".format(column_name)
)
data_frame.replace(
to_replace = {
column_name: {"": 0},
},
inplace = True,
)
log.info(
"Converting string variable {} to integer".format(column_name)
)
data_frame[column_name] = data_frame[column_name].astype("int")
return data_frame
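# Illustrative usage sketch (not part of the original module): the toy frame
# below is made up to show the cleaning rules (all-empty columns dropped, empty
# strings in all-digit columns replaced with 0 and the column converted to
# integers, other string columns left untouched).
def clean_data_frame_example():
    toy = DataFrame({
        'empty_column': [None, None, None],
        'codes': ['1', '', '3'],
        'labels': ['a', 'b', 'c'],
        })
    return clean_data_frame(toy)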
def dump_simulation_results_data_frame(survey_scenario, collection = None):
assert collection is not None
year = survey_scenario.year
data_frame_by_entity = get_calculated_data_frame_by_entity(survey_scenario)
openfisca_survey_collection = SurveyCollection.load(collection = "openfisca")
output_data_directory = openfisca_survey_collection.config.get('data', 'output_directory')
survey_name = "openfisca_data_{}".format(year)
for entity, data_frame in data_frame_by_entity.iteritems():
print entity
table = entity
hdf5_file_path = os.path.join(
os.path.dirname(output_data_directory),
"{}{}".format(survey_name, ".h5"),
)
survey = Survey(
name = survey_name,
hdf5_file_path = hdf5_file_path,
)
survey.insert_table(name = table)
survey.fill_hdf(table, data_frame)
openfisca_survey_collection.surveys[survey_name] = survey
openfisca_survey_collection.dump(collection = "openfisca")
def get_data_frame(columns_name, survey_scenario, load_first = False, collection = None):
year = survey_scenario.year
if survey_scenario.simulation is None:
survey_scenario.new_simulation()
simulation = survey_scenario.simulation
if load_first:
assert collection is not None
entities = [simulation.tax_benefit_system.column_by_name[column_name].entity for column_name in columns_name]
assert len(set(entities)) == 1
# entity_symbol = entities[0]
for entity_key_plural in simulation.entity_by_key_plural:
if columns_name[0] in simulation.entity_by_key_plural[entity_key_plural].column_by_name:
entity = entity_key_plural
break
openfisca_survey_collection = SurveyCollection.load(collection = collection)
survey_name = "openfisca_data_{}".format(year)
survey = openfisca_survey_collection.surveys[survey_name]
table = entity
data_frame = survey.get_values(variables = columns_name, table = table)
else:
data_frame = DataFrame(dict([(column_name, simulation.calculate(column_name)) for column_name in columns_name]))
return data_frame
def get_calculated_data_frame_by_entity(survey_scenario = None):
if survey_scenario.simulation is None:
survey_scenario.new_simulation()
simulation = survey_scenario.simulation
data_frame_by_entity = dict()
for entity in simulation.entity_by_key_plural.itervalues():
variables_name = entity.column_by_name.keys()
data_frame_by_entity[entity] = get_data_frame(variables_name, survey_scenario)
return data_frame_by_entity
def simulation_results_as_data_frame(survey_scenario = None, column_names = None, entity = None, force_sum = False):
assert survey_scenario is not None
assert force_sum is False or entity != 'ind', "force_sum cannot be True when entity is 'ind'"
simulation = survey_scenario.simulation
column_by_name = simulation.tax_benefit_system.column_by_name
assert set(column_names) <= set(column_by_name), \
"Variables {} do not exist".format(list(set(column_names) - set(column_by_name)))
entities = list(set([column_by_name[column_name].entity for column_name in column_names] + [entity]))
if force_sum is False and entity != 'ind':
assert len(entities) == 1
data_frame = get_data_frame(column_names, survey_scenario, load_first = False, collection = None)
else:
if 'ind' in entities:
entities.remove('ind')
if entity is None and len(entities) == 1:
entity = entities[0]
data_frame_by_entity = dict()
individual_column_names = [
column_name for column_name in column_names if column_by_name[column_name].entity == 'ind'
]
for selected_entity in entities:
id_variables_column_names = ["id{}".format(selected_entity), "qui{}".format(selected_entity)]
individual_column_names.extend(id_variables_column_names)
selected_entity_column_names = [
column_name for column_name in column_names if column_by_name[column_name].entity == selected_entity
]
data_frame_by_entity[selected_entity] = get_data_frame(
selected_entity_column_names,
survey_scenario,
load_first = False,
collection = None
)
data_frame_by_entity[selected_entity]["id{}".format(entity)] = data_frame_by_entity[selected_entity].index
individual_data_frame = get_data_frame(
individual_column_names,
survey_scenario,
load_first = False,
collection = None
)
for other_entity in entities:
if other_entity != entity:
boolean_index = individual_data_frame["qui{}".format(other_entity)] == 0
index_other_entity = individual_data_frame.loc[boolean_index, "id{}".format(other_entity)].values
for column_name, column_series in data_frame_by_entity[other_entity].iteritems():
individual_data_frame.loc[boolean_index, column_name] \
= column_series.iloc[index_other_entity].values
individual_data_frame[column_name].fillna(0)
if entity == 'ind' and force_sum is False:
return individual_data_frame
entity_column_names = [
column_name for column_name in column_names if column_by_name[column_name].entity == entity
]
entity_data_frame = get_data_frame(
entity_column_names,
survey_scenario,
load_first = False,
collection = None
)
grouped_data_frame = individual_data_frame.groupby(by = "id{}".format(entity)).agg(sum)
grouped_data_frame.drop("qui{}".format(entity), axis = 1, inplace = True)
data_frame = concat([entity_data_frame, grouped_data_frame], axis = 1)
return data_frame
if __name__ == '__main__':
import logging
log = logging.getLogger(__name__)
import sys
logging.basicConfig(level = logging.INFO, stream = sys.stdout)
from openfisca_survey_manager.surveys import Survey, SurveyCollection
from openfisca_plugin_aggregates.tests.test_aggregates import create_survey_scenario
year = 2006
survey_scenario = create_survey_scenario(year)
# dump_simulation_results_data_frame(survey_scenario, collection = "openfisca")
df = get_data_frame(["af"], survey_scenario, load_first = True, collection = "openfisca")
print df | agpl-3.0 |
cdcapano/pycbc | examples/distributions/mass_examples.py | 14 | 1651 | import matplotlib.pyplot as plt
from pycbc import distributions
# Create a mass distribution object that is uniform between 0.5 and 1.5
# solar masses.
mass1_distribution = distributions.Uniform(mass1=(0.5, 1.5))
# Take 1,000,000 random variable samples from this uniform mass distribution.
mass1_samples = mass1_distribution.rvs(size=1000000)
# Draw another distribution that is Gaussian between 0.5 and 1.5 solar masses
# with a mean of 1.2 solar masses and a standard deviation of 0.15 solar
# masses. Gaussian takes the variance as an input so square the standard
# deviation.
variance = 0.15*0.15
mass2_gaussian = distributions.Gaussian(mass2=(0.5, 1.5), mass2_mean=1.2,
mass2_var=variance)
# Take 1,000,000 random variable samples from this Gaussian mass distribution.
mass2_samples = mass2_gaussian.rvs(size=1000000)
# We can make pairs of distributions together, instead of apart.
two_mass_distributions = distributions.Uniform(mass3=(1.6, 3.0),
mass4=(1.6, 3.0))
two_mass_samples = two_mass_distributions.rvs(size=1000000)
# Choose 50 bins for the histogram subplots.
n_bins = 50
# Plot histograms of samples in subplots
fig, axes = plt.subplots(nrows=2, ncols=2)
ax0, ax1, ax2, ax3, = axes.flat
ax0.hist(mass1_samples['mass1'], bins = n_bins)
ax1.hist(mass2_samples['mass2'], bins = n_bins)
ax2.hist(two_mass_samples['mass3'], bins = n_bins)
ax3.hist(two_mass_samples['mass4'], bins = n_bins)
ax0.set_title('Mass 1 samples')
ax1.set_title('Mass 2 samples')
ax2.set_title('Mass 3 samples')
ax3.set_title('Mass 4 samples')
plt.tight_layout()
plt.show()
| gpl-3.0 |
wlamond/scikit-learn | sklearn/tests/test_discriminant_analysis.py | 37 | 11979 | import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.datasets import make_blobs
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.discriminant_analysis import _cov
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]], dtype='f')
y = np.array([1, 1, 1, 2, 2, 2])
y3 = np.array([1, 1, 2, 2, 3, 3])
# Degenerate data with only one feature (still should be separable)
X1 = np.array([[-2, ], [-1, ], [-1, ], [1, ], [1, ], [2, ]], dtype='f')
# Data is just 9 separable points in the plane
X6 = np.array([[0, 0], [-2, -2], [-2, -1], [-1, -1], [-1, -2],
[1, 3], [1, 2], [2, 1], [2, 2]])
y6 = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2])
y7 = np.array([1, 2, 3, 2, 3, 1, 2, 3, 1])
# Degenerate data with 1 feature (still should be separable)
X7 = np.array([[-3, ], [-2, ], [-1, ], [-1, ], [0, ], [1, ], [1, ],
[2, ], [3, ]])
# Data that has zero variance in one dimension and needs regularization
X2 = np.array([[-3, 0], [-2, 0], [-1, 0], [-1, 0], [0, 0], [1, 0], [1, 0],
[2, 0], [3, 0]])
# One element class
y4 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 2])
# Data with less samples in a class than n_features
X5 = np.c_[np.arange(8), np.zeros((8, 3))]
y5 = np.array([0, 0, 0, 0, 0, 1, 1, 1])
solver_shrinkage = [('svd', None), ('lsqr', None), ('eigen', None),
('lsqr', 'auto'), ('lsqr', 0), ('lsqr', 0.43),
('eigen', 'auto'), ('eigen', 0), ('eigen', 0.43)]
def test_lda_predict():
# Test LDA classification.
# This checks that LDA implements fit and predict and returns correct
# values for simple toy data.
for test_case in solver_shrinkage:
solver, shrinkage = test_case
clf = LinearDiscriminantAnalysis(solver=solver, shrinkage=shrinkage)
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y, 'solver %s' % solver)
# Assert that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y, 'solver %s' % solver)
# Test probability estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y,
'solver %s' % solver)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1,
8, 'solver %s' % solver)
# Primarily test for commit 2f34950 -- "reuse" of priors
y_pred3 = clf.fit(X, y3).predict(X)
# LDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3), 'solver %s' % solver)
# Test invalid shrinkages
clf = LinearDiscriminantAnalysis(solver="lsqr", shrinkage=-0.2231)
assert_raises(ValueError, clf.fit, X, y)
clf = LinearDiscriminantAnalysis(solver="eigen", shrinkage="dummy")
assert_raises(ValueError, clf.fit, X, y)
clf = LinearDiscriminantAnalysis(solver="svd", shrinkage="auto")
assert_raises(NotImplementedError, clf.fit, X, y)
# Test unknown solver
clf = LinearDiscriminantAnalysis(solver="dummy")
assert_raises(ValueError, clf.fit, X, y)
def test_lda_priors():
# Test priors (negative priors)
priors = np.array([0.5, -0.5])
clf = LinearDiscriminantAnalysis(priors=priors)
msg = "priors must be non-negative"
assert_raise_message(ValueError, msg, clf.fit, X, y)
# Test that priors passed as a list are correctly handled (run to see if
# failure)
clf = LinearDiscriminantAnalysis(priors=[0.5, 0.5])
clf.fit(X, y)
# Test that priors always sum to 1
priors = np.array([0.5, 0.6])
prior_norm = np.array([0.45, 0.55])
clf = LinearDiscriminantAnalysis(priors=priors)
assert_warns(UserWarning, clf.fit, X, y)
assert_array_almost_equal(clf.priors_, prior_norm, 2)
def test_lda_coefs():
# Test if the coefficients of the solvers are approximately the same.
n_features = 2
n_classes = 2
n_samples = 1000
X, y = make_blobs(n_samples=n_samples, n_features=n_features,
centers=n_classes, random_state=11)
clf_lda_svd = LinearDiscriminantAnalysis(solver="svd")
clf_lda_lsqr = LinearDiscriminantAnalysis(solver="lsqr")
clf_lda_eigen = LinearDiscriminantAnalysis(solver="eigen")
clf_lda_svd.fit(X, y)
clf_lda_lsqr.fit(X, y)
clf_lda_eigen.fit(X, y)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_lsqr.coef_, 1)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_eigen.coef_, 1)
assert_array_almost_equal(clf_lda_eigen.coef_, clf_lda_lsqr.coef_, 1)
def test_lda_transform():
# Test LDA transform.
clf = LinearDiscriminantAnalysis(solver="svd", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = LinearDiscriminantAnalysis(solver="eigen", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = LinearDiscriminantAnalysis(solver="lsqr", n_components=1)
clf.fit(X, y)
msg = "transform not implemented for 'lsqr'"
assert_raise_message(NotImplementedError, msg, clf.transform, X)
def test_lda_explained_variance_ratio():
# Test if the sum of the normalized eigen vectors values equals 1,
# Also tests whether the explained_variance_ratio_ formed by the
# eigen solver is the same as the explained_variance_ratio_ formed
# by the svd solver
state = np.random.RandomState(0)
X = state.normal(loc=0, scale=100, size=(40, 20))
y = state.randint(0, 3, size=(40,))
clf_lda_eigen = LinearDiscriminantAnalysis(solver="eigen")
clf_lda_eigen.fit(X, y)
assert_almost_equal(clf_lda_eigen.explained_variance_ratio_.sum(), 1.0, 3)
assert_equal(clf_lda_eigen.explained_variance_ratio_.shape, (2,),
"Unexpected length for explained_variance_ratio_")
clf_lda_svd = LinearDiscriminantAnalysis(solver="svd")
clf_lda_svd.fit(X, y)
assert_almost_equal(clf_lda_svd.explained_variance_ratio_.sum(), 1.0, 3)
assert_equal(clf_lda_svd.explained_variance_ratio_.shape, (2,),
"Unexpected length for explained_variance_ratio_")
assert_array_almost_equal(clf_lda_svd.explained_variance_ratio_,
clf_lda_eigen.explained_variance_ratio_)
def test_lda_orthogonality():
# arrange four classes with their means in a kite-shaped pattern
# the longer distance should be transformed to the first component, and
# the shorter distance to the second component.
means = np.array([[0, 0, -1], [0, 2, 0], [0, -2, 0], [0, 0, 5]])
# We construct perfectly symmetric distributions, so the LDA can estimate
# precise means.
scatter = np.array([[0.1, 0, 0], [-0.1, 0, 0], [0, 0.1, 0], [0, -0.1, 0],
[0, 0, 0.1], [0, 0, -0.1]])
X = (means[:, np.newaxis, :] + scatter[np.newaxis, :, :]).reshape((-1, 3))
y = np.repeat(np.arange(means.shape[0]), scatter.shape[0])
# Fit LDA and transform the means
clf = LinearDiscriminantAnalysis(solver="svd").fit(X, y)
means_transformed = clf.transform(means)
d1 = means_transformed[3] - means_transformed[0]
d2 = means_transformed[2] - means_transformed[1]
d1 /= np.sqrt(np.sum(d1 ** 2))
d2 /= np.sqrt(np.sum(d2 ** 2))
# the transformed within-class covariance should be the identity matrix
assert_almost_equal(np.cov(clf.transform(scatter).T), np.eye(2))
# the means of classes 0 and 3 should lie on the first component
assert_almost_equal(np.abs(np.dot(d1[:2], [1, 0])), 1.0)
# the means of classes 1 and 2 should lie on the second component
assert_almost_equal(np.abs(np.dot(d2[:2], [0, 1])), 1.0)
def test_lda_scaling():
# Test if classification works correctly with differently scaled features.
n = 100
rng = np.random.RandomState(1234)
# use uniform distribution of features to make sure there is absolutely no
# overlap between classes.
x1 = rng.uniform(-1, 1, (n, 3)) + [-10, 0, 0]
x2 = rng.uniform(-1, 1, (n, 3)) + [10, 0, 0]
x = np.vstack((x1, x2)) * [1, 100, 10000]
y = [-1] * n + [1] * n
for solver in ('svd', 'lsqr', 'eigen'):
clf = LinearDiscriminantAnalysis(solver=solver)
# should be able to separate the data perfectly
assert_equal(clf.fit(x, y).score(x, y), 1.0,
'using covariance: %s' % solver)
def test_qda():
# QDA classification.
# This checks that QDA implements fit and predict and returns
# correct values for a simple toy dataset.
clf = QuadraticDiscriminantAnalysis()
y_pred = clf.fit(X6, y6).predict(X6)
assert_array_equal(y_pred, y6)
# Assure that it works with 1D data
y_pred1 = clf.fit(X7, y6).predict(X7)
assert_array_equal(y_pred1, y6)
# Test probas estimates
y_proba_pred1 = clf.predict_proba(X7)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y6)
y_log_proba_pred1 = clf.predict_log_proba(X7)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1, 8)
y_pred3 = clf.fit(X6, y7).predict(X6)
# QDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y7))
# Classes should have at least 2 elements
assert_raises(ValueError, clf.fit, X6, y4)
def test_qda_priors():
clf = QuadraticDiscriminantAnalysis()
y_pred = clf.fit(X6, y6).predict(X6)
n_pos = np.sum(y_pred == 2)
neg = 1e-10
clf = QuadraticDiscriminantAnalysis(priors=np.array([neg, 1 - neg]))
y_pred = clf.fit(X6, y6).predict(X6)
n_pos2 = np.sum(y_pred == 2)
assert_greater(n_pos2, n_pos)
def test_qda_store_covariances():
# The default is to not set the covariances_ attribute
clf = QuadraticDiscriminantAnalysis().fit(X6, y6)
assert_true(not hasattr(clf, 'covariances_'))
# Test the actual attribute:
clf = QuadraticDiscriminantAnalysis(store_covariances=True).fit(X6, y6)
assert_true(hasattr(clf, 'covariances_'))
assert_array_almost_equal(
clf.covariances_[0],
np.array([[0.7, 0.45], [0.45, 0.7]])
)
assert_array_almost_equal(
clf.covariances_[1],
np.array([[0.33333333, -0.33333333], [-0.33333333, 0.66666667]])
)
def test_qda_regularization():
# the default is reg_param=0. and will cause issues
# when there is a constant variable
clf = QuadraticDiscriminantAnalysis()
with ignore_warnings():
y_pred = clf.fit(X2, y6).predict(X2)
assert_true(np.any(y_pred != y6))
# adding a little regularization fixes the problem
clf = QuadraticDiscriminantAnalysis(reg_param=0.01)
with ignore_warnings():
clf.fit(X2, y6)
y_pred = clf.predict(X2)
assert_array_equal(y_pred, y6)
# Case n_samples_in_a_class < n_features
clf = QuadraticDiscriminantAnalysis(reg_param=0.1)
with ignore_warnings():
clf.fit(X5, y5)
y_pred5 = clf.predict(X5)
assert_array_equal(y_pred5, y5)
def test_covariance():
x, y = make_blobs(n_samples=100, n_features=5,
centers=1, random_state=42)
# make features correlated
x = np.dot(x, np.arange(x.shape[1] ** 2).reshape(x.shape[1], x.shape[1]))
c_e = _cov(x, 'empirical')
assert_almost_equal(c_e, c_e.T)
c_s = _cov(x, 'auto')
assert_almost_equal(c_s, c_s.T)
| bsd-3-clause |
sidtechnical/ALTwitter | scripts/create_div_for_index_page.py | 1 | 1298 | #!/usr/bin/env python
import pandas as pd
from django.template import Template, Context, loader
from django.conf import settings
settings.configure()
mep_df = pd.read_csv('clean_output.csv')
mep_full_data = mep_df.to_dict('records')
# print mep_full_data
# mep_img_urls_df = pd.read_csv('mep_prof_img_url.csv')
# mep_img_data = mep_img_urls_df.to_dict('r')
# print mep_img_data
template = """
{% for mep_row in mep_data %}
<div class="portfolio {{ mep_row.NAME|make_list|first }}" data-cat="{{ mep_row.NAME|make_list|first }}">
<div class="portfolio-wrapper">
<a href="pages/{{ mep_row.SCREEN_NAME }}.html"><img width="300" height ="200" src="{{ mep_row.prof_img_url }}" alt="{{ mep_row.NAME }}" /></a>
<div class="label">
<div class="label-text">
<a class="text-title">{{ mep_row.NAME }}</a>
<span class="text-category"> {{ mep_row.NATIONALITY }} </span>
</div>
<div class="label-bg"></div>
</div>
</div>
</div>
{% endfor %}
"""
t = Template(template)
c = Context({"mep_data": mep_full_data})
f1=open('index_divs.html', 'w+')
try:
f1.write(t.render(c).encode('utf-8'))
except UnicodeEncodeError:
f1.write(t.render(c).encode('ascii', 'ignore').decode('ascii'))
f1.close() | gpl-3.0 |
JamesDickenson/aima-python | grading/bayesian-submissions.py | 4 | 2414 | import importlib
import traceback
from grading.util import roster, print_table
# from logic import FolKB
# from utils import expr
import os
from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB()
def indent(howMuch = 1):
space = ' '
for i in range(1, howMuch):
space += ' '
return space
def printKB(label, kb):
print(indent(), label + ' example:')
print(indent(2), 'knowledge base:')
for clause in kb.clauses:
print(indent(3), str(clause))
def printResults(query, gen, limit=3):
for count in range(limit):
try:
long = next(gen)
except StopIteration:
print()
return
short = {}
for v in long:
if v in query.args:
short[v] = long[v]
print(short, end=' ')
print('...')
def tryOne(label, frame):
fit = gnb.fit(frame.data, frame.target)
print('')
print_table(fit.theta_,
header=[frame.feature_names],
topLeft=['Means:'],
leftColumn=frame.target_names,
numfmt='%6.3f',
njust='center',
tjust='rjust',
)
y_pred = fit.predict(frame.data)
print("Number of mislabeled points out of a total %d points : %d"
% (len(frame.data), (frame.target != y_pred).sum()))
def tryExamples(examples):
for label in examples:
tryOne(label, examples[label])
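# Illustrative sketch (not part of the original script): the attribute set a
# submission's Examples entry appears to need, inferred from how tryOne uses a
# frame -- data, target, feature_names and target_names, similar to sklearn's
# dataset bunches. The numbers below are made up for demonstration.
class ToyFrame:
    data = [[5.1, 3.5], [4.9, 3.0], [6.2, 3.4], [5.9, 3.0]]
    target = [0, 0, 1, 1]
    feature_names = ['length', 'width']
    target_names = ['class0', 'class1']
# tryOne('toy', ToyFrame()) would fit GaussianNB on this frame and report the
# per-class feature means and the number of mislabeled points.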
submissions = {}
scores = {}
message1 = 'Submissions that compile:'
root = os.getcwd()
for student in roster:
try:
os.chdir(root + '/submissions/' + student)
# http://stackoverflow.com/a/17136796/2619926
mod = importlib.import_module('submissions.' + student + '.myBayes')
submissions[student] = mod.Examples
message1 += ' ' + student
except ImportError:
pass
except:
traceback.print_exc()
os.chdir(root)
print(message1)
print('----------------------------------------')
for student in roster:
if not student in submissions.keys():
continue
scores[student] = []
try:
examples = submissions[student]
print('Bayesian Networks from:', student)
tryExamples(examples)
except:
traceback.print_exc()
print(student + ' scores ' + str(scores[student]) + ' = ' + str(sum(scores[student])))
print('----------------------------------------') | mit |
istellartech/OpenGoddard | examples/10_Low_Thrust_Orbit_Transfer.py | 1 | 6942 | # -*- coding: utf-8 -*-
# Copyright 2017 Interstellar Technologies Inc. All Rights Reserved.
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from OpenGoddard.optimize import Problem, Guess, Condition, Dynamics
class Orbiter:
def __init__(self):
self.u_max = 0.01
self.r0 = 1.0
self.vr0 = 0.0
self.vt0 = 1.0
self.rf = 4.0
self.vrf = 0.0
self.vtf = 0.5
self.tf_max = 55
def dynamics(prob, obj, section):
r = prob.states(0, section)
vr = prob.states(1, section)
vt = prob.states(2, section)
ur1 = prob.controls(0, section)
ur2 = prob.controls(1, section)
ut1 = prob.controls(2, section)
ut2 = prob.controls(3, section)
dx = Dynamics(prob, section)
dx[0] = vr
dx[1] = vt**2 / r - 1 / r**2 + (ur1 - ur2)
dx[2] = - vr * vt / r + (ut1 - ut2)
return dx()
def equality(prob, obj):
r = prob.states_all_section(0)
vr = prob.states_all_section(1)
vt = prob.states_all_section(2)
ur1 = prob.controls_all_section(0)
ur2 = prob.controls_all_section(1)
ut1 = prob.controls_all_section(2)
ut2 = prob.controls_all_section(3)
tf = prob.time_final(-1)
result = Condition()
# event condition
result.equal(r[0], obj.r0)
result.equal(vr[0], obj.vr0)
result.equal(vt[0], obj.vt0)
result.equal(r[-1], obj.rf)
result.equal(vr[-1], obj.vrf)
result.equal(vt[-1], obj.vtf)
return result()
def inequality(prob, obj):
r = prob.states_all_section(0)
vr = prob.states_all_section(1)
vt = prob.states_all_section(2)
ur1 = prob.controls_all_section(0)
ur2 = prob.controls_all_section(1)
ut1 = prob.controls_all_section(2)
ut2 = prob.controls_all_section(3)
tf = prob.time_final(-1)
result = Condition()
# lower bounds
result.lower_bound(r, obj.r0)
result.lower_bound(ur1, 0.0)
result.lower_bound(ut1, 0.0)
result.lower_bound(ur2, 0.0)
result.lower_bound(ut2, 0.0)
result.lower_bound(tf, 0.0)
# upper bounds
result.upper_bound(r, obj.rf)
result.upper_bound(ur1, obj.u_max)
result.upper_bound(ut1, obj.u_max)
result.upper_bound(ur2, obj.u_max)
result.upper_bound(ut2, obj.u_max)
result.upper_bound(tf, obj.tf_max)
return result()
def cost(prob, obj):
return 0.0
def running_cost(prob, obj):
ur1 = prob.controls_all_section(0)
ur2 = prob.controls_all_section(1)
ut1 = prob.controls_all_section(2)
ut2 = prob.controls_all_section(3)
return (ur1 + ur2) + (ut1 + ut2)
# ========================
plt.close("all")
plt.ion()
# Program Starting Point
time_init = [0.0, 10.0]
n = [100]
num_states = [3]
num_controls = [4]
max_iteration = 10
flag_savefig = True
savefig_dir = "10_Low_Thrust_Orbit_Transfer/"
# ------------------------
# set OpenGoddard class for algorithm determination
prob = Problem(time_init, n, num_states, num_controls, max_iteration)
obj = Orbiter()
# ========================
# Initial parameter guess
r_init = Guess.linear(prob.time_all_section, obj.r0, obj.rf)
# Guess.plot(prob.time_all_section, r_init, "r", "time", "r")
# if(flag_savefig):plt.savefig(savefig_dir + "guess_r" + savefig_add + ".png")
vr_init = Guess.linear(prob.time_all_section, obj.vr0, obj.vrf)
# Guess.plot(prob.time_all_section, vr_init, "vr", "time", "vr")
# if(flag_savefig):plt.savefig(savefig_dir + "guess_vr" + savefig_add + ".png")
vt_init = Guess.linear(prob.time_all_section, obj.vt0, obj.vtf)
# Guess.plot(prob.time_all_section, theta_init, "vt", "time", "vt")
# if(flag_savefig):plt.savefig(savefig_dir + "guess_vt" + savefig_add + ".png")
ur1_init = Guess.linear(prob.time_all_section, obj.u_max, obj.u_max)
# Guess.plot(prob.time_all_section, ur1_init, "ur1", "time", "ur1")
# if(flag_savefig):plt.savefig(savefig_dir + "guess_ur1" + savefig_add + ".png")
ut1_init = Guess.linear(prob.time_all_section, obj.u_max, obj.u_max)
# Guess.plot(prob.time_all_section, ut1_init, "ut1", "time", "ut1")
# if(flag_savefig):plt.savefig(savefig_dir + "guess_ut1" + savefig_add + ".png")
prob.set_states_all_section(0, r_init)
prob.set_states_all_section(1, vr_init)
prob.set_states_all_section(2, vt_init)
prob.set_controls_all_section(0, ur1_init)
prob.set_controls_all_section(2, ut1_init)
# ========================
# Main Process
# Assign problem to SQP solver
prob.dynamics = [dynamics]
prob.knot_states_smooth = []
prob.cost = cost
prob.running_cost = running_cost
prob.equality = equality
prob.inequality = inequality
def display_func():
tf = prob.time_final(-1)
print("tf: {0:.5f}".format(tf))
prob.solve(obj, display_func, ftol=1e-12)
# ========================
# Post Process
# ------------------------
# Convert parameter vector to variable
r = prob.states_all_section(0)
vr = prob.states_all_section(1)
vt = prob.states_all_section(2)
ur1 = prob.controls_all_section(0)
ur2 = prob.controls_all_section(1)
ut1 = prob.controls_all_section(2)
ut2 = prob.controls_all_section(3)
time = prob.time_update()
# ------------------------
# Visualization
plt.figure()
plt.plot(time, r, marker="o", label="r")
for line in prob.time_knots():
plt.axvline(line, color="k", alpha=0.5)
plt.grid()
plt.xlabel("time [-]")
plt.ylabel("r [-]")
plt.legend(loc="best")
if(flag_savefig): plt.savefig(savefig_dir + "r" + ".png")
plt.figure()
plt.plot(time, vr, marker="o", label="vr")
plt.plot(time, vt, marker="o", label="vt")
for line in prob.time_knots():
plt.axvline(line, color="k", alpha=0.5)
plt.grid()
plt.xlabel("time [-]")
plt.ylabel("velocity [-]")
plt.legend(loc="best")
if(flag_savefig): plt.savefig(savefig_dir + "velocity" + ".png")
plt.figure()
plt.plot(time, (ur1 - ur2), marker="o", label="ur")
plt.plot(time, (ut1 - ut2), marker="o", label="ut")
# plt.plot(time, ur1, marker="o", label="ur1")
# plt.plot(time, ur2, marker="o", label="ur2")
# plt.plot(time, ut1, marker="o", label="ut1")
# plt.plot(time, ut2, marker="o", label="ut2")
plt.grid()
plt.xlabel("time [-]")
plt.ylabel("thrust [-]")
# plt.ylim([-0.02, 0.6])
plt.legend(loc="best")
if(flag_savefig): plt.savefig(savefig_dir + "thrust" + ".png")
from scipy import integrate
from scipy import interpolate
theta = integrate.cumtrapz(vt / r, time, initial=0)
theta_f = interpolate.interp1d(time, theta)
r_f = interpolate.interp1d(time, r)
time_fine = np.linspace(time[0], time[-1], 1000)
r_fine = r_f(time_fine)
theta_fine = theta_f(time_fine)
fig = plt.figure()
# plt.plot(r*np.cos(theta), r*np.sin(theta))
plt.plot(r_fine*np.cos(theta_fine), r_fine*np.sin(theta_fine))
ax = fig.add_subplot(111)
circle0 = plt.Circle((0.0, 0.0), 1.0, ls="--", fill=False, fc='none')
circlef = plt.Circle((0.0, 0.0), 4.0, ls="--", fill=False, fc='none')
ax.add_patch(circle0)
ax.add_patch(circlef)
plt.grid()
plt.axis('equal')
plt.ylim((-4.1, 4.1))
if(flag_savefig): plt.savefig(savefig_dir + "trajectry" + ".png")
plt.show()
| mit |
liyu1990/sklearn | sklearn/mixture/gmm.py | 7 | 30564 | """
Gaussian Mixture Models.
This implementation corresponds to frequentist (non-Bayesian) formulation
of Gaussian Mixture Models.
"""
# Author: Ron Weiss <[email protected]>
# Fabian Pedregosa <[email protected]>
# Bertrand Thirion <[email protected]>
import warnings
import numpy as np
from scipy import linalg
from time import time
from ..base import BaseEstimator
from ..utils import check_random_state, check_array
from ..utils.extmath import logsumexp
from ..utils.validation import check_is_fitted
from .. import cluster
from sklearn.externals.six.moves import zip
EPS = np.finfo(float).eps
def log_multivariate_normal_density(X, means, covars, covariance_type='diag'):
"""Compute the log probability under a multivariate Gaussian distribution.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row corresponds to a
single data point.
means : array_like, shape (n_components, n_features)
List of n_features-dimensional mean vectors for n_components Gaussians.
Each row corresponds to a single mean vector.
covars : array_like
List of n_components covariance parameters for each Gaussian. The shape
depends on `covariance_type`:
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
covariance_type : string
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
Returns
-------
lpr : array_like, shape (n_samples, n_components)
Array containing the log probabilities of each data point in
X under each of the n_components multivariate Gaussian distributions.
"""
log_multivariate_normal_density_dict = {
'spherical': _log_multivariate_normal_density_spherical,
'tied': _log_multivariate_normal_density_tied,
'diag': _log_multivariate_normal_density_diag,
'full': _log_multivariate_normal_density_full}
return log_multivariate_normal_density_dict[covariance_type](
X, means, covars)
def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,
random_state=None):
"""Generate random samples from a Gaussian distribution.
Parameters
----------
mean : array_like, shape (n_features,)
Mean of the distribution.
covar : array_like, optional
Covariance of the distribution. The shape depends on `covariance_type`:
scalar if 'spherical',
(n_features) if 'diag',
(n_features, n_features) if 'tied', or 'full'
covariance_type : string, optional
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array, shape (n_features, n_samples)
Randomly generated sample
"""
rng = check_random_state(random_state)
n_dim = len(mean)
rand = rng.randn(n_dim, n_samples)
if n_samples == 1:
rand.shape = (n_dim,)
if covariance_type == 'spherical':
rand *= np.sqrt(covar)
elif covariance_type == 'diag':
rand = np.dot(np.diag(np.sqrt(covar)), rand)
else:
s, U = linalg.eigh(covar)
s.clip(0, out=s) # get rid of tiny negatives
np.sqrt(s, out=s)
U *= s
rand = np.dot(U, rand)
return (rand.T + mean).T
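# Illustrative sketch (editorial addition, not part of the original module):
# drawing a handful of samples from a 2-D diagonal Gaussian with the helper
# above. Per the docstring, the returned array has shape (n_features, n_samples).
# >>> samples = sample_gaussian(np.zeros(2), np.array([1.0, 4.0]), 'diag',
# ...                           n_samples=5, random_state=0)
# >>> samples.shape
# (2, 5)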
class GMM(BaseEstimator):
"""Gaussian Mixture Model
Representation of a Gaussian mixture model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a GMM distribution.
Initializes parameters such that every mixture component has zero
mean and identity covariance.
Read more in the :ref:`User Guide <gmm>`.
Parameters
----------
n_components : int, optional
Number of mixture components. Defaults to 1.
covariance_type : string, optional
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
random_state: RandomState or an int seed (None by default)
A random number generator instance
min_covar : float, optional
Floor on the diagonal of the covariance matrix to prevent
overfitting. Defaults to 1e-3.
tol : float, optional
Convergence threshold. EM iterations will stop when average
gain in log-likelihood is below this threshold. Defaults to 1e-3.
n_iter : int, optional
Number of EM iterations to perform.
n_init : int, optional
Number of initializations to perform. The best result is kept.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
init_params : string, optional
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default: 0
Enable verbose output. If 1 then it always prints the current
initialization and iteration step. If greater than 1 then
it prints additionally the change and time needed for each step.
Attributes
----------
weights_ : array, shape (`n_components`,)
This attribute stores the mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
covars_ : array
Covariance parameters for each mixture component. The shape
depends on `covariance_type`::
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
DPGMM : Infinite gaussian mixture model, using the dirichlet
process, fit with a variational algorithm
VBGMM : Finite gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
Examples
--------
>>> import numpy as np
>>> from sklearn import mixture
>>> np.random.seed(1)
>>> g = mixture.GMM(n_components=2)
>>> # Generate random observations with two modes centered on 0
>>> # and 10 to use for training.
>>> obs = np.concatenate((np.random.randn(100, 1),
... 10 + np.random.randn(300, 1)))
>>> g.fit(obs) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.75, 0.25])
>>> np.round(g.means_, 2)
array([[ 10.05],
[ 0.06]])
>>> np.round(g.covars_, 2) #doctest: +SKIP
array([[[ 1.02]],
[[ 0.96]]])
>>> g.predict([[0], [2], [9], [10]]) #doctest: +ELLIPSIS
array([1, 1, 0, 0]...)
>>> np.round(g.score([[0], [2], [9], [10]]), 2)
array([-2.19, -4.58, -1.75, -1.21])
>>> # Refit the model on new data (initial parameters remain the
>>> # same), this time with an even split between the two modes.
>>> g.fit(20 * [[0]] + 20 * [[10]]) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.5, 0.5])
"""
def __init__(self, n_components=1, covariance_type='diag',
random_state=None, tol=1e-3, min_covar=1e-3,
n_iter=100, n_init=1, params='wmc', init_params='wmc',
verbose=0):
self.n_components = n_components
self.covariance_type = covariance_type
self.tol = tol
self.min_covar = min_covar
self.random_state = random_state
self.n_iter = n_iter
self.n_init = n_init
self.params = params
self.init_params = init_params
self.verbose = verbose
if covariance_type not in ['spherical', 'tied', 'diag', 'full']:
raise ValueError('Invalid value for covariance_type: %s' %
covariance_type)
if n_init < 1:
raise ValueError('GMM estimation requires at least one run')
self.weights_ = np.ones(self.n_components) / self.n_components
# flag to indicate exit status of fit() method: converged (True) or
# n_iter reached (False)
self.converged_ = False
def _get_covars(self):
"""Covariance parameters for each mixture component.
The shape depends on ``cvtype``::
(n_states, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_states, n_features) if 'diag',
(n_states, n_features, n_features) if 'full'
"""
if self.covariance_type == 'full':
return self.covars_
elif self.covariance_type == 'diag':
return [np.diag(cov) for cov in self.covars_]
elif self.covariance_type == 'tied':
return [self.covars_] * self.n_components
elif self.covariance_type == 'spherical':
return [np.diag(cov) for cov in self.covars_]
def _set_covars(self, covars):
"""Provide values for covariance"""
covars = np.asarray(covars)
_validate_covars(covars, self.covariance_type, self.n_components)
self.covars_ = covars
def score_samples(self, X):
"""Return the per-sample likelihood of the data under the model.
Compute the log probability of X under the model and
return the posterior distribution (responsibilities) of each
mixture component for each element of X.
Parameters
----------
X: array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X.
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'means_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.size == 0:
return np.array([]), np.empty((0, self.n_components))
if X.shape[1] != self.means_.shape[1]:
raise ValueError('The shape of X is not compatible with self')
lpr = (log_multivariate_normal_density(X, self.means_, self.covars_,
self.covariance_type) +
np.log(self.weights_))
logprob = logsumexp(lpr, axis=1)
responsibilities = np.exp(lpr - logprob[:, np.newaxis])
return logprob, responsibilities
def score(self, X, y=None):
"""Compute the log probability under the model.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
"""
logprob, _ = self.score_samples(X)
return logprob
def predict(self, X):
"""Predict label for data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities.argmax(axis=1)
def predict_proba(self, X):
"""Predict posterior probability of data under each Gaussian
in the model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
responsibilities : array-like, shape = (n_samples, n_components)
Returns the probability of the sample for each Gaussian
(state) in the model.
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities
def sample(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array_like, shape (n_samples, n_features)
List of samples
"""
check_is_fitted(self, 'means_')
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
weight_cdf = np.cumsum(self.weights_)
X = np.empty((n_samples, self.means_.shape[1]))
rand = random_state.rand(n_samples)
# decide which component to use for each sample
comps = weight_cdf.searchsorted(rand)
# for each component, generate all needed samples
for comp in range(self.n_components):
# occurrences of current component in X
comp_in_X = (comp == comps)
# number of those occurrences
num_comp_in_X = comp_in_X.sum()
if num_comp_in_X > 0:
if self.covariance_type == 'tied':
cv = self.covars_
elif self.covariance_type == 'spherical':
cv = self.covars_[comp][0]
else:
cv = self.covars_[comp]
X[comp_in_X] = sample_gaussian(
self.means_[comp], cv, self.covariance_type,
num_comp_in_X, random_state=random_state).T
return X
def fit_predict(self, X, y=None):
"""Fit and then predict labels for data.
Warning: due to the final maximization step in the EM algorithm,
with low iterations the prediction may not be 100% accurate
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
return self._fit(X, y).argmax(axis=1)
def _fit(self, X, y=None, do_prediction=False):
"""Estimate model parameters with the EM algorithm.
An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.
"""
# initialization step
X = check_array(X, dtype=np.float64, ensure_min_samples=2,
estimator=self)
if X.shape[0] < self.n_components:
raise ValueError(
'GMM estimation with %s components, but got only %s samples' %
(self.n_components, X.shape[0]))
max_log_prob = -np.infty
if self.verbose > 0:
print('Expectation-maximization algorithm started.')
for init in range(self.n_init):
if self.verbose > 0:
print('Initialization ' + str(init + 1))
start_init_time = time()
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state).fit(X).cluster_centers_
if self.verbose > 1:
print('\tMeans have been initialized.')
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components,
self.n_components)
if self.verbose > 1:
print('\tWeights have been initialized.')
if 'c' in self.init_params or not hasattr(self, 'covars_'):
cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
if not cv.shape:
cv.shape = (1, 1)
self.covars_ = \
distribute_covar_matrix_to_match_covariance_type(
cv, self.covariance_type, self.n_components)
if self.verbose > 1:
print('\tCovariance matrices have been initialized.')
# EM algorithms
current_log_likelihood = None
# reset self.converged_ to False
self.converged_ = False
for i in range(self.n_iter):
if self.verbose > 0:
print('\tEM iteration ' + str(i + 1))
start_iter_time = time()
prev_log_likelihood = current_log_likelihood
# Expectation step
log_likelihoods, responsibilities = self.score_samples(X)
current_log_likelihood = log_likelihoods.mean()
# Check for convergence.
if prev_log_likelihood is not None:
change = abs(current_log_likelihood - prev_log_likelihood)
if self.verbose > 1:
print('\t\tChange: ' + str(change))
if change < self.tol:
self.converged_ = True
if self.verbose > 0:
print('\t\tEM algorithm converged.')
break
# Maximization step
self._do_mstep(X, responsibilities, self.params,
self.min_covar)
if self.verbose > 1:
print('\t\tEM iteration ' + str(i + 1) + ' took {0:.5f}s'.format(
time() - start_iter_time))
# if the results are better, keep it
if self.n_iter:
if current_log_likelihood > max_log_prob:
max_log_prob = current_log_likelihood
best_params = {'weights': self.weights_,
'means': self.means_,
'covars': self.covars_}
if self.verbose > 1:
print('\tBetter parameters were found.')
if self.verbose > 1:
print('\tInitialization ' + str(init + 1) + ' took {0:.5f}s'.format(
time() - start_init_time))
# check the existence of an init param that was not subject to
# likelihood computation issue.
if np.isneginf(max_log_prob) and self.n_iter:
raise RuntimeError(
"EM algorithm was never able to compute a valid likelihood " +
"given initial parameters. Try different init parameters " +
"(or increasing n_init) or check for degenerate data.")
if self.n_iter:
self.covars_ = best_params['covars']
self.means_ = best_params['means']
self.weights_ = best_params['weights']
else: # self.n_iter == 0 occurs when using GMM within HMM
# Need to make sure that there are responsibilities to output
# Output zeros because it was just a quick initialization
responsibilities = np.zeros((X.shape[0], self.n_components))
return responsibilities
def fit(self, X, y=None):
"""Estimate model parameters with the EM algorithm.
An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self
"""
self._fit(X, y)
return self
def _do_mstep(self, X, responsibilities, params, min_covar=0):
""" Perform the Mstep of the EM algorithm and return the class weights
"""
weights = responsibilities.sum(axis=0)
weighted_X_sum = np.dot(responsibilities.T, X)
inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)
if 'w' in params:
self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS)
if 'm' in params:
self.means_ = weighted_X_sum * inverse_weights
if 'c' in params:
covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
self.covars_ = covar_mstep_func(
self, X, responsibilities, weighted_X_sum, inverse_weights,
min_covar)
return weights
def _n_parameters(self):
"""Return the number of free parameters in the model."""
ndim = self.means_.shape[1]
if self.covariance_type == 'full':
cov_params = self.n_components * ndim * (ndim + 1) / 2.
elif self.covariance_type == 'diag':
cov_params = self.n_components * ndim
elif self.covariance_type == 'tied':
cov_params = ndim * (ndim + 1) / 2.
elif self.covariance_type == 'spherical':
cov_params = self.n_components
mean_params = ndim * self.n_components
return int(cov_params + mean_params + self.n_components - 1)
def bic(self, X):
"""Bayesian information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
bic: float (the lower the better)
"""
return (-2 * self.score(X).sum() +
self._n_parameters() * np.log(X.shape[0]))
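# Editorial note: this is the standard BIC = -2 * log-likelihood + k * log(n),
# with k = self._n_parameters() free parameters and n = X.shape[0] samples;
# lower values indicate a better parsimony/fit trade-off.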
def aic(self, X):
"""Akaike information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
aic: float (the lower the better)
"""
return - 2 * self.score(X).sum() + 2 * self._n_parameters()
#########################################################################
# some helper routines
#########################################################################
def _log_multivariate_normal_density_diag(X, means, covars):
"""Compute Gaussian log-density at X for a diagonal model"""
n_samples, n_dim = X.shape
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1)
+ np.sum((means ** 2) / covars, 1)
- 2 * np.dot(X, (means / covars).T)
+ np.dot(X ** 2, (1.0 / covars).T))
return lpr
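# Editorial note: the expression above is the termwise expansion of
# sum_d (x_d - mu_d)**2 / var_d, written as mu**2/var - 2*x*(mu/var) + x**2*(1/var)
# so the full (n_samples, n_components) log-density matrix is built from two
# matrix products instead of an explicit loop over components.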
def _log_multivariate_normal_density_spherical(X, means, covars):
"""Compute Gaussian log-density at X for a spherical model"""
cv = covars.copy()
if covars.ndim == 1:
cv = cv[:, np.newaxis]
if covars.shape[1] == 1:
cv = np.tile(cv, (1, X.shape[-1]))
return _log_multivariate_normal_density_diag(X, means, cv)
def _log_multivariate_normal_density_tied(X, means, covars):
"""Compute Gaussian log-density at X for a tied model"""
cv = np.tile(covars, (means.shape[0], 1, 1))
return _log_multivariate_normal_density_full(X, means, cv)
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
"""Log probability for full covariance matrices."""
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
for c, (mu, cv) in enumerate(zip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
# The model is most probably stuck in a component with too
# few observations; we need to reinitialize this component
try:
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
except linalg.LinAlgError:
raise ValueError("'covars' must be symmetric, "
"positive-definite")
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
def _validate_covars(covars, covariance_type, n_components):
"""Do basic checks on matrix covariance sizes and values
"""
from scipy import linalg
if covariance_type == 'spherical':
if len(covars) != n_components:
raise ValueError("'spherical' covars have length n_components")
elif np.any(covars <= 0):
raise ValueError("'spherical' covars must be non-negative")
elif covariance_type == 'tied':
if covars.shape[0] != covars.shape[1]:
raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
elif (not np.allclose(covars, covars.T)
or np.any(linalg.eigvalsh(covars) <= 0)):
raise ValueError("'tied' covars must be symmetric, "
"positive-definite")
elif covariance_type == 'diag':
if len(covars.shape) != 2:
raise ValueError("'diag' covars must have shape "
"(n_components, n_dim)")
elif np.any(covars <= 0):
raise ValueError("'diag' covars must be non-negative")
elif covariance_type == 'full':
if len(covars.shape) != 3:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
elif covars.shape[1] != covars.shape[2]:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
for n, cv in enumerate(covars):
if (not np.allclose(cv, cv.T)
or np.any(linalg.eigvalsh(cv) <= 0)):
raise ValueError("component %d of 'full' covars must be "
"symmetric, positive-definite" % n)
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
def distribute_covar_matrix_to_match_covariance_type(
tied_cv, covariance_type, n_components):
"""Create all the covariance matrices from a given template"""
if covariance_type == 'spherical':
cv = np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]),
(n_components, 1))
elif covariance_type == 'tied':
cv = tied_cv
elif covariance_type == 'diag':
cv = np.tile(np.diag(tied_cv), (n_components, 1))
elif covariance_type == 'full':
cv = np.tile(tied_cv, (n_components, 1, 1))
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
return cv
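# Illustrative sketch (editorial addition): how a 2-feature template covariance
# is broadcast for each covariance_type; shapes follow the conventions in the
# GMM docstring above.
# >>> tied = np.array([[2.0, 0.1], [0.1, 3.0]])
# >>> distribute_covar_matrix_to_match_covariance_type(tied, 'diag', 3).shape
# (3, 2)
# >>> distribute_covar_matrix_to_match_covariance_type(tied, 'full', 3).shape
# (3, 2, 2)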
def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for diagonal cases"""
avg_X2 = np.dot(responsibilities.T, X * X) * norm
avg_means2 = gmm.means_ ** 2
avg_X_means = gmm.means_ * weighted_X_sum * norm
return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar
def _covar_mstep_spherical(*args):
"""Performing the covariance M step for spherical cases"""
cv = _covar_mstep_diag(*args)
return np.tile(cv.mean(axis=1)[:, np.newaxis], (1, cv.shape[1]))
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for full cases"""
# Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
n_features = X.shape[1]
cv = np.empty((gmm.n_components, n_features, n_features))
for c in range(gmm.n_components):
post = responsibilities[:, c]
mu = gmm.means_[c]
diff = X - mu
with np.errstate(under='ignore'):
# Underflow Errors in doing post * X.T are not important
avg_cv = np.dot(post * diff.T, diff) / (post.sum() + 10 * EPS)
cv[c] = avg_cv + min_covar * np.eye(n_features)
return cv
def _covar_mstep_tied(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
# Eq. 15 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(gmm.means_.T, weighted_X_sum)
out = avg_X2 - avg_means2
out *= 1. / X.shape[0]
out.flat[::len(out) + 1] += min_covar
return out
_covar_mstep_funcs = {'spherical': _covar_mstep_spherical,
'diag': _covar_mstep_diag,
'tied': _covar_mstep_tied,
'full': _covar_mstep_full,
}
| bsd-3-clause |
gskielian/SimpleCV | SimpleCV/examples/util/ColorCube.py | 13 | 1901 | from SimpleCV import Image, Camera, Display, Color
import pygame as pg
import numpy as np
from pylab import *
from mpl_toolkits.mplot3d import axes3d
from matplotlib.backends.backend_agg import FigureCanvasAgg
import cv2
bins = 8
#precompute
idxs = []
colors = []
offset = bins/2
skip = 255/bins
for x in range(0,bins):
for y in range(0,bins):
for z in range(0,bins):
b = ((x*skip)+offset)/255.0
g = ((y*skip)+offset)/255.0
r = ((z*skip)+offset)/255.0
idxs.append((x,y,z,(r,g,b)))
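# Each (x, y, z) bin index is paired with the normalized RGB colour of that
# bin's centre, (index*skip + offset) / 255, so the scatter points plotted
# below are drawn in the colour they represent.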
# plot points in 3D
cam = Camera()
disp = Display((800,600))
fig = figure()
fig.set_size_inches( (10,7) )
canvas = FigureCanvasAgg(fig)
azim = 0
while disp.isNotDone():
ax = fig.gca(projection='3d')
ax.set_xlabel('BLUE', color=(0,0,1) )
ax.set_ylabel('GREEN',color=(0,1,0))
ax.set_zlabel('RED',color=(1,0,0))
# Get the color histogram
img = cam.getImage().scale(0.3)
rgb = img.getNumpyCv2()
hist = cv2.calcHist([rgb],[0,1,2],None,[bins,bins,bins],[0,256,0,256,0,256])
hist = hist/np.max(hist)
# render everything
[ ax.plot([x],[y],[z],'.',markersize=max(hist[x,y,z]*100,6),color=color) for x,y,z,color in idxs if(hist[x][y][z]>0) ]
#[ ax.plot([x],[y],[z],'.',color=color) for x,y,z,color in idxs if(hist[x][y][z]>0) ]
ax.set_xlim3d(0, bins-1)
ax.set_ylim3d(0, bins-1)
ax.set_zlim3d(0, bins-1)
azim = (azim+0.5)%360
ax.view_init(elev=35, azim=azim)
########### convert matplotlib to SimpleCV image
canvas.draw()
renderer = canvas.get_renderer()
raw_data = renderer.tostring_rgb()
size = canvas.get_width_height()
surf = pg.image.fromstring(raw_data, size, "RGB")
figure = Image(surf)
############ All done
figure = figure.floodFill((0,0), tolerance=5,color=Color.WHITE)
result = figure.blit(img, pos=(20,20))
result.save(disp)
fig.clf()
| bsd-3-clause |
desihub/desimodel | py/desimodel/footprint.py | 1 | 23205 | # See LICENSE.rst for BSD 3-clause license info
# -*- coding: utf-8 -*-
"""
desimodel.footprint
===================
Utility functions for working with the DESI footprint.
"""
import os
from time import time
import numpy as np
from desiutil.log import get_logger
from .io import load_tiles
from . import __version__ as desimodel_version
log = get_logger()
_pass2program = None
def pass2program(tilepass):
'''Converts integer tile pass number to string program name.
Args:
tilepass (int or int array): tiling pass number.
Returns:
Program name for each pass (str or list of str).
'''
global _pass2program
if _pass2program is None:
tiles = load_tiles()
_pass2program = dict(set(zip(tiles['PASS'], tiles['PROGRAM'])))
if np.isscalar(tilepass):
return _pass2program[tilepass]
else:
return [_pass2program[p] for p in tilepass]
def program2pass(program):
'''Convert string program name to tile passes for that program.
Args:
program (str or str array): program name, *e.g.* DARK, BRIGHT, or GRAY.
Returns:
List of integer passes that cover that program, or list of lists
if input was array-like.
'''
tiles = load_tiles()
if np.isscalar(program):
passes = sorted(list(set(tiles['PASS'][tiles['PROGRAM'] == program])))
if len(passes) > 0:
return passes
else:
known_programs = set(tiles['PROGRAM'])
msg = 'Unknown program {}; known programs are {}'.format(
program, known_programs)
raise ValueError(msg)
else:
program = np.asarray(program)
passes = [None,] * len(program)
for thisprogram in np.unique(program):
thesepasses = program2pass(thisprogram)
for i in np.where(program == thisprogram)[0]:
passes[i] = thesepasses
return passes
def radec2pix(nside, ra, dec):
'''Convert `ra`, `dec` to nested pixel number.
Args:
nside (int): HEALPix `nside`, ``2**k`` where 0 < k < 30.
ra (float or array): Right Accention in degrees.
dec (float or array): Declination in degrees.
Returns:
Array of integer pixel numbers using nested numbering scheme.
Notes:
This is syntactic sugar around::
hp.ang2pix(nside, ra, dec, lonlat=True, nest=True)
but also works with older versions of healpy that didn't have
`lonlat` yet.
'''
import healpy as hp
theta, phi = np.radians(90-dec), np.radians(ra)
if np.isnan(np.sum(theta)) :
raise ValueError("some NaN theta values")
if np.sum((theta < 0)|(theta > np.pi))>0 :
raise ValueError("some theta values are outside [0,pi]: {}".format(theta[(theta < 0)|(theta > np.pi)]))
return hp.ang2pix(nside, theta, phi, nest=True)
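# Illustrative sketch (editorial addition): converting one or many sky positions
# to nside=64 nested pixel numbers; scalar and array inputs broadcast exactly as
# hp.ang2pix does.
# >>> radec2pix(64, 150.0, 2.2)                                      # doctest: +SKIP
# >>> radec2pix(64, np.array([150.0, 30.0]), np.array([2.2, -10.0]))  # doctest: +SKIP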
def tiles2pix(nside, tiles=None, radius=None, per_tile=False, fact=2**7):
'''Returns sorted array of pixels that overlap the tiles.
Args:
nside (int): HEALPix `nside`, ``2**k`` where 0 < k < 30.
tiles (array-like or Table-like, optional): Integer tile IDs,
or ``None`` to use all DESI tiles from
:func:`desimodel.io.load_tiles`.
radius (float, optional): tile radius in degrees;
if ``None`` use :func:`desimodel.focalplane.get_tile_radius_deg`.
per_tile (bool, optional): If ``True``, return a list of arrays of
pixels per tile.
fact (int, optional): Factor healpy uses to resolve pixel overlaps.
When this is large there are fewer false positives at the expense
of run time (although ``fact=2**8`` seems fast). Must be a
power of 2.
Returns:
Integer array of pixel numbers that cover these tiles; or
if per_tile is `True`, returns list of arrays such that ``pixels[i]``
is an array of pixel numbers covering ``tiles[i]``.
'''
import healpy as hp
from .focalplane import get_tile_radius_deg
if tiles is None:
tiles = load_tiles()
if radius is None:
radius = get_tile_radius_deg()
theta, phi = np.radians(90-tiles['DEC']), np.radians(tiles['RA'])
vec = hp.ang2vec(theta, phi)
ipix = [hp.query_disc(nside, vec[i], radius=np.radians(radius),
inclusive=True, nest=True, fact=fact) for i in range(len(tiles))]
if per_tile:
return ipix
else:
return np.sort(np.unique(np.concatenate(ipix)))
def tileids2pix(nside, tileids, radius=None, per_tile=False):
'''Like :func:`~desimodel.footprint.tiles2pix`, but accept integer
tileid or list of tileids instead of table of tiles.
'''
tiles = load_tiles()
ii = np.in1d(tiles['TILEID'], tileids)
if np.count_nonzero(ii) == np.asarray(tileids).size:
return tiles2pix(nside, tiles[ii], radius=radius, per_tile=per_tile)
else:
extra = set(tileids) - set(tiles['TILEID'])
raise ValueError('{}/{} TILEID(s) not in DESI footprint: {}'.format(
len(extra), len(tileids), extra))
def tiles2fracpix(nside, step=1, tiles=None, radius=None, fact=2**7):
'''Returns a sorted array of just the *fractional* pixels that overlap the
tiles.
Args:
nside (int): HEALPix `nside`, ``2**k`` where 0 < k < 30.
step (int, optional): The number of integration steps around the edges
of a HEALPix pixel. ``step=1`` means just the pixel vertices.
``step=2`` means the vertices and the corners and the points halfway
between the vertices. See also the
`HEALPix boundary document <http://healpy.readthedocs.io/en/latest/generated/healpy.boundaries.html>`_ .
tiles (Table-like, optional): Table-like with RA,DEC columns; or
``None`` to use all DESI tiles from :func:`desimodel.io.load_tiles`.
radius (float, optional): Tile radius in degrees;
if ``None`` use :func:`desimodel.focalplane.get_tile_radius_deg`.
fact (int, optional): Factor healpy uses to resolve pixel overlaps.
When this is large there are fewer false positives at the expense
of run time (although ``fact=2**8`` seems fast). Must be a
power of 2.
Returns:
Integer array of pixel numbers that cover these tiles, *excluding
pixels that fully overlap the tiles* (*i.e.*, just pixels that
*partially* overlap the tiles). The integers are sorted.
Notes:
There are potentially pathological cases where a pixel just brushes
a tile, such that there is a very small area where the pixel overlaps
the tile. To guard against these case, call this function with
progressively larger step values until it converges.
'''
#ADM set up healpy and set default tiles and radius
import healpy as hp
from .focalplane import get_tile_radius_deg
if tiles is None:
tiles = load_tiles()
if radius is None:
radius = get_tile_radius_deg()
#ADM obtain ALL pixels that overlap the tiles (and perhaps a
#ADM few more if fact is a small number)
pix = tiles2pix(nside, tiles=tiles, radius=radius, fact=fact)
#ADM the recovered number of pixels, and the total number of points
#ADM that will be integrated around the boundary of the pixel
npix = len(pix)
nvertsperpix = 4*step
#ADM find points around the boundary of all pixels in Cartesian coordinates
xyzverts = hp.boundaries(nside,pix,step=step,nest=True)
#ADM convert to RA/Dec
theta, phi = hp.vec2ang(np.hstack(xyzverts).T)
ra, dec = np.degrees(phi), 90-np.degrees(theta)
#ADM calculate which boundary points are in the tiles
verts_in = is_point_in_desi(tiles, ra, dec, radius=radius)
#ADM reshape this into an array with nvertsperpix columns
pix_verts_in = np.reshape(verts_in,(npix,nvertsperpix))
#ADM any row with a column not in the tiles must be a fractional pixel
isfracpix = ~np.all(pix_verts_in,axis=1)
#ADM the pixel integers where pixels are fractional
return pix[np.where(isfracpix)]
def pixweight(nside, tiles=None, radius=None, precision=0.01, outfile=None, outplot=None):
'''Create an array of the fraction of each pixel that overlaps the passed tiles.
Args:
nside (int): HEALPix `nside`, ``2**k`` where 0 < k < 30.
tiles (Table-like, optional): Table-like with RA,DEC columns; or
``None`` to use all DESI tiles from :func:`desimodel.io.load_tiles`.
radius (float, optional): Tile radius in degrees;
if `None` use :func:`desimodel.focalplane.get_tile_radius_deg`.
precision (float, optional): Approximate precision at which to
calculate the area of pixels that partially overlap the footprint
in SQUARE DEGREES (*e.g.* 0.01 means precise to
0.01 sq. deg., or 36 sq. arcmin.). Lower numbers mean better precision.
outfile (str, optional): Write the pixel->weight array to the file
passed as `outfile` (could be full directory path + file).
outplot (str, optional): Create a plot named `outplot`
(pass a *name* for a plot in the current directory, a *full path*
for a plot in a different directory). This is passed to
matplotlib.pyplot's savefig routine.
Returns pixweight:
An array of the weight for each pixel at the passed nside. The
weight is the fraction of the pixel that overlaps the passed tiles:
`WEIGHT=1` for the pixel is entirely contained in the tiles;
`WEIGHT=0` for the pixel is entirely outside of the tiles;
`0 < WEIGHT < 1` for a pixel that overlaps the tiles.
The index of the array is the HEALPixel integer.
Notes:
It is sufficient to create the weights at a suitably high nside, say
nside=256 (0.052456 sq. deg. per pixel) as pixel numbers at
lower nsides can be obtained by integer division by powers of 4, *e.g.*
pix_at_nside_128 = pix_at_nside_256 // 4, and fractional weights at lower
nsides are the mean of the 4 pixels at the higher nside
:func:`desimodel.io.load_pixweight` can downsample the array to lower nsides.
'''
t0 = time()
# ADM if tiles or radius is None, load the DESI model defaults.
from .focalplane import get_tile_radius_deg
if tiles is None:
tiles = load_tiles()
if radius is None:
radius = get_tile_radius_deg()
#ADM create an array that is zero for each integer pixel at this nside
import healpy as hp
npix = hp.nside2npix(nside)
weight = np.zeros(npix,float)
#ADM recover pixels that are likely to be in the DESI footprint and
#ADM set their weight to one (it's the case, then, that anything that
#ADM is *definitely outside of* the footprint has a weight of zero)
pix = tiles2pix(nside, tiles=tiles, radius=radius, fact=2**8)
weight[pix] = 1.
#ADM loop through to find the "edge" (fractional) pixels, until convergence
log.info('Start integration around partial pixels...')
setfracpix = set([-1])
#ADM only have a limited range, to prevent this running forever
for i in range(20):
log.info('Trying {} pixel boundary points (step={})...t={:.1f}s'
.format(4*2**i,2**i,time()-t0))
#ADM find the fractional pixels at this step
fracpix = tiles2fracpix(nside, step=2**i, tiles=tiles, radius=radius,
fact=2**8)
log.info('...found {} fractional pixels...t={:.1f}s'
.format(len(fracpix),time()-t0))
if set(fracpix) == setfracpix:
break
#ADM if we didn't converge, loop through again with the new
#ADM set of fractional pixels
setfracpix = set(fracpix)
#ADM warn the user if the integration didn't converge at 4*2**20 boundary points
if i == 20:
log.warning('Integration around pixel boundaries did NOT converge!')
#ADM create a mask that is True for fractional pixels, false for all other pixels
mask = np.zeros(npix,bool)
mask[fracpix] = True
#ADM find the minimum and maximum dec of interest (there's no need to Monte Carlo
#ADM integrate over declinations that lie beyond the fractional pixels)
xyzverts = hp.boundaries(nside,fracpix,nest=True)
theta, phi = hp.vec2ang(np.hstack(xyzverts).T)
ra, dec = np.degrees(phi), 90-np.degrees(theta)
decmin, decmax = np.min(dec), np.max(dec)
sindecmin, sindecmax = np.sin(np.radians(decmin)), np.sin(np.radians(decmax))
area = 360.*np.degrees(sindecmax-sindecmin)
log.info('Populating randoms between {:.2f} and {:.2f} degrees, an area of {:.1f} sq. deg....t={:.1f}s'
.format(decmin,decmax,area,time()-t0))
#ADM determine the required precision for the area of interest
nptpersqdeg = int((1./precision)**2)
npt = int(nptpersqdeg * area)
log.info('Generating {} random points...t={:.1f}s'.format(npt,time()-t0))
#ADM loop over chunks (if npt > 1e7) to reach npt points while avoiding memory issues
nchunk = int(1e7)
pixinmask = []
rainmask = []
decinmask = []
cnt = 0
while cnt < npt:
#ADM if a chunk would pass too many points (> npt), revert to the remaining number
#ADM of points instead of creating a full chunk
if nchunk + cnt > npt:
nchunk = npt - cnt
#ADM populate the portion of the sphere of interest with random points
ra = np.random.uniform(0.,360.,nchunk)
dec = np.degrees(np.arcsin(1.-np.random.uniform(1-sindecmax,1-sindecmin,nchunk)))
#ADM convert the random points to pixel number
pix = radec2pix(nside,ra,dec)
#ADM retain random points for which the mask is True (i.e. just the fractional pixels)
inmask = np.where(mask[pix])[0]
decinmask.append(dec[inmask])
rainmask.append(ra[inmask])
pixinmask.append(pix[inmask])
cnt += nchunk
log.info('...generated {} random points...t={:.1f}s'
.format(cnt,time()-t0))
#ADM collapse the 2-D chunks into a 1-D array
from itertools import chain
rainmask = np.array(list(chain.from_iterable(rainmask)))
decinmask = np.array(list(chain.from_iterable(decinmask)))
pixinmask = np.array(list(chain.from_iterable(pixinmask)))
log.info('{} of the random points are in fractional pixels...t={:.1f}s'
.format(len(pixinmask),time()-t0))
#ADM find which random points in the fractional pixels are in the DESI footprint
log.info('Start integration over fractional pixels at edges of DESI footprint...')
indesi = is_point_in_desi(tiles,rainmask,decinmask)
log.info('...{} of the random points in fractional pixels are in DESI...t={:.1f}s'
.format(np.sum(indesi),time()-t0))
#ADM assign the weights of the fractional pixels as the fraction of random points
#ADM in the fractional pixels that are in the DESI footprint
allinfracpix = np.histogram(pixinmask,bins=np.arange(npix))[0][fracpix]
desiinfracpix = np.histogram(pixinmask[np.where(indesi)],bins=np.arange(npix))[0][fracpix]
#ADM guard against integer division (for backwards-compatibility with Python 2)
#ADM and create the final array of weights
weight[fracpix] = desiinfracpix.astype('float64')/allinfracpix
if outfile is not None:
#ADM write information indicating HEALPix setup to file header
#ADM include desimodel version as a check in case footprint changes
import fitsio
from desiutil import depend
hdr = fitsio.FITSHDR()
depend.setdep(hdr, 'desimodel', desimodel_version)
hdr['PRECISE'] = precision
hdr['HPXNSIDE'] = nside
hdr['HPXNEST'] = True
fitsio.write(outfile, weight, extname='PIXWEIGHTS', header=hdr, clobber=True)
#ADM if outplot was passed, make a plot of the final mask in Mollweide projection
if outplot is not None:
import matplotlib.pyplot as plt
hp.mollview(weight, nest=True)
plt.savefig(outplot)
log.info('Done...t={:.1f}s'.format(time()-t0))
return weight
def pix2tiles(nside, pixels, tiles=None, radius=None):
'''Returns subset of tiles that overlap the list of pixels.
Args:
nside (int): HEALPix `nside`, ``2**k`` where 0 < k < 30.
pixels (array-like): Array of integer pixels using nested numbering scheme.
tiles (Table-like, optional): Table-like with RA,DEC columns; or
``None`` to use all DESI tiles from :func:`desimodel.io.load_tiles`.
radius (float, optional): Tile radius in degrees;
if `None` use :func:`desimodel.focalplane.get_tile_radius_deg`.
Returns:
Table of tiles that cover these pixels.
TODO: add support for tiles as integers or list/array of integer TILEIDs.
'''
import healpy as hp
from .focalplane import get_tile_radius_deg
if tiles is None:
tiles = load_tiles()
if radius is None:
radius = get_tile_radius_deg()
#- Trim tiles to ones that *might* overlap these pixels
theta, phi = hp.pix2ang(nside, pixels, nest=True)
ra, dec = np.degrees(phi), 90 - np.degrees(theta)
pixsize = np.degrees(hp.nside2resol(nside))
ii = find_tiles_over_point(tiles, ra, dec, radius=radius+pixsize)
if np.isscalar(pixels):
tiles = tiles[ii]
else:
ii = np.unique(np.concatenate(ii))
tiles = tiles[ii]
#- Now check in detail
theta, phi = np.radians(90-tiles['DEC']), np.radians(tiles['RA'])
vec = hp.ang2vec(theta, phi)
ii = list()
for i in range(len(tiles)):
tilepix = hp.query_disc(nside, vec[i], radius=np.radians(radius), inclusive=True, nest=True)
if np.any(np.in1d(pixels, tilepix)):
ii.append(i)
return tiles[ii]
def _embed_sphere(ra, dec):
"""Embed `ra`, `dec` to a uniform sphere in three dimensions.
"""
phi = np.radians(np.asarray(ra))
theta = np.radians(90.0 - np.asarray(dec))
r = np.sin(theta)
x = r * np.cos(phi)
y = r * np.sin(phi)
z = np.cos(theta)
return np.array((x, y, z)).T
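# Editorial note: with this unit-sphere embedding, two points separated by an
# angle alpha on the sky are a chord distance 2*sin(alpha/2) apart in 3-D, which
# is why the KD-tree queries below compare against 2*sin(radius/2).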
def is_point_in_desi(tiles, ra, dec, radius=None, return_tile_index=False):
"""If a point (`ra`, `dec`) is within `radius` distance from center of any
tile, it is in DESI.
Args:
tiles (Table-like): The output of :func:`desimodel.io.load_tiles`, or
a similar Table.
ra (scalar or array-like): Right Ascension in degrees.
dec (scalar or array-like): Declination in degrees. The size of `dec`
must match the size of `ra`.
radius (float, optional): Tile radius in degrees;
if `None` use :func:`desimodel.focalplane.get_tile_radius_deg`.
return_tile_index (bool, optional): If ``True``, return the index of
the nearest tile in tiles array.
Returns:
Return ``True`` if points given by `ra`, `dec` lie in the set of `tiles`.
Notes:
This function is optimized to query a lot of points.
"""
from scipy.spatial import cKDTree as KDTree
from .focalplane import get_tile_radius_deg
if radius is None:
radius = get_tile_radius_deg()
tilecenters = _embed_sphere(tiles['RA'], tiles['DEC'])
tree = KDTree(tilecenters)
# radius to 3d distance
threshold = 2.0 * np.sin(np.radians(radius) * 0.5)
xyz = _embed_sphere(ra, dec)
if not xyz.flags['C_CONTIGUOUS']:
xyz = xyz.copy()
d, i = tree.query(xyz, k=1)
indesi = d < threshold
if return_tile_index:
return indesi, i
else:
return indesi
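# Illustrative sketch (editorial addition; assumes the standard tile file is
# available to load_tiles()):
# >>> tiles = load_tiles()                                          # doctest: +SKIP
# >>> indesi, itile = is_point_in_desi(tiles, [10.0, 200.0], [5.0, 60.0],
# ...                                  return_tile_index=True)      # doctest: +SKIP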
def find_tiles_over_point(tiles, ra, dec, radius=None):
"""Return a list of indices of tiles that covers the points.
This function is optimized to query a lot of points.
radius is in units of degrees. The return value is an array
of list objects that are the indices of tiles that cover each point.
The indices are not sorted in any particular order.
if ra, dec are scalars, a single list is returned.
default radius is from desimodel.focalplane.get_tile_radius_deg()
"""
from scipy.spatial import cKDTree as KDTree
from .focalplane import get_tile_radius_deg
if radius is None:
radius = get_tile_radius_deg()
tilecenters = _embed_sphere(tiles['RA'], tiles['DEC'])
tree = KDTree(tilecenters)
# radius to 3d distance
threshold = 2.0 * np.sin(np.radians(radius) * 0.5)
xyz = _embed_sphere(ra, dec)
if not xyz.flags['C_CONTIGUOUS']:
xyz = xyz.copy()
indices = tree.query_ball_point(xyz, threshold)
return indices
def find_points_in_tiles(tiles, ra, dec, radius=None):
"""Return a list of indices of points that are within each provided tile(s).
This function is optimized to query a lot of points with relatively few tiles.
radius is in units of degrees. The return value is an array
of lists that contains the index of points that are in each tile.
The indices are not sorted in any particular order.
if tiles is a scalar, a single list is returned.
default radius is from desimodel.focalplane.get_tile_radius_deg()
"""
return find_points_radec(tiles['RA'], tiles['DEC'], ra, dec, radius)
def find_points_radec(telra, teldec, ra, dec, radius = None):
"""Return a list of indices of points that are within a radius of an arbitrary telra, teldec.
This function is optimized to query a lot of points with a single telra and teldec.
radius is in units of degrees. The return value is a list
that contains the index of points that are in each tile.
The indices are not sorted in any particular order.
if tiles is a scalar, a single list is returned.
default radius is from desimodel.focalplane.get_tile_radius_deg()
Note: This is simply a modified version of find_points_in_tiles, but this function does not know about tiles.
"""
from scipy.spatial import cKDTree as KDTree
from .focalplane import get_tile_radius_deg
if radius is None:
radius = get_tile_radius_deg()
# check for malformed input shapes. Sorry we currently only
# deal with vector inputs. (for a sensible definition of indices)
assert ra.ndim == 1
assert dec.ndim == 1
points = _embed_sphere(ra, dec)
tree = KDTree(points)
# radius to 3d distance
threshold = 2.0 * np.sin(np.radians(radius) * 0.5)
xyz = _embed_sphere(telra, teldec)
if not xyz.flags['C_CONTIGUOUS']:
xyz = xyz.copy()
indices = tree.query_ball_point(xyz, threshold)
return indices
def get_tile_radec(tileid):
"""Get the coordinates of a tile.
Args:
tileid (int): ID of a tile.
Returns:
tuple: (ra, dec) in degrees for the requested `tileid`.
Raises:
ValueError: If tileid is not in list of known tiles.
"""
tiles = load_tiles()
if tileid in tiles['TILEID']:
i = np.where(tiles['TILEID'] == tileid)[0][0]
return tiles[i]['RA'], tiles[i]['DEC']
else:
raise ValueError('Unknown tileid {}'.format(tileid))
| bsd-3-clause |
goldmanm/tools | analysis.py | 1 | 9967 | # -*- coding: utf-8 -*-
import numpy as np
import cantera as ct
import pandas as pd
import re
import warnings
import copy
###################################
# 3b. output data analysis
###################################
def branching_ratios(df, solution, compound, production = False):
"""
This method looks at the consumption pathways of `compound` over
all time points in the data set.
It outputs a pandas.DataFrame which contains columns of pertinant reactions
and values of the branching ratio of each reaction which is defined as
$BR_{i} = \frac{ROC_i}{\Sigma_{j=0}^{j=N} ROC_j }$
where $i$ is the reaction in question, $ROC$ is the rate of consumption of
the desired species, and $N$ is the number of reactions, and $BR$ is the branching ratio.
df = dataframe of run data
solution = cantera solution object
compound = species string which you want to identify
production = if True, shows the reactions forming species X
This method only works on forward reactions
"""
reaction_dataframe = weight_reaction_dataframe_by_stoich_coefficients(df,solution,compound)
if not production:
#only keep consumption
consumption_terms = reaction_dataframe[reaction_dataframe < 0]
df = consumption_terms.dropna('columns','all')
else:
production_terms = reaction_dataframe[reaction_dataframe > 0]
df = production_terms.dropna('columns','all')
total = df.sum('columns')
branching_ratios = df.div(total,'index')
branching_ratios = branching_ratios.fillna(0)
#sort from most important
importance_index = branching_ratios.sum('index').sort_values(ascending=False)
branching_ratios = branching_ratios.reindex(importance_index.index,axis='columns')
return branching_ratios
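# Illustrative sketch (editorial addition; `gas` and `net_rates` are hypothetical
# objects, a cantera Solution and a DataFrame of net reaction rates over time):
# >>> gas = ct.Solution('gri30.cti')                                # doctest: +SKIP
# >>> ratios = branching_ratios(net_rates, gas, 'CH3')              # doctest: +SKIP
# >>> ratios.iloc[-1].head()  # dominant CH3-consuming channels at the last time  # doctest: +SKIP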
def consumption_pathways(solution,df,species, time = 'all'):
"""
returns the total rate of production for a particular species at the specified
time(s). Positive values indicate production; negative values indicate consumption.
If multiple times are given or the keyword 'all' is used, the output is a DataFrame
with indexes the various times. If only one time is supplied, the output is a
Series.
solution = cantera solution object
df = pandas dataframe of reactions
species = string of species
time = number describing the time points to determine consumption (or list of numbers)
"""
if time=='all':
time = list(df.index)
if isinstance(time,list):
# recursively run consumption_pathways
consumption_values = []
for t in time:
consumption_values.append(consumption_pathways(solution=solution,
df=df,
species=species,
time= t))
consumption_values = pd.DataFrame(consumption_values, index=time)
# sort by total sum of flux
sorted_index = consumption_values.sum('index').sort_values().keys()
return consumption_values[sorted_index]
# the time is not a list, return a pd.Series
try:
reactions_weighted = find_reactions(solution, df,species).loc[time,:]
except KeyError:
reactions_weighted = find_reactions(solution, df,species).loc[return_nearest_time_index(time,df.index, index=False),:]
# weight by stoichiometric_coefficients
stoich_coeffs = [obtain_stoichiometry_of_species(solution, species, reaction) for reaction in reactions_weighted.index]
stoich_coeff_dict = pd.Series(dict(zip(reactions_weighted.index,stoich_coeffs)))
# pandas was having some bug, so manually rewrote the line below
#reactions_weighted *= stoich_coeff_dict
for index in stoich_coeff_dict.index:
reactions_weighted[index] *= stoich_coeff_dict[index]
return reactions_weighted.sort_values()
def quasi_steady_state(df, species):
"""
This method outputs the key parameter, $\frac{|ROP-ROC|}{ROP}$, in quasi steady state
approximation.
df = pd.DataFrame containing get_rop_and_roc_series
species = string of species to use
returns a pd.Series of the QSS approximation: $\frac{|ROP-ROC|}{ROP}$
"""
return (df['production',species] - df['consumption',species]).abs() / df['production',species]
def compare_species_profile_at_one_time(desired_time, df1,df2,
minimum_return_value=1e-13,
time_string = 'time (s)'):
"""
compares the species profile between two models closest to the desired time
returns a pandas.Series object with the relative species concentrations
given by `compare_2_data_sets`
"""
time_index_1 = return_nearest_time_index(desired_time,df1[time_string])
time_index_2 = return_nearest_time_index(desired_time,df2[time_string])
time_slice_1 = find_species(df1).loc[time_index_1]
time_slice_2 = find_species(df2).loc[time_index_2]
return _compare_2_data_sets(time_slice_1,time_slice_2,minimum_return_value)
def _compare_2_data_sets(model1, model2, minimum_return_value = 1000,diff_returned=0.0):
"""given two pd.Series of data, returns a pd.Series with the relative
differences between the two sets. This requires one of the values to be
above `minimum_return_value` and the difference to be above `diff_returned`.
The difference is returned as $\frac{model1 - model2}{\min(model1,model2)}$.
Where the minimum merges the two datasets using the minimum value at each index.
"""
#ensure all values are the same
model1 = copy.deepcopy(model1)[model2.index].dropna()
model2 = copy.deepcopy(model2)[model1.index].dropna()
minimum_value = pd.DataFrame({'model1':model1,'model2':model2}).min(1)
compared_values = ((model1-model2)/minimum_value).dropna()
for label in compared_values.index:
not_enough_value = (model1[label] < minimum_return_value and model2[label] < minimum_return_value)
not_enough_difference = abs(compared_values[label]) < diff_returned
if not_enough_value or not_enough_difference:
compared_values[label] = np.nan
compared_values = compared_values.dropna()
return compared_values.sort_values()
def return_nearest_time_index(desired_time,time_series,index=True):
"""
input the desired time, double, and time_series, pd.Series,
returns the index of the time_series.
If you want the actual time value, change index=False
"""
# commented out due to an error in np.argmin
#nearest_value = lambda value, array: np.argmin(abs(value-array))
#if index:
# return nearest_value(desired_time,time_series)
#return time_series[nearest_value(desired_time,time_series)]
deviation_list = abs(desired_time-time_series)
min_deviation = min(deviation_list)
index_value = list(deviation_list).index(min_deviation)
if index:
return index_value
return time_series[index_value]
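# Editorial note: the explicit loop above is equivalent to
# int(np.argmin(np.abs(desired_time - np.asarray(time_series)))); it was kept
# spelled out because of the argmin issue mentioned in the comment.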
def obtain_stoichiometry_of_species(solution, species, reaction):
"""
this method finds a reaction string in the cantera solution file, and
returns its stoichiometric coefficient of the specified species.
Returns a negative value if the species is a reactant.
solution = cantera solution object
species = string of species name
reaction = reaction string or list of reaction strings.
Stoichiometry is calculated by: product_stoich_coeff - reactant_stoich_coeff
"""
# recursively deal with lists of reactions
if not isinstance(reaction,str):
coefficients = np.empty(len(reaction))
for index, reaction_string in enumerate(reaction):
coefficients[index] = obtain_stoichiometry_of_species(solution,species,reaction_string)
return coefficients
# deal with individual reactions
assert isinstance(reaction,str)
reaction_index = solution.reaction_equations().index(reaction)
reactant_stoich_coeff = solution.reactant_stoich_coeff(species, reaction_index)
product_stoich_coeff = solution.product_stoich_coeff(species, reaction_index)
if product_stoich_coeff > 0 or reactant_stoich_coeff > 0:
return product_stoich_coeff - reactant_stoich_coeff
raise Exception('Species {} is not in reaction {}'.format(species,reaction))
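# Illustrative sketch (editorial addition; the mechanism and exact reaction
# string are hypothetical): for a recombination 2 CH3 (+M) <=> C2H6 (+M), the
# net stoichiometry of CH3 is 0 - 2 = -2.
# >>> gas = ct.Solution('gri30.cti')                                # doctest: +SKIP
# >>> obtain_stoichiometry_of_species(gas, 'CH3',
# ...                                 '2 CH3 (+M) <=> C2H6 (+M)')   # doctest: +SKIP
# -2.0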
def weight_reaction_dataframe_by_stoich_coefficients(df, solution, species):
"""
returns a dataframe of reactions over time weighted by the stoichiometric
coefficient of the species string `species`.
"""
reactions = find_reactions( solution, df, species)
reaction_strings = list(reactions.columns)
stoichiometries = obtain_stoichiometry_of_species(solution,
species,
reaction_strings)
return reactions * stoichiometries
def find_reactions(solution, df,species):
"""
finds the reaction columns in the net_reaction dataframe which contain
the species specified and returns them.
"""
included_columns = []
rxn_string_to_rxn_index = dict(zip(solution.reaction_equations(),range(solution.n_reactions)))
for rxn_name in df.columns:
sln_index = rxn_string_to_rxn_index[rxn_name]
try:
if solution.product_stoich_coeff(species,sln_index) !=0 or \
solution.reactant_stoich_coeff(species,sln_index) !=0:
included_columns.append(rxn_name)
except KeyError:
print("Error obtained in find_reactions,\ncheck to ensure the columns in `df`\ncorrespond to the reactions in `solution`")
raise
df_my_reactions = df[included_columns]
if df_my_reactions.empty:
raise Exception('No reactions found for species {}'.format(species))
return df_my_reactions
| mit |
alephu5/Soundbyte | environment/lib/python3.3/site-packages/matplotlib/sankey.py | 1 | 40142 | #!/usr/bin/env python
"""
Module for creating Sankey diagrams using matplotlib
"""
__author__ = "Kevin L. Davies"
__credits__ = ["Yannick Copin"]
__license__ = "BSD"
__version__ = "2011/09/16"
# Original version by Yannick Copin ([email protected]) 10/2/2010, available
# at:
# http://matplotlib.org/examples/api/sankey_demo_old.html
# Modifications by Kevin Davies ([email protected]) 6/3/2011:
# --Used arcs for the curves (so that the widths of the paths are uniform)
# --Converted the function to a class and created methods to join multiple
# simple Sankey diagrams
# --Provided handling for cases where the total of the inputs isn't 100
# Now, the default layout is based on the assumption that the inputs sum to
# 1. A scaling parameter can be used in other cases.
# --The call structure was changed to be more explicit about layout,
# including the length of the trunk, length of the paths, gap between the
# paths, and the margin around the diagram.
# --Allowed the lengths of paths to be adjusted individually, with an option
# to automatically justify them
# --The call structure was changed to make the specification of path
# orientation more flexible. Flows are passed through one array, with
# inputs being positive and outputs being negative. An orientation
# argument specifies the direction of the arrows. The "main"
# inputs/outputs are now specified via an orientation of 0, and there may
# be several of each.
# --Added assertions to catch common calling errors
# --Added the physical unit as a string argument to be used in the labels, so
# that the values of the flows can usually be applied automatically
# --Added an argument for a minimum magnitude below which flows are not shown
# --Added a tapered trunk in the case that the flows do not sum to 0
# --Allowed the diagram to be rotated
import numpy as np
from matplotlib.cbook import iterable, Bunch
from matplotlib.path import Path
from matplotlib.patches import PathPatch
from matplotlib.transforms import Affine2D
from matplotlib import verbose
from matplotlib import docstring
# Angles [deg/90]
RIGHT = 0
UP = 1
# LEFT = 2
DOWN = 3
class Sankey:
"""
Sankey diagram in matplotlib
Sankey diagrams are a specific type of flow diagram, in which
the width of the arrows is shown proportionally to the flow
quantity. They are typically used to visualize energy or
material or cost transfers between processes.
`Wikipedia (6/1/2011) <http://en.wikipedia.org/wiki/Sankey_diagram>`_
"""
def __init__(self, ax=None, scale=1.0, unit='', format='%G', gap=0.25,
radius=0.1, shoulder=0.03, offset=0.15, head_angle=100,
margin=0.4, tolerance=1e-6, **kwargs):
"""
Create a new Sankey instance.
Optional keyword arguments:
=============== ===================================================
Field Description
=============== ===================================================
*ax* axes onto which the data should be plotted
If *ax* isn't provided, new axes will be created.
*scale* scaling factor for the flows
*scale* sizes the width of the paths in order to
maintain proper layout. The same scale is applied
to all subdiagrams. The value should be chosen
such that the product of the scale and the sum of
the inputs is approximately 1.0 (and the product of
the scale and the sum of the outputs is
approximately -1.0).
*unit* string representing the physical unit associated
with the flow quantities
If *unit* is None, then none of the quantities are
labeled.
*format* a Python number formatting string to be used in
labeling the flow as a quantity (i.e., a number
times a unit, where the unit is given)
*gap* space between paths that break in/break away
to/from the top or bottom
*radius* inner radius of the vertical paths
          *shoulder*        size of the shoulders of output arrows
*offset* text offset (from the dip or tip of the arrow)
*head_angle* angle of the arrow heads (and negative of the angle
of the tails) [deg]
*margin* minimum space between Sankey outlines and the edge
of the plot area
*tolerance* acceptable maximum of the magnitude of the sum of
flows
The magnitude of the sum of connected flows cannot
be greater than *tolerance*.
=============== ===================================================
The optional arguments listed above are applied to all subdiagrams so
that there is consistent alignment and formatting.
If :class:`Sankey` is instantiated with any keyword arguments other
than those explicitly listed above (``**kwargs``), they will be passed
to :meth:`add`, which will create the first subdiagram.
In order to draw a complex Sankey diagram, create an instance of
:class:`Sankey` by calling it without any kwargs::
sankey = Sankey()
Then add simple Sankey sub-diagrams::
sankey.add() # 1
sankey.add() # 2
#...
sankey.add() # n
Finally, create the full diagram::
sankey.finish()
Or, instead, simply daisy-chain those calls::
Sankey().add().add... .add().finish()
.. seealso::
:meth:`add`
:meth:`finish`
**Examples:**
.. plot:: mpl_examples/api/sankey_demo_basics.py
"""
# Check the arguments.
assert gap >= 0, (
"The gap is negative.\nThis isn't allowed because it "
"would cause the paths to overlap.")
assert radius <= gap, (
"The inner radius is greater than the path spacing.\n"
"This isn't allowed because it would cause the paths to overlap.")
assert head_angle >= 0, (
"The angle is negative.\nThis isn't allowed "
"because it would cause inputs to look like "
"outputs and vice versa.")
assert tolerance >= 0, (
"The tolerance is negative.\nIt must be a magnitude.")
# Create axes if necessary.
if ax is None:
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, xticks=[], yticks=[])
self.diagrams = []
# Store the inputs.
self.ax = ax
self.unit = unit
self.format = format
self.scale = scale
self.gap = gap
self.radius = radius
self.shoulder = shoulder
self.offset = offset
self.margin = margin
self.pitch = np.tan(np.pi * (1 - head_angle / 180.0) / 2.0)
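        # *pitch* is the slope of the arrow faces:
        # tan((180 - head_angle) / 2 [deg]), i.e. cot(head_angle / 2).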
self.tolerance = tolerance
# Initialize the vertices of tight box around the diagram(s).
self.extent = np.array((np.inf, -np.inf, np.inf, -np.inf))
# If there are any kwargs, create the first subdiagram.
if len(kwargs):
self.add(**kwargs)
def _arc(self, quadrant=0, cw=True, radius=1, center=(0, 0)):
"""
Return the codes and vertices for a rotated, scaled, and translated
90 degree arc.
Optional keyword arguments:
=============== ==========================================
Keyword Description
=============== ==========================================
*quadrant* uses 0-based indexing (0, 1, 2, or 3)
*cw* if True, clockwise
*center* (x, y) tuple of the arc's center
=============== ==========================================
"""
# Note: It would be possible to use matplotlib's transforms to rotate,
# scale, and translate the arc, but since the angles are discrete,
# it's just as easy and maybe more efficient to do it here.
ARC_CODES = [Path.LINETO,
Path.CURVE4,
Path.CURVE4,
Path.CURVE4,
Path.CURVE4,
Path.CURVE4,
Path.CURVE4]
# Vertices of a cubic Bezier curve approximating a 90 deg arc
# These can be determined by Path.arc(0,90).
ARC_VERTICES = np.array([[1.00000000e+00, 0.00000000e+00],
[1.00000000e+00, 2.65114773e-01],
[8.94571235e-01, 5.19642327e-01],
[7.07106781e-01, 7.07106781e-01],
[5.19642327e-01, 8.94571235e-01],
[2.65114773e-01, 1.00000000e+00],
# Insignificant
#[6.12303177e-17, 1.00000000e+00]])
[0.00000000e+00, 1.00000000e+00]])
if quadrant == 0 or quadrant == 2:
if cw:
vertices = ARC_VERTICES
else:
vertices = ARC_VERTICES[:, ::-1] # Swap x and y.
elif quadrant == 1 or quadrant == 3:
# Negate x.
if cw:
# Swap x and y.
vertices = np.column_stack((-ARC_VERTICES[:, 1],
ARC_VERTICES[:, 0]))
else:
vertices = np.column_stack((-ARC_VERTICES[:, 0],
ARC_VERTICES[:, 1]))
if quadrant > 1:
radius = -radius # Rotate 180 deg.
return list(zip(ARC_CODES, radius * vertices +
np.tile(center, (ARC_VERTICES.shape[0], 1))))
def _add_input(self, path, angle, flow, length):
"""
Add an input to a path and return its tip and label locations.
"""
if angle is None:
return [0, 0], [0, 0]
else:
x, y = path[-1][1] # Use the last point as a reference.
dipdepth = (flow / 2) * self.pitch
if angle == RIGHT:
x -= length
dip = [x + dipdepth, y + flow / 2.0]
path.extend([(Path.LINETO, [x, y]),
(Path.LINETO, dip),
(Path.LINETO, [x, y + flow]),
(Path.LINETO, [x + self.gap, y + flow])])
label_location = [dip[0] - self.offset, dip[1]]
else: # Vertical
x -= self.gap
if angle == UP:
sign = 1
else:
sign = -1
dip = [x - flow / 2, y - sign * (length - dipdepth)]
if angle == DOWN:
quadrant = 2
else:
quadrant = 1
# Inner arc isn't needed if inner radius is zero
if self.radius:
path.extend(self._arc(quadrant=quadrant,
cw=angle == UP,
radius=self.radius,
center=(x + self.radius,
y - sign * self.radius)))
else:
path.append((Path.LINETO, [x, y]))
path.extend([(Path.LINETO, [x, y - sign * length]),
(Path.LINETO, dip),
(Path.LINETO, [x - flow, y - sign * length])])
path.extend(self._arc(quadrant=quadrant,
cw=angle == DOWN,
radius=flow + self.radius,
center=(x + self.radius,
y - sign * self.radius)))
path.append((Path.LINETO, [x - flow, y + sign * flow]))
label_location = [dip[0], dip[1] - sign * self.offset]
return dip, label_location
def _add_output(self, path, angle, flow, length):
"""
Append an output to a path and return its tip and label locations.
.. note:: *flow* is negative for an output.
"""
if angle is None:
return [0, 0], [0, 0]
else:
x, y = path[-1][1] # Use the last point as a reference.
tipheight = (self.shoulder - flow / 2) * self.pitch
if angle == RIGHT:
x += length
tip = [x + tipheight, y + flow / 2.0]
path.extend([(Path.LINETO, [x, y]),
(Path.LINETO, [x, y + self.shoulder]),
(Path.LINETO, tip),
(Path.LINETO, [x, y - self.shoulder + flow]),
(Path.LINETO, [x, y + flow]),
(Path.LINETO, [x - self.gap, y + flow])])
label_location = [tip[0] + self.offset, tip[1]]
else: # Vertical
x += self.gap
if angle == UP:
sign = 1
else:
sign = -1
tip = [x - flow / 2.0, y + sign * (length + tipheight)]
if angle == UP:
quadrant = 3
else:
quadrant = 0
# Inner arc isn't needed if inner radius is zero
if self.radius:
path.extend(self._arc(quadrant=quadrant,
cw=angle == UP,
radius=self.radius,
center=(x - self.radius,
y + sign * self.radius)))
else:
path.append((Path.LINETO, [x, y]))
path.extend([(Path.LINETO, [x, y + sign * length]),
(Path.LINETO, [x - self.shoulder,
y + sign * length]),
(Path.LINETO, tip),
(Path.LINETO, [x + self.shoulder - flow,
y + sign * length]),
(Path.LINETO, [x - flow, y + sign * length])])
path.extend(self._arc(quadrant=quadrant,
cw=angle == DOWN,
radius=self.radius - flow,
center=(x - self.radius,
y + sign * self.radius)))
path.append((Path.LINETO, [x - flow, y + sign * flow]))
label_location = [tip[0], tip[1] + sign * self.offset]
return tip, label_location
def _revert(self, path, first_action=Path.LINETO):
"""
        A path is not simply reversible via path[::-1], since each code
        specifies an action to take from the **previous** point.
"""
reverse_path = []
next_code = first_action
for code, position in path[::-1]:
reverse_path.append((next_code, position))
next_code = code
return reverse_path
# This might be more efficient, but it fails because 'tuple' object
# doesn't support item assignment:
#path[1] = path[1][-1:0:-1]
#path[1][0] = first_action
#path[2] = path[2][::-1]
#return path
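        # Illustration (a sketch): reverting
        #     [(MOVETO, A), (LINETO, B), (CURVE4, C)]
        # with first_action=LINETO yields
        #     [(LINETO, C), (CURVE4, B), (LINETO, A)],
        # i.e. each code shifts by one position because it describes how to
        # reach its point from the *previous* one.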
@docstring.dedent_interpd
def add(self, patchlabel='', flows=None, orientations=None, labels='',
trunklength=1.0, pathlengths=0.25, prior=None, connect=(0, 0),
rotation=0, **kwargs):
"""
Add a simple Sankey diagram with flows at the same hierarchical level.
Return value is the instance of :class:`Sankey`.
Optional keyword arguments:
=============== ===================================================
Keyword Description
=============== ===================================================
*patchlabel* label to be placed at the center of the diagram
Note: *label* (not *patchlabel*) will be passed to
the patch through ``**kwargs`` and can be used to
create an entry in the legend.
*flows* array of flow values
By convention, inputs are positive and outputs are
negative.
*orientations* list of orientations of the paths
Valid values are 1 (from/to the top), 0 (from/to
the left or right), or -1 (from/to the bottom). If
*orientations* == 0, inputs will break in from the
left and outputs will break away to the right.
*labels* list of specifications of the labels for the flows
Each value may be *None* (no labels), '' (just
label the quantities), or a labeling string. If a
single value is provided, it will be applied to all
flows. If an entry is a non-empty string, then the
quantity for the corresponding flow will be shown
below the string. However, if the *unit* of the
main diagram is None, then quantities are never
shown, regardless of the value of this argument.
*trunklength* length between the bases of the input and output
groups
*pathlengths* list of lengths of the arrows before break-in or
after break-away
If a single value is given, then it will be applied
to the first (inside) paths on the top and bottom,
and the length of all other arrows will be
justified accordingly. The *pathlengths* are not
applied to the horizontal inputs and outputs.
*prior* index of the prior diagram to which this diagram
should be connected
*connect* a (prior, this) tuple indexing the flow of the
prior diagram and the flow of this diagram which
should be connected
If this is the first diagram or *prior* is *None*,
*connect* will be ignored.
*rotation* angle of rotation of the diagram [deg]
*rotation* is ignored if this diagram is connected
to an existing one (using *prior* and *connect*).
The interpretation of the *orientations* argument
will be rotated accordingly (e.g., if *rotation*
== 90, an *orientations* entry of 1 means to/from
the left).
=============== ===================================================
Valid kwargs are :meth:`matplotlib.patches.PathPatch` arguments:
%(Patch)s
As examples, ``fill=False`` and ``label='A legend entry'``.
By default, ``facecolor='#bfd1d4'`` (light blue) and
``linewidth=0.5``.
The indexing parameters (*prior* and *connect*) are zero-based.
The flows are placed along the top of the diagram from the inside out
in order of their index within the *flows* list or array. They are
placed along the sides of the diagram from the top down and along the
bottom from the outside in.
        If the sum of the inputs and outputs is nonzero, the discrepancy
will appear as a cubic Bezier curve along the top and bottom edges of
the trunk.
.. seealso::
:meth:`finish`
"""
# Check and preprocess the arguments.
if flows is None:
flows = np.array([1.0, -1.0])
else:
flows = np.array(flows)
n = flows.shape[0] # Number of flows
if rotation is None:
rotation = 0
else:
# In the code below, angles are expressed in deg/90.
rotation /= 90.0
if orientations is None:
orientations = [0, 0]
assert len(orientations) == n, (
"orientations and flows must have the same length.\n"
"orientations has length %d, but flows has length %d."
% (len(orientations), n))
if labels != '' and getattr(labels, '__iter__', False):
# iterable() isn't used because it would give True if labels is a
# string
assert len(labels) == n, (
"If labels is a list, then labels and flows must have the "
"same length.\nlabels has length %d, but flows has length %d."
% (len(labels), n))
else:
labels = [labels] * n
assert trunklength >= 0, (
"trunklength is negative.\nThis isn't allowed, because it would "
"cause poor layout.")
if np.absolute(np.sum(flows)) > self.tolerance:
verbose.report(
"The sum of the flows is nonzero (%f).\nIs the "
"system not at steady state?" % np.sum(flows), 'helpful')
scaled_flows = self.scale * flows
gain = sum(max(flow, 0) for flow in scaled_flows)
loss = sum(min(flow, 0) for flow in scaled_flows)
if not (0.5 <= gain <= 2.0):
verbose.report(
"The scaled sum of the inputs is %f.\nThis may "
"cause poor layout.\nConsider changing the scale so"
" that the scaled sum is approximately 1.0." % gain, 'helpful')
if not (-2.0 <= loss <= -0.5):
verbose.report(
"The scaled sum of the outputs is %f.\nThis may "
"cause poor layout.\nConsider changing the scale so"
" that the scaled sum is approximately 1.0." % gain, 'helpful')
if prior is not None:
assert prior >= 0, "The index of the prior diagram is negative."
assert min(connect) >= 0, (
"At least one of the connection indices is negative.")
assert prior < len(self.diagrams), (
"The index of the prior diagram is %d, but there are "
"only %d other diagrams.\nThe index is zero-based."
% (prior, len(self.diagrams)))
assert connect[0] < len(self.diagrams[prior].flows), (
"The connection index to the source diagram is %d, but "
"that diagram has only %d flows.\nThe index is zero-based."
% (connect[0], len(self.diagrams[prior].flows)))
assert connect[1] < n, (
"The connection index to this diagram is %d, but this diagram"
"has only %d flows.\n The index is zero-based."
% (connect[1], n))
assert self.diagrams[prior].angles[connect[0]] is not None, (
"The connection cannot be made. Check that the magnitude "
"of flow %d of diagram %d is greater than or equal to the "
"specified tolerance." % (connect[0], prior))
flow_error = (self.diagrams[prior].flows[connect[0]] +
flows[connect[1]])
assert abs(flow_error) < self.tolerance, (
"The scaled sum of the connected flows is %f, which is not "
"within the tolerance (%f)." % (flow_error, self.tolerance))
# Determine if the flows are inputs.
are_inputs = [None] * n
for i, flow in enumerate(flows):
if flow >= self.tolerance:
are_inputs[i] = True
elif flow <= -self.tolerance:
are_inputs[i] = False
else:
verbose.report(
"The magnitude of flow %d (%f) is below the "
"tolerance (%f).\nIt will not be shown, and it "
"cannot be used in a connection."
% (i, flow, self.tolerance), 'helpful')
# Determine the angles of the arrows (before rotation).
angles = [None] * n
for i, (orient, is_input) in enumerate(list(zip(orientations, are_inputs))):
if orient == 1:
if is_input:
angles[i] = DOWN
elif not is_input:
# Be specific since is_input can be None.
angles[i] = UP
elif orient == 0:
if is_input is not None:
angles[i] = RIGHT
else:
assert orient == -1, (
"The value of orientations[%d] is %d, "
"but it must be -1, 0, or 1." % (i, orient))
if is_input:
angles[i] = UP
elif not is_input:
angles[i] = DOWN
# Justify the lengths of the paths.
if iterable(pathlengths):
assert len(pathlengths) == n, (
"If pathlengths is a list, then pathlengths and flows must "
"have the same length.\npathlengths has length %d, but flows "
"has length %d." % (len(pathlengths), n))
else: # Make pathlengths into a list.
urlength = pathlengths
ullength = pathlengths
lrlength = pathlengths
lllength = pathlengths
d = dict(RIGHT=pathlengths)
pathlengths = [d.get(angle, 0) for angle in angles]
# Determine the lengths of the top-side arrows
# from the middle outwards.
for i, (angle, is_input, flow) in enumerate(list(zip(angles, are_inputs,
scaled_flows))):
if angle == DOWN and is_input:
pathlengths[i] = ullength
ullength += flow
elif angle == UP and not is_input:
pathlengths[i] = urlength
urlength -= flow # Flow is negative for outputs.
# Determine the lengths of the bottom-side arrows
# from the middle outwards.
for i, (angle, is_input, flow) in enumerate(reversed(list(zip(
angles, are_inputs, scaled_flows)))):
if angle == UP and is_input:
pathlengths[n - i - 1] = lllength
lllength += flow
elif angle == DOWN and not is_input:
pathlengths[n - i - 1] = lrlength
lrlength -= flow
# Determine the lengths of the left-side arrows
# from the bottom upwards.
has_left_input = False
for i, (angle, is_input, spec) in enumerate(reversed(list(zip(
angles, are_inputs, list(zip(scaled_flows, pathlengths)))))):
if angle == RIGHT:
if is_input:
if has_left_input:
pathlengths[n - i - 1] = 0
else:
has_left_input = True
# Determine the lengths of the right-side arrows
# from the top downwards.
has_right_output = False
for i, (angle, is_input, spec) in enumerate(list(zip(
angles, are_inputs, list(zip(scaled_flows, pathlengths))))):
if angle == RIGHT:
if not is_input:
if has_right_output:
pathlengths[i] = 0
else:
has_right_output = True
# Begin the subpaths, and smooth the transition if the sum of the flows
# is nonzero.
urpath = [(Path.MOVETO, [(self.gap - trunklength / 2.0), # Upper right
gain / 2.0]),
(Path.LINETO, [(self.gap - trunklength / 2.0) / 2.0,
gain / 2.0]),
(Path.CURVE4, [(self.gap - trunklength / 2.0) / 8.0,
gain / 2.0]),
(Path.CURVE4, [(trunklength / 2.0 - self.gap) / 8.0,
-loss / 2.0]),
(Path.LINETO, [(trunklength / 2.0 - self.gap) / 2.0,
-loss / 2.0]),
(Path.LINETO, [(trunklength / 2.0 - self.gap),
-loss / 2.0])]
llpath = [(Path.LINETO, [(trunklength / 2.0 - self.gap), # Lower left
loss / 2.0]),
(Path.LINETO, [(trunklength / 2.0 - self.gap) / 2.0,
loss / 2.0]),
(Path.CURVE4, [(trunklength / 2.0 - self.gap) / 8.0,
loss / 2.0]),
(Path.CURVE4, [(self.gap - trunklength / 2.0) / 8.0,
-gain / 2.0]),
(Path.LINETO, [(self.gap - trunklength / 2.0) / 2.0,
-gain / 2.0]),
(Path.LINETO, [(self.gap - trunklength / 2.0),
-gain / 2.0])]
lrpath = [(Path.LINETO, [(trunklength / 2.0 - self.gap), # Lower right
loss / 2.0])]
ulpath = [(Path.LINETO, [self.gap - trunklength / 2.0, # Upper left
gain / 2.0])]
# Add the subpaths and assign the locations of the tips and labels.
tips = np.zeros((n, 2))
label_locations = np.zeros((n, 2))
# Add the top-side inputs and outputs from the middle outwards.
for i, (angle, is_input, spec) in enumerate(list(zip(
angles, are_inputs, list(zip(scaled_flows, pathlengths))))):
if angle == DOWN and is_input:
tips[i, :], label_locations[i, :] = self._add_input(
ulpath, angle, *spec)
elif angle == UP and not is_input:
tips[i, :], label_locations[i, :] = self._add_output(
urpath, angle, *spec)
# Add the bottom-side inputs and outputs from the middle outwards.
for i, (angle, is_input, spec) in enumerate(reversed(list(zip(
angles, are_inputs, list(zip(scaled_flows, pathlengths)))))):
if angle == UP and is_input:
tip, label_location = self._add_input(llpath, angle, *spec)
tips[n - i - 1, :] = tip
label_locations[n - i - 1, :] = label_location
elif angle == DOWN and not is_input:
tip, label_location = self._add_output(lrpath, angle, *spec)
tips[n - i - 1, :] = tip
label_locations[n - i - 1, :] = label_location
# Add the left-side inputs from the bottom upwards.
has_left_input = False
for i, (angle, is_input, spec) in enumerate(reversed(list(zip(
angles, are_inputs, list(zip(scaled_flows, pathlengths)))))):
if angle == RIGHT and is_input:
if not has_left_input:
# Make sure the lower path extends
# at least as far as the upper one.
if llpath[-1][1][0] > ulpath[-1][1][0]:
llpath.append((Path.LINETO, [ulpath[-1][1][0],
llpath[-1][1][1]]))
has_left_input = True
tip, label_location = self._add_input(llpath, angle, *spec)
tips[n - i - 1, :] = tip
label_locations[n - i - 1, :] = label_location
# Add the right-side outputs from the top downwards.
has_right_output = False
for i, (angle, is_input, spec) in enumerate(list(zip(
angles, are_inputs, list(zip(scaled_flows, pathlengths))))):
if angle == RIGHT and not is_input:
if not has_right_output:
# Make sure the upper path extends
# at least as far as the lower one.
if urpath[-1][1][0] < lrpath[-1][1][0]:
urpath.append((Path.LINETO, [lrpath[-1][1][0],
urpath[-1][1][1]]))
has_right_output = True
tips[i, :], label_locations[i, :] = self._add_output(
urpath, angle, *spec)
# Trim any hanging vertices.
if not has_left_input:
ulpath.pop()
llpath.pop()
if not has_right_output:
lrpath.pop()
urpath.pop()
# Concatenate the subpaths in the correct order (clockwise from top).
path = (urpath + self._revert(lrpath) + llpath + self._revert(ulpath) +
[(Path.CLOSEPOLY, urpath[0][1])])
# Create a patch with the Sankey outline.
codes, vertices = list(zip(*path))
vertices = np.array(vertices)
def _get_angle(a, r):
if a is None:
return None
else:
return a + r
if prior is None:
if rotation != 0: # By default, none of this is needed.
angles = [_get_angle(angle, rotation) for angle in angles]
rotate = Affine2D().rotate_deg(rotation * 90).transform_point
tips = rotate(tips)
label_locations = rotate(label_locations)
vertices = rotate(vertices)
text = self.ax.text(0, 0, s=patchlabel, ha='center', va='center')
else:
rotation = (self.diagrams[prior].angles[connect[0]] -
angles[connect[1]])
angles = [_get_angle(angle, rotation) for angle in angles]
rotate = Affine2D().rotate_deg(rotation * 90).transform_point
tips = rotate(tips)
offset = self.diagrams[prior].tips[connect[0]] - tips[connect[1]]
translate = Affine2D().translate(*offset).transform_point
tips = translate(tips)
label_locations = translate(rotate(label_locations))
vertices = translate(rotate(vertices))
kwds = dict(s=patchlabel, ha='center', va='center')
text = self.ax.text(*offset, **kwds)
if False: # Debug
print("llpath\n", llpath)
print("ulpath\n", self._revert(ulpath))
print("urpath\n", urpath)
print("lrpath\n", self._revert(lrpath))
xs, ys = list(zip(*vertices))
self.ax.plot(xs, ys, 'go-')
patch = PathPatch(Path(vertices, codes),
fc=kwargs.pop('fc', kwargs.pop('facecolor',
'#bfd1d4')), # Custom defaults
lw=kwargs.pop('lw', kwargs.pop('linewidth', 0.5)),
**kwargs)
self.ax.add_patch(patch)
# Add the path labels.
texts = []
for number, angle, label, location in zip(flows, angles, labels,
label_locations):
if label is None or angle is None:
label = ''
elif self.unit is not None:
quantity = self.format % abs(number) + self.unit
if label != '':
label += "\n"
label += quantity
texts.append(self.ax.text(x=location[0], y=location[1],
s=label,
ha='center', va='center'))
        # Text objects are placed even if they are empty (as long as the
        # magnitude of the corresponding flow is larger than the tolerance) in
        # case the user wants to provide labels later.
# Expand the size of the diagram if necessary.
self.extent = (min(np.min(vertices[:, 0]),
np.min(label_locations[:, 0]),
self.extent[0]),
max(np.max(vertices[:, 0]),
np.max(label_locations[:, 0]),
self.extent[1]),
min(np.min(vertices[:, 1]),
np.min(label_locations[:, 1]),
self.extent[2]),
max(np.max(vertices[:, 1]),
np.max(label_locations[:, 1]),
self.extent[3]))
        # Include both vertices _and_ label locations in the extents; there
        # are cases where either could determine the margins (e.g., arrow
        # shoulders).
# Add this diagram as a subdiagram.
self.diagrams.append(Bunch(patch=patch, flows=flows, angles=angles,
tips=tips, text=text, texts=texts))
# Allow a daisy-chained call structure (see docstring for the class).
return self
def finish(self):
"""
Adjust the axes and return a list of information about the Sankey
subdiagram(s).
Return value is a list of subdiagrams represented with the following
fields:
=============== ===================================================
Field Description
=============== ===================================================
*patch* Sankey outline (an instance of
                            :class:`~matplotlib.patches.PathPatch`)
*flows* values of the flows (positive for input, negative
for output)
*angles* list of angles of the arrows [deg/90]
For example, if the diagram has not been rotated,
an input to the top side will have an angle of 3
(DOWN), and an output from the top side will have
an angle of 1 (UP). If a flow has been skipped
(because its magnitude is less than *tolerance*),
then its angle will be *None*.
*tips* array in which each row is an [x, y] pair
indicating the positions of the tips (or "dips") of
the flow paths
                            If the magnitude of a flow is less than the
                            *tolerance* for the instance of :class:`Sankey`,
                            the flow is skipped and its tip will be at the
                            center of the diagram.
*text* :class:`~matplotlib.text.Text` instance for the
label of the diagram
*texts* list of :class:`~matplotlib.text.Text` instances
for the labels of flows
=============== ===================================================
.. seealso::
:meth:`add`
"""
self.ax.axis([self.extent[0] - self.margin,
self.extent[1] + self.margin,
self.extent[2] - self.margin,
self.extent[3] + self.margin])
self.ax.set_aspect('equal', adjustable='datalim')
return self.diagrams
| gpl-3.0 |
tapomayukh/projects_in_python | classification/Classification_with_kNN/Single_Contact_Classification/Feature_Comparison/multiple_features/best_kNN_PCA/test11_cross_validate_categories_1200ms_scaled_method_v_force_motion.py | 1 | 5088 |
# Principal Component Analysis Code :
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_kNN/Scaled')
from data_method_V import Fmat_original
def pca(X):
#get dimensions
num_data,dim = X.shape
#center data
mean_X = X.mean(axis=1)
M = (X-mean_X) # subtract the mean (along columns)
Mcov = cov(M)
###### Sanity Check ######
i=0
n=0
while i < 82:
j=0
while j < 140:
if X[i,j] != X[i,j]:
print X[i,j]
print i,j
n=n+1
j = j+1
i=i+1
print n
##########################
print 'PCA - COV-Method used'
val,vec = linalg.eig(Mcov)
#return the projection matrix, the variance and the mean
return vec,val,mean_X, M, Mcov
def my_mvpa(Y,num2):
#Using PYMVPA
PCA_data = np.array(Y)
PCA_label_1 = ['Rigid-Fixed']*35 + ['Rigid-Movable']*35 + ['Soft-Fixed']*35 + ['Soft-Movable']*35
PCA_chunk_1 = ['Styrofoam-Fixed']*5 + ['Books-Fixed']*5 + ['Bucket-Fixed']*5 + ['Bowl-Fixed']*5 + ['Can-Fixed']*5 + ['Box-Fixed']*5 + ['Pipe-Fixed']*5 + ['Styrofoam-Movable']*5 + ['Container-Movable']*5 + ['Books-Movable']*5 + ['Cloth-Roll-Movable']*5 + ['Black-Rubber-Movable']*5 + ['Can-Movable']*5 + ['Box-Movable']*5 + ['Rug-Fixed']*5 + ['Bubble-Wrap-1-Fixed']*5 + ['Pillow-1-Fixed']*5 + ['Bubble-Wrap-2-Fixed']*5 + ['Sponge-Fixed']*5 + ['Foliage-Fixed']*5 + ['Pillow-2-Fixed']*5 + ['Rug-Movable']*5 + ['Bubble-Wrap-1-Movable']*5 + ['Pillow-1-Movable']*5 + ['Bubble-Wrap-2-Movable']*5 + ['Pillow-2-Movable']*5 + ['Cushion-Movable']*5 + ['Sponge-Movable']*5
clf = kNN(k=num2)
terr = TransferError(clf)
ds1 = Dataset(samples=PCA_data,labels=PCA_label_1,chunks=PCA_chunk_1)
cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion'])
error = cvterr(ds1)
return (1-error)*100
def result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC):
# Reduced Eigen-Vector Matrix according to highest Eigenvalues..(Considering First 20 based on above figure)
W = eigvec_total[:,0:num_PC]
m_W, n_W = np.shape(W)
# Normalizes the data set with respect to its variance (Not an Integral part of PCA, but useful)
length = len(eigval_total)
s = np.matrix(np.zeros(length)).T
i = 0
while i < length:
s[i] = sqrt(C[i,i])
i = i+1
Z = np.divide(B,s)
m_Z, n_Z = np.shape(Z)
#Projected Data:
Y = (W.T)*B # 'B' for my Laptop: otherwise 'Z' instead of 'B'
m_Y, n_Y = np.shape(Y.T)
return Y.T
if __name__ == '__main__':
Fmat = np.row_stack([Fmat_original[0:41,:], Fmat_original[82:123,:]])
# Checking the Data-Matrix
m_tot, n_tot = np.shape(Fmat)
print 'Total_Matrix_Shape:',m_tot,n_tot
eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)
#print eigvec_total
#print eigval_total
#print mean_data_total
m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total
#Recall that the cumulative sum of the eigenvalues shows the level of variance accounted by each of the corresponding eigenvectors. On the x axis there is the number of eigenvalues used.
perc_total = cumsum(eigval_total)/sum(eigval_total)
num_PC=1
while num_PC <=20:
Proj = np.zeros((140,num_PC))
Proj = result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC)
# PYMVPA:
num=0
cv_acc = np.zeros(21)
while num <=20:
cv_acc[num] = my_mvpa(Proj,num)
num = num+1
plot(np.arange(21),cv_acc,'-s')
grid('True')
hold('True')
num_PC = num_PC+1
legend(('1-PC', '2-PCs', '3-PCs', '4-PCs', '5-PCs', '6-PCs', '7-PCs', '8-PCs', '9-PCs', '10-PCs', '11-PC', '12-PCs', '13-PCs', '14-PCs', '15-PCs', '16-PCs', '17-PCs', '18-PCs', '19-PCs', '20-PCs'))
ylabel('Cross-Validation Accuracy')
xlabel('k in k-NN Classifier')
show()
| mit |
rayNymous/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/__init__.py | 72 | 2225 |
import matplotlib
import inspect
import warnings
# ipython relies on interactive_bk being defined here
from matplotlib.rcsetup import interactive_bk
__all__ = ['backend','show','draw_if_interactive',
'new_figure_manager', 'backend_version']
backend = matplotlib.get_backend() # validates, to match all_backends
def pylab_setup():
'return new_figure_manager, draw_if_interactive and show for pylab'
# Import the requested backend into a generic module object
if backend.startswith('module://'):
backend_name = backend[9:]
else:
backend_name = 'backend_'+backend
backend_name = backend_name.lower() # until we banish mixed case
backend_name = 'matplotlib.backends.%s'%backend_name.lower()
backend_mod = __import__(backend_name,
globals(),locals(),[backend_name])
# Things we pull in from all backends
new_figure_manager = backend_mod.new_figure_manager
# image backends like pdf, agg or svg do not need to do anything
# for "show" or "draw_if_interactive", so if they are not defined
# by the backend, just do nothing
def do_nothing_show(*args, **kwargs):
frame = inspect.currentframe()
fname = frame.f_back.f_code.co_filename
if fname in ('<stdin>', '<ipython console>'):
warnings.warn("""
Your currently selected backend, '%s' does not support show().
Please select a GUI backend in your matplotlibrc file ('%s')
or with matplotlib.use()""" %
(backend, matplotlib.matplotlib_fname()))
def do_nothing(*args, **kwargs): pass
backend_version = getattr(backend_mod,'backend_version', 'unknown')
show = getattr(backend_mod, 'show', do_nothing_show)
draw_if_interactive = getattr(backend_mod, 'draw_if_interactive', do_nothing)
# Additional imports which only happen for certain backends. This section
# should probably disappear once all backends are uniform.
if backend.lower() in ['wx','wxagg']:
Toolbar = backend_mod.Toolbar
__all__.append('Toolbar')
matplotlib.verbose.report('backend %s version %s' % (backend,backend_version))
return new_figure_manager, draw_if_interactive, show
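# A usage sketch: this is roughly how pylab/pyplot consumes the helper above.
#     new_figure_manager, draw_if_interactive, show = pylab_setup()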
| agpl-3.0 |
harta55/EnTAP | libs/TransDecoder-v5.3.0/util/misc/plot_indiv_seq_likelihood_profile.py | 2 | 3191 | #!/usr/bin/env python
import os,sys
import re
import matplotlib.pyplot as plt
import argparse
import subprocess
import numpy as np
import collections
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter,
description="plot likelihood profile for sequence ")
parser.add_argument("--orf_id", type=str, required=True, help="orf accession")
parser.add_argument("--longest_orfs_cds", type=str, required=True, help="long orfs cds file")
parser.add_argument("--kmer_scores", type=str, required=True, help= "kmer likelihood score file")
parser.add_argument("--sort", action='store_true')
parser.add_argument("--cumsum", action='store_true')
parser.add_argument("--max_repeat", type=int, required=False, default=None, help="max repeat count for framed hexamer")
args = parser.parse_args()
def main():
seq = get_seq(args.orf_id, args.longest_orfs_cds)
framed_kmers_to_likelihoods = parse_kmer_likelihoods(args.kmer_scores)
score_vec = score_seq(seq, framed_kmers_to_likelihoods)
print("sum: {}".format(sum(score_vec)))
if args.sort:
score_vec.sort()
if args.cumsum:
plt.plot(range(1,len(score_vec)+1), np.cumsum(score_vec), marker ='o')
else:
plt.plot(range(1,len(score_vec)+1), score_vec, marker ='+')
plt.show()
def score_seq(seq, framed_kmer_likelihoods):
score_vec = []
seq = seq.upper()
framed_kmer_counter = collections.defaultdict(int)
for i in range(0, len(seq)):
frame = i % 3
markov_use = min(i, 5)
kmer = seq[i-markov_use:i+1]
codon = seq[i:i+3]
#print "codon: {}, frame: {}".format(codon, frame)
# don't include stop codon
if i == len(seq)-2-1 and frame == 0:
if codon in ('TAA', 'TAG', 'TGA'):
break
#print("i:{}, markov_use:{}, kmer:{}".format(i, markov_use, kmer))
framed_kmer = "{}-{}".format(kmer, frame)
framed_kmer_counter[framed_kmer] += 1
if args.max_repeat is not None and framed_kmer_counter[framed_kmer] > args.max_repeat:
continue
loglikelihood = framed_kmer_likelihoods[framed_kmer]
print("i:{}, {}, likelihood: {}".format(i, framed_kmer, loglikelihood))
score_vec.append(loglikelihood)
return score_vec
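# Worked example of the framed-kmer construction in score_seq (a sketch): for
# seq = "ATGGCTAAA" and i = 5, frame = 5 % 3 = 2 and markov_use = 5, so the
# kmer is seq[0:6] = "ATGGCT" and the key looked up in the likelihood table is
# "ATGGCT-2".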
def parse_kmer_likelihoods(kmer_scores_file):
framed_kmers_to_likelihoods = {}
with open(kmer_scores_file) as fh:
for line in fh:
if re.search("^#", line): continue
line = line.rstrip()
(framed_kmer, count, countkmerminus1, likelihood) = line.split("\t")
framed_kmers_to_likelihoods[framed_kmer] = float(likelihood)
return framed_kmers_to_likelihoods
def get_seq(orf_id, fasta_file):
cmd = "samtools faidx {} \"{}\"".format(fasta_file, orf_id)
fasta_entry = subprocess.check_output(cmd, shell=True)
print(fasta_entry)
lines = fasta_entry.split("\n")
header = lines.pop(0)
seq = "".join(lines)
seq = seq.replace(" ", "")
return seq
if __name__ == '__main__':
main()
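# Command-line sketch (file names are illustrative; requires samtools on PATH):
#     ./plot_indiv_seq_likelihood_profile.py --orf_id GENE.p1 \
#         --longest_orfs_cds longest_orfs.cds \
#         --kmer_scores hexamer.scores --cumsum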
| gpl-3.0 |
GaZ3ll3/scikit-image | skimage/filters/_gabor.py | 4 | 6920 | import numpy as np
from scipy import ndimage as ndi
from .._shared.utils import assert_nD
__all__ = ['gabor_kernel', 'gabor_filter']
def _sigma_prefactor(bandwidth):
b = bandwidth
# See http://www.cs.rug.nl/~imaging/simplecell.html
return 1.0 / np.pi * np.sqrt(np.log(2) / 2.0) * \
(2.0 ** b + 1) / (2.0 ** b - 1)
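# Equivalently (a sketch of the relation implemented above): for bandwidth b,
#     sigma * frequency = (1 / pi) * sqrt(ln(2) / 2) * (2**b + 1) / (2**b - 1)
# so a one-octave bandwidth (b = 1) gives sigma of roughly 0.56 / frequency.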
def gabor_kernel(frequency, theta=0, bandwidth=1, sigma_x=None, sigma_y=None,
n_stds=3, offset=0):
"""Return complex 2D Gabor filter kernel.
Gabor kernel is a Gaussian kernel modulated by a complex harmonic function.
Harmonic function consists of an imaginary sine function and a real
cosine function. Spatial frequency is inversely proportional to the
wavelength of the harmonic and to the standard deviation of a Gaussian
kernel. The bandwidth is also inversely proportional to the standard
deviation.
Parameters
----------
frequency : float
Spatial frequency of the harmonic function. Specified in pixels.
theta : float, optional
Orientation in radians. If 0, the harmonic is in the x-direction.
bandwidth : float, optional
The bandwidth captured by the filter. For fixed bandwidth, `sigma_x`
and `sigma_y` will decrease with increasing frequency. This value is
ignored if `sigma_x` and `sigma_y` are set by the user.
sigma_x, sigma_y : float, optional
Standard deviation in x- and y-directions. These directions apply to
the kernel *before* rotation. If `theta = pi/2`, then the kernel is
rotated 90 degrees so that `sigma_x` controls the *vertical* direction.
n_stds : scalar, optional
The linear size of the kernel is n_stds (3 by default) standard
deviations
offset : float, optional
Phase offset of harmonic function in radians.
Returns
-------
g : complex array
Complex filter kernel.
References
----------
.. [1] http://en.wikipedia.org/wiki/Gabor_filter
.. [2] http://mplab.ucsd.edu/tutorials/gabor.pdf
Examples
--------
>>> from skimage.filter import gabor_kernel
>>> from skimage import io
>>> from matplotlib import pyplot as plt # doctest: +SKIP
>>> gk = gabor_kernel(frequency=0.2)
>>> plt.figure() # doctest: +SKIP
>>> io.imshow(gk.real) # doctest: +SKIP
>>> io.show() # doctest: +SKIP
>>> # more ripples (equivalent to increasing the size of the
>>> # Gaussian spread)
>>> gk = gabor_kernel(frequency=0.2, bandwidth=0.1)
>>> plt.figure() # doctest: +SKIP
>>> io.imshow(gk.real) # doctest: +SKIP
>>> io.show() # doctest: +SKIP
"""
if sigma_x is None:
sigma_x = _sigma_prefactor(bandwidth) / frequency
if sigma_y is None:
sigma_y = _sigma_prefactor(bandwidth) / frequency
x0 = np.ceil(max(np.abs(n_stds * sigma_x * np.cos(theta)),
np.abs(n_stds * sigma_y * np.sin(theta)), 1))
y0 = np.ceil(max(np.abs(n_stds * sigma_y * np.cos(theta)),
np.abs(n_stds * sigma_x * np.sin(theta)), 1))
y, x = np.mgrid[-y0:y0 + 1, -x0:x0 + 1]
rotx = x * np.cos(theta) + y * np.sin(theta)
roty = -x * np.sin(theta) + y * np.cos(theta)
g = np.zeros(y.shape, dtype=np.complex)
g[:] = np.exp(-0.5 * (rotx ** 2 / sigma_x ** 2 + roty ** 2 / sigma_y ** 2))
g /= 2 * np.pi * sigma_x * sigma_y
g *= np.exp(1j * (2 * np.pi * frequency * rotx + offset))
return g
def gabor_filter(image, frequency, theta=0, bandwidth=1, sigma_x=None,
sigma_y=None, n_stds=3, offset=0, mode='reflect', cval=0):
"""Return real and imaginary responses to Gabor filter.
The real and imaginary parts of the Gabor filter kernel are applied to the
image and the response is returned as a pair of arrays.
Gabor filter is a linear filter with a Gaussian kernel which is modulated
by a sinusoidal plane wave. Frequency and orientation representations of
the Gabor filter are similar to those of the human visual system.
Gabor filter banks are commonly used in computer vision and image
processing. They are especially suitable for edge detection and texture
classification.
Parameters
----------
image : 2-D array
Input image.
frequency : float
Spatial frequency of the harmonic function. Specified in pixels.
theta : float, optional
Orientation in radians. If 0, the harmonic is in the x-direction.
bandwidth : float, optional
The bandwidth captured by the filter. For fixed bandwidth, `sigma_x`
and `sigma_y` will decrease with increasing frequency. This value is
ignored if `sigma_x` and `sigma_y` are set by the user.
sigma_x, sigma_y : float, optional
Standard deviation in x- and y-directions. These directions apply to
the kernel *before* rotation. If `theta = pi/2`, then the kernel is
rotated 90 degrees so that `sigma_x` controls the *vertical* direction.
n_stds : scalar, optional
The linear size of the kernel is n_stds (3 by default) standard
deviations.
offset : float, optional
Phase offset of harmonic function in radians.
mode : string, optional
Mode used to convolve image with a kernel, passed to `ndi.convolve`
cval : scalar, optional
Value to fill past edges of input if `mode` of convolution is
'constant'. The parameter is passed to `ndi.convolve`.
Returns
-------
real, imag : arrays
Filtered images using the real and imaginary parts of the Gabor filter
kernel. Images are of the same dimensions as the input one.
References
----------
.. [1] http://en.wikipedia.org/wiki/Gabor_filter
.. [2] http://mplab.ucsd.edu/tutorials/gabor.pdf
Examples
--------
>>> from skimage.filter import gabor_filter
>>> from skimage import data, io
>>> from matplotlib import pyplot as plt # doctest: +SKIP
>>> image = data.coins()
>>> # detecting edges in a coin image
>>> filt_real, filt_imag = gabor_filter(image, frequency=0.6)
>>> plt.figure() # doctest: +SKIP
>>> io.imshow(filt_real) # doctest: +SKIP
>>> io.show() # doctest: +SKIP
>>> # less sensitivity to finer details with the lower frequency kernel
>>> filt_real, filt_imag = gabor_filter(image, frequency=0.1)
>>> plt.figure() # doctest: +SKIP
>>> io.imshow(filt_real) # doctest: +SKIP
>>> io.show() # doctest: +SKIP
"""
assert_nD(image, 2)
g = gabor_kernel(frequency, theta, bandwidth, sigma_x, sigma_y, n_stds,
offset)
filtered_real = ndi.convolve(image, np.real(g), mode=mode, cval=cval)
filtered_imag = ndi.convolve(image, np.imag(g), mode=mode, cval=cval)
return filtered_real, filtered_imag
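# A common follow-up (a sketch, not part of this module's API): the magnitude
# response can be computed from the two returned arrays as
#     magnitude = np.sqrt(filtered_real ** 2 + filtered_imag ** 2)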
| bsd-3-clause |
vincentdumont/nuri | nuri/utils.py | 1 | 1449 | #!/usr/bin/env python
import sys,nuri,os,numpy
import matplotlib.pyplot as plt
import matplotlib.dates as md
from datetime import datetime,timedelta
def check24hrs(t0,t1,station):
"""
  Display the days for which a full 24 hours of data are available
  from the specified station.
  Parameters
  ----------
  t0, t1 : str
    Start and end dates, given as '-'-separated strings passed to
    ``datetime`` (e.g. YYYY-MM-DD).
  station : int
    Number of the NURI station whose data availability is checked.
"""
# Download metadata from Google Drive
sys.stderr.write('Retrieve information from Google Drive...')
os.system('skicka ls -r /MagneticFieldData/ > data')
data = numpy.loadtxt('data',dtype=str,delimiter='\n')
print >>sys.stderr,' done!'
# Get date list
t0 = datetime(*numpy.array(t0.split('-'),dtype=int))
t1 = datetime(*numpy.array(t1.split('-'),dtype=int))
dt = timedelta(hours=1)
dates = numpy.arange(t0,t1,dt)
# List file path for each date and each station
tick = 0
for d in dates:
year = d.astype(object).year
month = d.astype(object).month
day = d.astype(object).day
hour = d.astype(object).hour
path = 'MagneticFieldData/%i/%i/%i/%i/'%(year,month,day,hour)
fname = '%i-%i-%i_%i-xx.zip'%(year,month,day,hour)
if path+'NURI-station-%02i/'%station+fname in data:
tick+=1
if tick==24:
print year,month,day,hour
if hour==23:
tick = 0
os.system('rm data')
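# A usage sketch (dates are illustrative; the function shells out to the
# `skicka` Google Drive client, which must be installed and configured):
#
#     check24hrs('2016-01-01', '2016-02-01', station=2)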
| mit |
hitszxp/scikit-learn | sklearn/datasets/tests/test_lfw.py | 50 | 6849 | """These tests for LFW require medium-size data downloading and processing
If the data has not already been downloaded by running the examples,
the tests won't run (skipped).
If the tests are run, the first execution will be long (typically a bit
more than a couple of minutes) but as the dataset loader is leveraging
joblib, successive runs will be fast (less than 200ms).
"""
import random
import os
import shutil
import tempfile
import numpy as np
from sklearn.externals import six
try:
try:
from scipy.misc import imsave
except ImportError:
from scipy.misc.pilutil import imsave
except ImportError:
imsave = None
from sklearn.datasets import load_lfw_pairs
from sklearn.datasets import load_lfw_people
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import raises
SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_")
SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_")
LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, 'lfw_home')
FAKE_NAMES = [
'Abdelatif_Smith',
'Abhati_Kepler',
'Camara_Alvaro',
'Chen_Dupont',
'John_Lee',
'Lin_Bauman',
'Onur_Lopez',
]
def setup_module():
"""Test fixture run once and common to all tests of this module"""
if imsave is None:
raise SkipTest("PIL not installed.")
if not os.path.exists(LFW_HOME):
os.makedirs(LFW_HOME)
random_state = random.Random(42)
np_rng = np.random.RandomState(42)
# generate some random jpeg files for each person
counts = {}
for name in FAKE_NAMES:
folder_name = os.path.join(LFW_HOME, 'lfw_funneled', name)
if not os.path.exists(folder_name):
os.makedirs(folder_name)
n_faces = np_rng.randint(1, 5)
counts[name] = n_faces
for i in range(n_faces):
file_path = os.path.join(folder_name, name + '_%04d.jpg' % i)
uniface = np_rng.randint(0, 255, size=(250, 250, 3))
try:
imsave(file_path, uniface)
except ImportError:
raise SkipTest("PIL not installed")
# add some random file pollution to test robustness
with open(os.path.join(LFW_HOME, 'lfw_funneled', '.test.swp'), 'wb') as f:
f.write(six.b('Text file to be ignored by the dataset loader.'))
# generate some pairing metadata files using the same format as LFW
with open(os.path.join(LFW_HOME, 'pairsDevTrain.txt'), 'wb') as f:
f.write(six.b("10\n"))
more_than_two = [name for name, count in six.iteritems(counts)
if count >= 2]
for i in range(5):
name = random_state.choice(more_than_two)
first, second = random_state.sample(range(counts[name]), 2)
f.write(six.b('%s\t%d\t%d\n' % (name, first, second)))
for i in range(5):
first_name, second_name = random_state.sample(FAKE_NAMES, 2)
first_index = random_state.choice(np.arange(counts[first_name]))
second_index = random_state.choice(np.arange(counts[second_name]))
f.write(six.b('%s\t%d\t%s\t%d\n' % (first_name, first_index,
second_name, second_index)))
with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
with open(os.path.join(LFW_HOME, 'pairs.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
if os.path.isdir(SCIKIT_LEARN_DATA):
shutil.rmtree(SCIKIT_LEARN_DATA)
if os.path.isdir(SCIKIT_LEARN_EMPTY_DATA):
shutil.rmtree(SCIKIT_LEARN_EMPTY_DATA)
@raises(IOError)
def test_load_empty_lfw_people():
load_lfw_people(data_home=SCIKIT_LEARN_EMPTY_DATA)
def test_load_fake_lfw_people():
lfw_people = load_lfw_people(data_home=SCIKIT_LEARN_DATA,
min_faces_per_person=3)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
assert_equal(lfw_people.images.shape, (10, 62, 47))
assert_equal(lfw_people.data.shape, (10, 2914))
# the target is array of person integer ids
assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2])
# names of the persons can be found using the target_names array
expected_classes = ['Abdelatif Smith', 'Abhati Kepler', 'Onur Lopez']
assert_array_equal(lfw_people.target_names, expected_classes)
# It is possible to ask for the original data without any croping or color
# conversion and not limit on the number of picture per person
lfw_people = load_lfw_people(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True)
assert_equal(lfw_people.images.shape, (17, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_people.target,
[0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2])
assert_array_equal(lfw_people.target_names,
['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro',
'Chen Dupont', 'John Lee', 'Lin Bauman', 'Onur Lopez'])
@raises(ValueError)
def test_load_fake_lfw_people_too_restrictive():
load_lfw_people(data_home=SCIKIT_LEARN_DATA, min_faces_per_person=100)
@raises(IOError)
def test_load_empty_lfw_pairs():
load_lfw_pairs(data_home=SCIKIT_LEARN_EMPTY_DATA)
def test_load_fake_lfw_pairs():
lfw_pairs_train = load_lfw_pairs(data_home=SCIKIT_LEARN_DATA)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 62, 47))
# the target is whether the person is the same or not
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
# names of the persons can be found using the target_names array
expected_classes = ['Different persons', 'Same person']
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
# It is possible to ask for the original data without any croping or color
# conversion
lfw_pairs_train = load_lfw_pairs(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True)
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
| bsd-3-clause |
fabianp/scikit-learn | examples/cross_decomposition/plot_compare_cross_decomposition.py | 142 | 4761 | """
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X, and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
datasets that explain the most shared variance between both datasets.
This is apparent on the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both datasets,
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA
###############################################################################
# Dataset based latent variables model
n = 500
# 2 latents vars:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X_train = X[:n / 2]
Y_train = Y[:n / 2]
X_test = X[n / 2:]
Y_test = Y[n / 2:]
print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
###############################################################################
# Canonical (symmetric) PLS
# Transform data
# ~~~~~~~~~~~~~~
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On diagonal plot X vs Y scores on each components
plt.figure(figsize=(12, 8))
plt.subplot(221)
plt.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train")
plt.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 1: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
plt.subplot(224)
plt.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train")
plt.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 2: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
# 2) Off diagonal plot components 1 vs 2 for X and Y
plt.subplot(222)
plt.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train")
plt.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test")
plt.xlabel("X comp. 1")
plt.ylabel("X comp. 2")
plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)'
% np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.subplot(223)
plt.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train")
plt.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test")
plt.xlabel("Y comp. 1")
plt.ylabel("Y comp. 2")
plt.title('Y comp. 1 vs Y comp. 2 , (test corr = %.2f)'
% np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.show()
###############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noize
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coefs with B
print("Estimated B")
print(np.round(pls2.coefs, 1))
pls2.predict(X)
###############################################################################
# PLS regression, with univariate response, a.k.a. PLS1
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of components exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coefs, 1))
###############################################################################
# CCA (PLS mode B with symmetric deflation)
cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
| bsd-3-clause |
johnchase/scikit-bio | skbio/stats/distance/tests/test_anosim.py | 13 | 4920 | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from six import StringIO
from functools import partial
from unittest import TestCase, main
import numpy as np
import pandas as pd
from pandas.util.testing import assert_series_equal
from skbio import DistanceMatrix
from skbio.stats.distance import anosim
class TestANOSIM(TestCase):
"""All results were verified with R (vegan::anosim)."""
def setUp(self):
# Distance matrices with and without ties in the ranks, with 2 groups
# of equal size.
dm_ids = ['s1', 's2', 's3', 's4']
self.grouping_equal = ['Control', 'Control', 'Fast', 'Fast']
self.df = pd.read_csv(
StringIO('ID,Group\ns2,Control\ns3,Fast\ns4,Fast\ns5,Control\n'
's1,Control'), index_col=0)
self.dm_ties = DistanceMatrix([[0, 1, 1, 4],
[1, 0, 3, 2],
[1, 3, 0, 3],
[4, 2, 3, 0]], dm_ids)
self.dm_no_ties = DistanceMatrix([[0, 1, 5, 4],
[1, 0, 3, 2],
[5, 3, 0, 3],
[4, 2, 3, 0]], dm_ids)
# Test with 3 groups of unequal size. This data also generates a
# negative R statistic.
self.grouping_unequal = ['Control', 'Treatment1', 'Treatment2',
'Treatment1', 'Control', 'Control']
# Equivalent grouping but with different labels -- groups should be
# assigned different integer labels but results should be the same.
self.grouping_unequal_relabeled = ['z', 42, 'abc', 42, 'z', 'z']
self.dm_unequal = DistanceMatrix(
[[0.0, 1.0, 0.1, 0.5678, 1.0, 1.0],
[1.0, 0.0, 0.002, 0.42, 0.998, 0.0],
[0.1, 0.002, 0.0, 1.0, 0.123, 1.0],
[0.5678, 0.42, 1.0, 0.0, 0.123, 0.43],
[1.0, 0.998, 0.123, 0.123, 0.0, 0.5],
[1.0, 0.0, 1.0, 0.43, 0.5, 0.0]],
['s1', 's2', 's3', 's4', 's5', 's6'])
# Expected series index is the same across all tests.
self.exp_index = ['method name', 'test statistic name', 'sample size',
'number of groups', 'test statistic', 'p-value',
'number of permutations']
# Stricter series equality testing than the default.
self.assert_series_equal = partial(assert_series_equal,
check_index_type=True,
check_series_type=True)
def test_ties(self):
# Ensure we get the same results if we rerun the method using the same
# inputs. Also ensure we get the same results if we run the method
# using a grouping vector or a data frame with equivalent groupings.
exp = pd.Series(index=self.exp_index,
data=['ANOSIM', 'R', 4, 2, 0.25, 0.671, 999],
name='ANOSIM results')
for _ in range(2):
np.random.seed(0)
obs = anosim(self.dm_ties, self.grouping_equal)
self.assert_series_equal(obs, exp)
for _ in range(2):
np.random.seed(0)
obs = anosim(self.dm_ties, self.df, column='Group')
self.assert_series_equal(obs, exp)
def test_no_ties(self):
exp = pd.Series(index=self.exp_index,
data=['ANOSIM', 'R', 4, 2, 0.625, 0.332, 999],
name='ANOSIM results')
np.random.seed(0)
obs = anosim(self.dm_no_ties, self.grouping_equal)
self.assert_series_equal(obs, exp)
def test_no_permutations(self):
exp = pd.Series(index=self.exp_index,
data=['ANOSIM', 'R', 4, 2, 0.625, np.nan, 0],
name='ANOSIM results')
obs = anosim(self.dm_no_ties, self.grouping_equal, permutations=0)
self.assert_series_equal(obs, exp)
def test_unequal_group_sizes(self):
exp = pd.Series(index=self.exp_index,
data=['ANOSIM', 'R', 6, 3, -0.363636, 0.878, 999],
name='ANOSIM results')
np.random.seed(0)
obs = anosim(self.dm_unequal, self.grouping_unequal)
self.assert_series_equal(obs, exp)
np.random.seed(0)
obs = anosim(self.dm_unequal, self.grouping_unequal_relabeled)
self.assert_series_equal(obs, exp)
if __name__ == '__main__':
main()
| bsd-3-clause |
xiaoxiamii/scikit-learn | examples/manifold/plot_mds.py | 261 | 2616 | """
=========================
Multi-dimensional scaling
=========================
An illustration of the metric and non-metric MDS on generated noisy data.
The reconstructed points using the metric MDS and non-metric MDS are slightly
shifted to avoid overlapping.
"""
# Author: Nelle Varoquaux <[email protected]>
# Licence: BSD
print(__doc__)
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
from sklearn import manifold
from sklearn.metrics import euclidean_distances
from sklearn.decomposition import PCA
n_samples = 20
seed = np.random.RandomState(seed=3)
X_true = seed.randint(0, 20, 2 * n_samples).astype(np.float)
X_true = X_true.reshape((n_samples, 2))
# Center the data
X_true -= X_true.mean()
similarities = euclidean_distances(X_true)
# Add noise to the similarities
noise = np.random.rand(n_samples, n_samples)
noise = noise + noise.T
noise[np.arange(noise.shape[0]), np.arange(noise.shape[0])] = 0
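# Keep the noise diagonal at zero so that self-distances stay zero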
similarities += noise
mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed,
dissimilarity="precomputed", n_jobs=1)
pos = mds.fit(similarities).embedding_
nmds = manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12,
dissimilarity="precomputed", random_state=seed, n_jobs=1,
n_init=1)
npos = nmds.fit_transform(similarities, init=pos)
# Rescale the data
pos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((pos ** 2).sum())
npos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((npos ** 2).sum())
# Rotate the data
clf = PCA(n_components=2)
X_true = clf.fit_transform(X_true)
pos = clf.fit_transform(pos)
npos = clf.fit_transform(npos)
fig = plt.figure(1)
ax = plt.axes([0., 0., 1., 1.])
plt.scatter(X_true[:, 0], X_true[:, 1], c='r', s=20)
plt.scatter(pos[:, 0], pos[:, 1], s=20, c='g')
plt.scatter(npos[:, 0], npos[:, 1], s=20, c='b')
plt.legend(('True position', 'MDS', 'NMDS'), loc='best')
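# Rescale distances into edge intensities for the plot below (larger value =
# shorter distance); the zero diagonal becomes inf and is reset to 0 next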
similarities = similarities.max() / similarities * 100
similarities[np.isinf(similarities)] = 0
# Plot the edges
start_idx, end_idx = np.where(pos)
#a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[X_true[i, :], X_true[j, :]]
for i in range(len(pos)) for j in range(len(pos))]
values = np.abs(similarities)
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, values.max()))
lc.set_array(similarities.flatten())
lc.set_linewidths(0.5 * np.ones(len(segments)))
ax.add_collection(lc)
plt.show()
| bsd-3-clause |
JonnyCE/project-platform | thesis/proto_direct.py | 2 | 10487 | from matplotlib.pyplot import *
import math
from thesis.proto_shared import *
# 1 node 1 part batch job, verification
def batch_job_verification(filename='./exp_batch.json'):
config = json_parser(filename)
title_postfix = config['MTTR']
ckpt_candidates = get_checkpoint_candidates(config)
exp, l_median, l_mean, dev = dict_to_execute(config, ckpt_candidates, single_node=True)
fig, ax = subplots()
ax.plot(
ckpt_candidates, exp,
label="exp. of running time", color=yellow, alpha=0.8,
marker='o', markersize=4, markeredgecolor=yellow,
markerfacecolor=yellow)
ax.errorbar(ckpt_candidates, l_mean, dev, alpha=0.8, marker='o',
markersize=4, label="Running time w/ std dev")
ax.set_title("Single-node Batch Job (MTTR: {})".format(title_postfix))
ax.set_xlabel("Checkpoint write interval (unit)")
ax.set_ylabel("Running time (unit)")
legend()
start, end = ax.get_xlim()
ax.xaxis.set_ticks(np.arange(start, end, 2))
savefig("graph_batch_MTTR{}".format(title_postfix), format='pdf')
# 10 nodes, 40 parts, multiple nodes are down, non-detachable or detachable
# ==============================================================================
def proto_multi_2cases(
file_non_detachable='./exp_1node_down_part_not_detachable.json',
file_detachable='./exp_1node_down_part_detachable.json'):
victim_nums = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
config = json_parser(file_non_detachable)
ckpt_candidates = get_checkpoint_candidates(config)
lines_dependent = []
for vn in victim_nums:
print("non-detachable partition, victim number: {}".format(vn))
config = json_parser('./exp_1node_down_part_not_detachable.json')
exp1, median1, mean1, stdev1 = dict_to_execute(config, ckpt_candidates, False, vn)
lines_dependent.append(median1)
lines_independent = []
for vn in victim_nums:
print("detachable partition, victim number: {}".format(vn))
config = json_parser(file_detachable)
exp2, median2, mean2, std2 = dict_to_execute(config, ckpt_candidates, False, vn)
lines_independent.append(median2)
# fig 3 and 4
fig, (ax3, ax4) = subplots(1, 2, sharey='all')
# draw lines
for median in lines_dependent:
vn = lines_dependent.index(median)
        ax3.plot(ckpt_candidates, median, color=blue, alpha=0.2 + 0.1 * vn, marker='o',
markersize=4, markeredgecolor=blue, markerfacecolor=blue,
label="{} nodes down".format(vn + 1))
ax3.set_xlabel("Checkpoint interval (unit)")
ax3.set_ylabel("Median running time (unit)")
tick_params(axis='both', which='major', labelsize=9)
ax3.set_title("Cluster w/ non-detachable partitions")
ax3.legend(fontsize=9, loc='lower right')
start, end = ax3.get_xlim()
ax3.xaxis.set_ticks(np.arange(start, end, 2))
# draw lines
for median in lines_independent:
vn = lines_independent.index(median)
        ax4.plot(ckpt_candidates, median, color=blue, alpha=0.2 + 0.1 * vn, marker='o',
markersize=4, markeredgecolor=blue, markerfacecolor=blue,
label="{} nodes down".format(vn + 1))
ax4.set_xlabel("Checkpoint interval (unit)")
ax4.set_ylabel("Median running time (unit)")
tick_params(axis='both', which='major', labelsize=9)
ax4.set_title("Cluster w/ detachable partitions")
ax4.legend(fontsize=9, loc='lower right')
start, end = ax4.get_xlim()
ax4.xaxis.set_ticks(np.arange(start, end, 2))
savefig("graph_multi")
# 10 nodes, 40 parts, multiple nodes are down, detachable partitions
# ==============================================================================
def proto_multi_detachable(
file_detachable='./exp_1node_down_part_detachable.json'):
victim_nums = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
config = json_parser(file_detachable)
ckpt_candidates = get_checkpoint_candidates(config)
lines_independent = []
for vn in victim_nums:
print("detachable partition, victim number: {}".format(vn))
config = json_parser(file_detachable)
exp2, median, mean, std = dict_to_execute(config, ckpt_candidates, False, vn)
fig, ax4 = subplots(1, 1, sharey='all')
        ax4.plot(ckpt_candidates, median, color=blue, alpha=0.9,
marker='o', markersize=4, markeredgecolor=blue,
markerfacecolor=blue, label="{} nodes down".format(vn + 1))
ax4.set_xlabel("Checkpoint interval (unit)")
ax4.set_ylabel("Median running time (unit)")
tick_params(axis='both', which='major', labelsize=9)
ax4.set_title("Cluster w/ detachable partitions")
ax4.legend(fontsize=9, loc='lower right')
start, end = ax4.get_xlim()
ax4.xaxis.set_ticks(np.arange(start, end, 2))
# vertical lines
mttr, delta = config['MTTR'], config['checkpoint_write_time']
nnodes = config['node']
optimum0 = math.sqrt(2 * mttr * delta)
optimum1 = math.sqrt(2 * mttr * delta * vn / nnodes)
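        # Reference optima for the checkpoint interval: optimum0 has the form
        # of Young's approximation sqrt(2 * M * delta), optimum1 scales it by
        # the fraction of failing nodes (vn out of nnodes)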
axvline(x=optimum0)
axvline(x=optimum1)
ymin, ymax = ax4.get_ylim()
ymid = (ymin + ymax) / 2
ax4.annotate(r'$\sqrt{2M\delta}$', xy=(optimum0, ymid),
xytext=(2, 2))
ax4.annotate(r'$\sqrt{\frac{2M\delta\cdot i}{N}}$', xy=(optimum1, ymid),
xytext=(2, 2))
savefig("graph_multi_detachable_victim{}".format(vn))
# 10 nodes, 40 parts, multiple nodes are down, non-detachable or detachable
# ==============================================================================
def proto_multi_2in1(
file_non_detachable='./exp_1node_down_part_not_detachable.json',
file_detachable='./exp_1node_down_part_detachable.json',
non_detachable=False,
detachable=False):
victim_nums = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
config = json_parser(file_non_detachable)
ckpt_candidates = get_checkpoint_candidates(config)
if non_detachable:
lines_dependent = []
for vn in victim_nums:
print("non-detachable partition, victim number: {}".format(vn))
config = json_parser('./exp_1node_down_part_not_detachable.json')
exp1, median1, mean1, std1 = dict_to_execute(config, ckpt_candidates, False, vn)
lines_dependent.append(median1)
# draw lines
fig, ax3 = subplots(1, 1, sharey='all')
for median in lines_dependent:
vn = lines_dependent.index(median)
            ax3.plot(ckpt_candidates, median, color=blue, alpha=0.2 + 0.1 * vn,
marker='o',
markersize=4, markeredgecolor=blue, markerfacecolor=blue,
label="{} nodes down".format(vn + 1))
ax3.set_xlabel("Checkpoint interval (unit)")
ax3.set_ylabel("Median running time (unit)")
tick_params(axis='both', which='major', labelsize=9)
ax3.set_title("Cluster w/ non-detachable partitions")
ax3.legend(fontsize=9, loc='lower right')
start, end = ax3.get_xlim()
ax3.xaxis.set_ticks(np.arange(start, end, 2))
savefig("graph_multi_nondetachable.pdf")
if detachable:
lines_independent = []
exp2, median2, mean2, stdev2 = None, None, None, None
for vn in victim_nums:
print("detachable partition, victim number: {}".format(vn))
config = json_parser(file_detachable)
exp2, median2, mean2, stdev2 = dict_to_execute(config, ckpt_candidates, False, vn)
lines_independent.append(median2)
# draw lines
fig, ax4 = subplots(1, 1, sharey='all')
for median in lines_independent:
vn = lines_independent.index(median)
ax4.plot(
ckpt_candidates, exp2,
label="exp. of running time", color=yellow, alpha=0.8,
marker='o', markersize=4, markeredgecolor=yellow,
markerfacecolor=yellow)
ax4.plot(
ckpt_candidates, median, color=blue, alpha=0.2 + 0.1 * vn,
marker='o',
markersize=4, markeredgecolor=blue, markerfacecolor=blue,
label="{} nodes down".format(vn + 1))
ax4.set_xlabel("Checkpoint interval (unit)")
ax4.set_ylabel("Median running time (unit)")
tick_params(axis='both', which='major', labelsize=9)
ax4.set_title("Cluster w/ detachable partitions")
ax4.legend(fontsize=9, loc='lower right')
start, end = ax4.get_xlim()
ax4.xaxis.set_ticks(np.arange(start, end, 2))
savefig("graph_multi_detachable.pdf")
def one_down():
# data
config = json_parser('./exp_1node_down_part_not_detachable.json')
config_detachable = json_parser('./exp_1node_down_part_detachable.json')
ckpt_candidates = get_checkpoint_candidates(config)
exp, l_median1, l_mean1, dev1 = dict_to_execute(config, ckpt_candidates)
exp, l_median2, l_mean2, dev2 = dict_to_execute(config_detachable, ckpt_candidates)
# figure
fig, (ax1, ax2) = subplots(1, 2, sharey='all')
    ax1.plot(ckpt_candidates, exp, label="expected completion time", color=yellow, alpha=0.8,
marker='o', markersize=4, markeredgecolor=yellow,
markerfacecolor=yellow)
ax1.errorbar(ckpt_candidates, l_mean1, dev1, color=blue, alpha=0.8, marker='o',
markersize=4, label="Running time w/ std dev")
ax1.set_xlabel("Checkpoint write interval (unit)")
ax1.set_ylabel("Running time (unit)")
tick_params(axis='both', which='major', labelsize=9)
ax1.set_title("1 node down, part non-detachable")
legend(fontsize=9, loc=0)
start, end = ax1.get_xlim()
ax1.xaxis.set_ticks(np.arange(start, end, 2))
    ax2.plot(ckpt_candidates, exp, label="expected completion time", color=yellow, alpha=0.8,
marker='o', markersize=4, markeredgecolor=yellow,
markerfacecolor=yellow)
ax2.errorbar(ckpt_candidates, l_mean2, dev2, color=blue, alpha=0.8, marker='o',
markersize=4, label="Running time w/ std dev")
ax2.set_xlabel("Checkpoint write interval (unit)")
ax2.set_ylabel("Running time (unit)")
tick_params(axis='both', which='major', labelsize=9)
ax2.set_title("1 node down, part detachable")
legend(fontsize=9, loc=0)
start, end = ax2.get_xlim()
ax2.xaxis.set_ticks(np.arange(start, end, 2))
savefig("graph_1down.pdf")
batch_job_verification()
| mit |
khkaminska/scikit-learn | sklearn/externals/joblib/__init__.py | 72 | 4795 | """ Joblib is a set of tools to provide **lightweight pipelining in
Python**. In particular, joblib offers:
1. transparent disk-caching of the output values and lazy re-evaluation
(memoize pattern)
2. easy simple parallel computing
3. logging and tracing of the execution
Joblib is optimized to be **fast** and **robust** in particular on large
data and has specific optimizations for `numpy` arrays. It is
**BSD-licensed**.
============================== ============================================
**User documentation**: http://pythonhosted.org/joblib
**Download packages**: http://pypi.python.org/pypi/joblib#downloads
**Source code**: http://github.com/joblib/joblib
**Report issues**: http://github.com/joblib/joblib/issues
============================== ============================================
Vision
--------
The vision is to provide tools to easily achieve better performance and
reproducibility when working with long running jobs.
* **Avoid computing twice the same thing**: code is rerun over and
  over, for instance when prototyping computational-heavy jobs (as in
  scientific development), but a hand-crafted solution to alleviate this
  issue is error-prone and often leads to unreproducible results
* **Persist to disk transparently**: persisting in an efficient way
arbitrary objects containing large data is hard. Using
joblib's caching mechanism avoids hand-written persistence and
implicitly links the file on disk to the execution context of
the original Python object. As a result, joblib's persistence is
  good for resuming an application status or computational job, e.g.
after a crash.
Joblib strives to address these problems while **leaving your code and
your flow control as unmodified as possible** (no framework, no new
paradigms).
Main features
------------------
1) **Transparent and fast disk-caching of output value:** a memoize or
make-like functionality for Python functions that works well for
arbitrary Python objects, including very large numpy arrays. Separate
persistence and flow-execution logic from domain logic or algorithmic
code by writing the operations as a set of steps with well-defined
inputs and outputs: Python functions. Joblib can save their
computation to disk and rerun it only if necessary::
>>> import numpy as np
>>> from sklearn.externals.joblib import Memory
>>> mem = Memory(cachedir='/tmp/joblib')
>>> import numpy as np
>>> a = np.vander(np.arange(3)).astype(np.float)
>>> square = mem.cache(np.square)
>>> b = square(a) # doctest: +ELLIPSIS
________________________________________________________________________________
[Memory] Calling square...
square(array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]]))
___________________________________________________________square - 0...s, 0.0min
>>> c = square(a)
>>> # The above call did not trigger an evaluation
2) **Embarrassingly parallel helper:** to make it easy to write readable
parallel code and debug it quickly::
>>> from sklearn.externals.joblib import Parallel, delayed
>>> from math import sqrt
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
3) **Logging/tracing:** The different functionalities will
   progressively acquire better logging mechanisms to help track what
   has been run, and capture I/O easily. In addition, Joblib will
   provide a few I/O primitives, to easily define logging and
   display streams, and provide a way of compiling a report.
   We want to be able to quickly inspect what has been run.
4) **Fast compressed Persistence**: a replacement for pickle to work
efficiently on Python objects containing large data (
*joblib.dump* & *joblib.load* ).
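   For instance (an illustrative sketch only; the path is a placeholder
   and the calls are skipped)::
    >>> from sklearn.externals.joblib import dump, load
    >>> dump(a, '/tmp/joblib/a.pkl')  # doctest: +SKIP
    >>> a2 = load('/tmp/joblib/a.pkl')  # doctest: +SKIP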
..
>>> import shutil ; shutil.rmtree('/tmp/joblib/')
"""
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.9.0b4'
from .memory import Memory, MemorizedResult
from .logger import PrintTime
from .logger import Logger
from .hashing import hash
from .numpy_pickle import dump
from .numpy_pickle import load
from .parallel import Parallel
from .parallel import delayed
from .parallel import cpu_count
| bsd-3-clause |
OGGM/oggm | oggm/core/flowline.py | 1 | 126363 | """Flowline modelling: bed shapes and model numerics.
"""
# Builtins
import logging
import copy
from collections import OrderedDict
from functools import partial
from time import gmtime, strftime
import os
import shutil
import warnings
# External libs
import numpy as np
import shapely.geometry as shpg
import xarray as xr
# Optional libs
try:
import salem
except ImportError:
pass
import pandas as pd
# Locals
from oggm import __version__
import oggm.cfg as cfg
from oggm import utils
from oggm import entity_task
from oggm.exceptions import InvalidParamsError, InvalidWorkflowError
from oggm.core.massbalance import (MultipleFlowlineMassBalance,
ConstantMassBalance,
PastMassBalance,
RandomMassBalance)
from oggm.core.centerlines import Centerline, line_order
from oggm.core.inversion import find_sia_flux_from_thickness
# Constants
from oggm.cfg import SEC_IN_DAY, SEC_IN_YEAR
from oggm.cfg import G, GAUSSIAN_KERNEL
# Module logger
log = logging.getLogger(__name__)
class Flowline(Centerline):
"""A Centerline with additional properties: input to the FlowlineModel
"""
def __init__(self, line=None, dx=1, map_dx=None,
surface_h=None, bed_h=None, rgi_id=None,
water_level=None):
""" Initialize a Flowline
Parameters
----------
line : :py:class:`shapely.geometry.LineString`
the geometrical line of a :py:class:`oggm.Centerline`
dx : float
Grid spacing in pixel coordinates
map_dx : float
DEM grid spacing in meters
surface_h: :py:class:`numpy.ndarray`
elevation [m] of the flowline grid points
bed_h: :py:class:`numpy.ndarray`
elevation[m] of the bedrock at the flowline grid points
rgi_id : str
The glacier's RGI identifier
water_level : float
The water level (to compute volume below sea-level)
"""
        # This is to add flexibility for testing
if dx is None:
dx = 1.
if line is None:
coords = np.arange(len(surface_h)) * dx
line = shpg.LineString(np.vstack([coords, coords * 0.]).T)
super(Flowline, self).__init__(line, dx, surface_h)
self._thick = utils.clip_min(surface_h - bed_h, 0.)
self.map_dx = map_dx
self.dx_meter = map_dx * self.dx
self.bed_h = bed_h
self.rgi_id = rgi_id
self.water_level = water_level
# volume not yet removed from the flowline
self.calving_bucket_m3 = 0
def has_ice(self):
return np.any(self.thick > 0)
@Centerline.widths.getter
def widths(self):
"""Compute the widths out of H and shape"""
return self.widths_m / self.map_dx
@property
def thick(self):
"""Needed for overriding later"""
return self._thick
@thick.setter
def thick(self, value):
self._thick = utils.clip_min(value, 0)
@Centerline.surface_h.getter
def surface_h(self):
return self._thick + self.bed_h
@surface_h.setter
def surface_h(self, value):
self.thick = value - self.bed_h
@property
def bin_area_m2(self):
# area of the grid point
# this takes the ice thickness into account
return np.where(self.thick > 0, self.widths_m, 0) * self.dx_meter
@property
def length_m(self):
# TODO: take calving bucket into account for fine tuned length?
lt = cfg.PARAMS.get('min_ice_thick_for_length', 0)
if cfg.PARAMS.get('glacier_length_method') == 'consecutive':
if (self.thick > lt).all():
nx = len(self.thick)
else:
nx = np.where(self.thick <= lt)[0][0]
else:
nx = len(np.where(self.thick > lt)[0])
return nx * self.dx_meter
@property
def terminus_index(self):
# the index of the last point with ice thickness above
# min_ice_thick_for_length and consistent with length
lt = cfg.PARAMS.get('min_ice_thick_for_length', 0)
if cfg.PARAMS.get('glacier_length_method') == 'consecutive':
if (self.thick > lt).all():
ix = len(self.thick) - 1
else:
ix = np.where(self.thick <= lt)[0][0] - 1
else:
try:
ix = np.where(self.thick > lt)[0][-1]
except IndexError:
ix = -1
return ix
@property
def volume_m3(self):
return utils.clip_min(np.sum(self.section * self.dx_meter) -
getattr(self, 'calving_bucket_m3', 0), 0)
@property
def volume_km3(self):
return self.volume_m3 * 1e-9
def _vol_below_level(self, water_level=0):
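        # Temporarily keep only the columns whose bed is below `water_level`,
        # clip their thickness to the part below that level to get the
        # below-water volume, then remove a share of the calving bucket
        # proportional to the below-water fraction of those columns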
thick = np.copy(self.thick)
n_thick = np.copy(thick)
bwl = (self.bed_h < water_level) & (thick > 0)
n_thick[~bwl] = 0
self.thick = n_thick
vol_tot = np.sum(self.section * self.dx_meter)
n_thick[bwl] = utils.clip_max(self.surface_h[bwl],
water_level) - self.bed_h[bwl]
self.thick = n_thick
vol_bwl = np.sum(self.section * self.dx_meter)
self.thick = thick
fac = vol_bwl / vol_tot if vol_tot > 0 else 0
return utils.clip_min(vol_bwl -
getattr(self, 'calving_bucket_m3', 0) * fac, 0)
@property
def volume_bsl_m3(self):
return self._vol_below_level(water_level=0)
@property
def volume_bsl_km3(self):
return self.volume_bsl_m3 * 1e-9
@property
def volume_bwl_m3(self):
return self._vol_below_level(water_level=self.water_level)
@property
def volume_bwl_km3(self):
return self.volume_bwl_m3 * 1e-9
@property
def area_m2(self):
# TODO: take calving bucket into account
return np.sum(self.bin_area_m2)
@property
def area_km2(self):
return self.area_m2 * 1e-6
def _add_attrs_to_dataset(self, ds):
"""Add bed specific parameters."""
raise NotImplementedError()
def to_dataset(self):
"""Makes an xarray Dataset out of the flowline."""
h = self.surface_h
nx = len(h)
ds = xr.Dataset()
ds.coords['x'] = np.arange(nx)
ds.coords['c'] = [0, 1]
try:
ds['linecoords'] = (['x', 'c'], np.asarray(self.line.coords))
except AttributeError:
# squeezed lines
pass
ds['surface_h'] = (['x'], h)
ds['bed_h'] = (['x'], self.bed_h)
ds.attrs['class'] = type(self).__name__
ds.attrs['map_dx'] = self.map_dx
ds.attrs['dx'] = self.dx
self._add_attrs_to_dataset(ds)
return ds
class ParabolicBedFlowline(Flowline):
"""A parabolic shaped Flowline with one degree of freedom
"""
def __init__(self, line=None, dx=None, map_dx=None,
surface_h=None, bed_h=None, bed_shape=None, rgi_id=None,
water_level=None):
""" Instanciate.
Parameters
----------
line : :py:class:`shapely.geometry.LineString`
the geometrical line of a :py:class:`oggm.Centerline`
Properties
----------
#TODO: document properties
"""
super(ParabolicBedFlowline, self).__init__(line, dx, map_dx,
surface_h, bed_h,
rgi_id=rgi_id,
water_level=water_level)
assert np.all(np.isfinite(bed_shape))
self.bed_shape = bed_shape
@property
def widths_m(self):
"""Compute the widths out of H and shape"""
return np.sqrt(4*self.thick/self.bed_shape)
@property
def section(self):
return 2./3. * self.widths_m * self.thick
@section.setter
def section(self, val):
self.thick = (0.75 * val * np.sqrt(self.bed_shape))**(2./3.)
@utils.lazy_property
def shape_str(self):
"""The bed shape in text (for debug and other things)"""
return np.repeat('parabolic', self.nx)
def _add_attrs_to_dataset(self, ds):
"""Add bed specific parameters."""
ds['bed_shape'] = (['x'], self.bed_shape)
class RectangularBedFlowline(Flowline):
"""Simple shaped Flowline, glacier width does not change with ice thickness
"""
def __init__(self, line=None, dx=None, map_dx=None,
surface_h=None, bed_h=None, widths=None, rgi_id=None,
water_level=None):
""" Instanciate.
Parameters
----------
line : :py:class:`shapely.geometry.LineString`
the geometrical line of a :py:class:`oggm.Centerline`
Properties
----------
#TODO: document properties
"""
super(RectangularBedFlowline, self).__init__(line, dx, map_dx,
surface_h, bed_h,
rgi_id=rgi_id,
water_level=water_level)
self._widths = widths
@property
def widths_m(self):
"""Compute the widths out of H and shape"""
return self._widths * self.map_dx
@property
def section(self):
return self.widths_m * self.thick
@section.setter
def section(self, val):
self.thick = val / self.widths_m
@utils.lazy_property
def shape_str(self):
"""The bed shape in text (for debug and other things)"""
return np.repeat('rectangular', self.nx)
def _add_attrs_to_dataset(self, ds):
"""Add bed specific parameters."""
ds['widths'] = (['x'], self._widths)
class TrapezoidalBedFlowline(Flowline):
"""A Flowline with trapezoidal shape and two degrees of freedom
"""
def __init__(self, line=None, dx=None, map_dx=None, surface_h=None,
bed_h=None, widths=None, lambdas=None, rgi_id=None,
water_level=None):
""" Instanciate.
Parameters
----------
line : :py:class:`shapely.geometry.LineString`
the geometrical line of a :py:class:`oggm.Centerline`
Properties
----------
#TODO: document properties
"""
super(TrapezoidalBedFlowline, self).__init__(line, dx, map_dx,
surface_h, bed_h,
rgi_id=rgi_id,
water_level=water_level)
self._w0_m = widths * self.map_dx - lambdas * self.thick
if np.any(self._w0_m <= 0):
raise ValueError('Trapezoid beds need to have origin widths > 0.')
self._prec = np.where(lambdas == 0)[0]
self._lambdas = lambdas
@property
def widths_m(self):
"""Compute the widths out of H and shape"""
return self._w0_m + self._lambdas * self.thick
@property
def section(self):
return (self.widths_m + self._w0_m) / 2 * self.thick
@section.setter
def section(self, val):
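        # Invert A = w0 * h + lambda * h**2 / 2 for h (positive root of the
        # quadratic); rectangular bins (lambda == 0) are handled separately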
b = 2 * self._w0_m
a = 2 * self._lambdas
with np.errstate(divide='ignore', invalid='ignore'):
thick = (np.sqrt(b**2 + 4 * a * val) - b) / a
thick[self._prec] = val[self._prec] / self._w0_m[self._prec]
self.thick = thick
@utils.lazy_property
def shape_str(self):
"""The bed shape in text (for debug and other things)"""
return np.repeat('trapezoid', self.nx)
def _add_attrs_to_dataset(self, ds):
"""Add bed specific parameters."""
ds['widths'] = (['x'], self.widths)
ds['lambdas'] = (['x'], self._lambdas)
class MixedBedFlowline(Flowline):
"""A Flowline which can take a combination of different shapes (default)
The default shape is parabolic. At ice divides a rectangular shape is used.
And if the parabola gets too flat a trapezoidal shape is used.
"""
def __init__(self, *, line=None, dx=None, map_dx=None, surface_h=None,
bed_h=None, section=None, bed_shape=None,
is_trapezoid=None, lambdas=None, widths_m=None, rgi_id=None,
water_level=None):
""" Instanciate.
Parameters
----------
line : :py:class:`shapely.geometry.LineString`
the geometrical line of a :py:class:`oggm.Centerline`
Properties
----------
#TODO: document properties
        widths_m is optional - for thick=0
"""
super(MixedBedFlowline, self).__init__(line=line, dx=dx, map_dx=map_dx,
surface_h=surface_h.copy(),
bed_h=bed_h.copy(),
rgi_id=rgi_id,
water_level=water_level)
# To speedup calculations if no trapezoid bed is present
self._do_trapeze = np.any(is_trapezoid)
# Parabolic
assert len(bed_shape) == self.nx
self.bed_shape = bed_shape.copy()
self._sqrt_bed = np.sqrt(bed_shape)
# Trapeze
assert len(lambdas) == self.nx
assert len(is_trapezoid) == self.nx
self._lambdas = lambdas.copy()
self._ptrap = np.where(is_trapezoid)[0]
self.is_trapezoid = is_trapezoid
self.is_rectangular = self.is_trapezoid & (self._lambdas == 0)
# Sanity
self.bed_shape[is_trapezoid] = np.NaN
self._lambdas[~is_trapezoid] = np.NaN
# Here we have to compute the widths out of section and lambda
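        # Trapezoid area A = (w0 + w) * h / 2 with w = w0 + lambda * h,
        # hence w0 = A / h - lambda * h / 2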
thick = surface_h - bed_h
with np.errstate(divide='ignore', invalid='ignore'):
self._w0_m = section / thick - lambdas * thick / 2
assert np.all(section >= 0)
need_w = (section == 0) & is_trapezoid
if np.any(need_w):
if widths_m is None:
raise ValueError('We need a non-zero section for trapezoid '
'shapes unless you provide widths_m.')
self._w0_m[need_w] = widths_m[need_w]
self._w0_m[~is_trapezoid] = np.NaN
if (np.any(self._w0_m[self._ptrap] <= 0) or
np.any(~np.isfinite(self._w0_m[self._ptrap]))):
raise ValueError('Trapezoid beds need to have origin widths > 0.')
assert np.all(self.bed_shape[~is_trapezoid] > 0)
self._prec = np.where(is_trapezoid & (lambdas == 0))[0]
assert np.allclose(section, self.section)
@property
def widths_m(self):
"""Compute the widths out of H and shape"""
out = np.sqrt(4*self.thick/self.bed_shape)
if self._do_trapeze:
out[self._ptrap] = (self._w0_m[self._ptrap] +
self._lambdas[self._ptrap] *
self.thick[self._ptrap])
return out
@property
def section(self):
out = 2./3. * self.widths_m * self.thick
if self._do_trapeze:
out[self._ptrap] = ((self.widths_m[self._ptrap] +
self._w0_m[self._ptrap]) / 2 *
self.thick[self._ptrap])
return out
@section.setter
def section(self, val):
out = (0.75 * val * self._sqrt_bed)**(2./3.)
if self._do_trapeze:
b = 2 * self._w0_m[self._ptrap]
a = 2 * self._lambdas[self._ptrap]
with np.errstate(divide='ignore', invalid='ignore'):
out[self._ptrap] = ((np.sqrt(b ** 2 + 4 * a * val[self._ptrap])
- b) / a)
out[self._prec] = val[self._prec] / self._w0_m[self._prec]
self.thick = out
@utils.lazy_property
def shape_str(self):
"""The bed shape in text (for debug and other things)"""
out = np.repeat('rectangular', self.nx)
out[~ self.is_trapezoid] = 'parabolic'
out[self.is_trapezoid & ~ self.is_rectangular] = 'trapezoid'
return out
def _add_attrs_to_dataset(self, ds):
"""Add bed specific parameters."""
ds['section'] = (['x'], self.section)
ds['bed_shape'] = (['x'], self.bed_shape)
ds['is_trapezoid'] = (['x'], self.is_trapezoid)
ds['widths_m'] = (['x'], self._w0_m)
ds['lambdas'] = (['x'], self._lambdas)
class FlowlineModel(object):
"""Interface to OGGM's flowline models"""
def __init__(self, flowlines, mb_model=None, y0=0., glen_a=None,
fs=None, inplace=False, smooth_trib_influx=True,
is_tidewater=False, is_lake_terminating=False,
mb_elev_feedback='annual', check_for_boundaries=None,
water_level=None):
"""Create a new flowline model from the flowlines and a MB model.
Parameters
----------
flowlines : list
a list of :py:class:`oggm.Flowline` instances, sorted by order
mb_model : :py:class:`oggm.core.massbalance.MassBalanceModel`
the MB model to use
y0 : int
the starting year of the simulation
glen_a : float
glen's parameter A
fs: float
sliding parameter
inplace : bool
whether or not to make a copy of the flowline objects for the run
setting to True implies that your objects will be modified at run
time by the model (can help to spare memory)
smooth_trib_influx : bool
whether to smooth the mass influx from the incoming tributary.
The default is to use a gaussian kernel on a 9 grid points
window.
is_tidewater: bool, default: False
is this a tidewater glacier?
is_lake_terminating: bool, default: False
is this a lake terminating glacier?
mb_elev_feedback : str, default: 'annual'
'never', 'always', 'annual', or 'monthly': how often the
mass-balance should be recomputed from the mass balance model.
'Never' is equivalent to 'annual' but without elevation feedback
at all (the heights are taken from the first call).
check_for_boundaries : bool
whether the model should raise an error when the glacier exceeds
the domain boundaries. The default is to follow
PARAMS['error_when_glacier_reaches_boundaries']
"""
self.is_tidewater = is_tidewater
self.is_lake_terminating = is_lake_terminating
self.is_marine_terminating = is_tidewater and not is_lake_terminating
if water_level is None:
self.water_level = 0
if self.is_lake_terminating:
if not flowlines[-1].has_ice():
raise InvalidParamsError('Set `water_level` for lake '
'terminating glaciers in '
'idealized runs')
                # Arbitrary water level: free board below the last grid point's elevation
min_h = flowlines[-1].surface_h[flowlines[-1].thick > 0][-1]
self.water_level = (min_h -
cfg.PARAMS['free_board_lake_terminating'])
else:
self.water_level = water_level
# Mass balance
self.mb_elev_feedback = mb_elev_feedback.lower()
if self.mb_elev_feedback in ['never', 'annual']:
self.mb_step = 'annual'
elif self.mb_elev_feedback in ['always', 'monthly']:
self.mb_step = 'monthly'
self.mb_model = mb_model
# Defaults
if glen_a is None:
glen_a = cfg.PARAMS['glen_a']
if fs is None:
fs = cfg.PARAMS['fs']
self.glen_a = glen_a
self.fs = fs
self.glen_n = cfg.PARAMS['glen_n']
self.rho = cfg.PARAMS['ice_density']
if check_for_boundaries is None:
check_for_boundaries = cfg.PARAMS[('error_when_glacier_reaches_'
'boundaries')]
self.check_for_boundaries = check_for_boundaries
# we keep glen_a as input, but for optimisation we stick to "fd"
self._fd = 2. / (cfg.PARAMS['glen_n']+2) * self.glen_a
# Calving shenanigans
self.calving_m3_since_y0 = 0. # total calving since time y0
self.calving_rate_myr = 0.
self.y0 = None
self.t = None
self.reset_y0(y0)
self.fls = None
self._tributary_indices = None
self.reset_flowlines(flowlines, inplace=inplace,
smooth_trib_influx=smooth_trib_influx)
@property
def mb_model(self):
return self._mb_model
@mb_model.setter
def mb_model(self, value):
# We need a setter because the MB func is stored as an attr too
_mb_call = None
if value:
if self.mb_elev_feedback in ['always', 'monthly']:
_mb_call = value.get_monthly_mb
elif self.mb_elev_feedback in ['annual', 'never']:
_mb_call = value.get_annual_mb
else:
raise ValueError('mb_elev_feedback not understood')
self._mb_model = value
self._mb_call = _mb_call
self._mb_current_date = None
self._mb_current_out = dict()
self._mb_current_heights = dict()
def reset_y0(self, y0):
"""Reset the initial model time"""
self.y0 = y0
self.t = 0
def reset_flowlines(self, flowlines, inplace=False,
smooth_trib_influx=True):
"""Reset the initial model flowlines"""
if not inplace:
flowlines = copy.deepcopy(flowlines)
try:
len(flowlines)
except TypeError:
flowlines = [flowlines]
self.fls = flowlines
# list of tributary coordinates and stuff
trib_ind = []
for fl in self.fls:
# Important also
fl.water_level = self.water_level
if fl.flows_to is None:
trib_ind.append((None, None, None, None))
continue
idl = self.fls.index(fl.flows_to)
ide = fl.flows_to_indice
if not smooth_trib_influx:
gk = 1
id0 = ide
id1 = ide+1
elif fl.flows_to.nx >= 9:
gk = GAUSSIAN_KERNEL[9]
id0 = ide-4
id1 = ide+5
elif fl.flows_to.nx >= 7:
gk = GAUSSIAN_KERNEL[7]
id0 = ide-3
id1 = ide+4
elif fl.flows_to.nx >= 5:
gk = GAUSSIAN_KERNEL[5]
id0 = ide-2
id1 = ide+3
trib_ind.append((idl, id0, id1, gk))
self._tributary_indices = trib_ind
@property
def yr(self):
return self.y0 + self.t / SEC_IN_YEAR
@property
def area_m2(self):
return np.sum([f.area_m2 for f in self.fls])
@property
def volume_m3(self):
return np.sum([f.volume_m3 for f in self.fls])
@property
def volume_km3(self):
return self.volume_m3 * 1e-9
@property
def volume_bsl_m3(self):
return np.sum([f.volume_bsl_m3 for f in self.fls])
@property
def volume_bsl_km3(self):
return self.volume_bsl_m3 * 1e-9
@property
def volume_bwl_m3(self):
return np.sum([f.volume_bwl_m3 for f in self.fls])
@property
def volume_bwl_km3(self):
return self.volume_bwl_m3 * 1e-9
@property
def area_km2(self):
return self.area_m2 * 1e-6
@property
def length_m(self):
return self.fls[-1].length_m
def get_mb(self, heights, year=None, fl_id=None, fls=None):
"""Get the mass balance at the requested height and time.
Optimized so that no mb model call is necessary at each step.
"""
# Do we even have to optimise?
if self.mb_elev_feedback == 'always':
return self._mb_call(heights, year=year, fl_id=fl_id, fls=fls)
# Ok, user asked for it
if fl_id is None:
            raise ValueError('Need fl_id')
if self.mb_elev_feedback == 'never':
# The very first call we take the heights
if fl_id not in self._mb_current_heights:
# We need to reset just this tributary
self._mb_current_heights[fl_id] = heights
# All calls we replace
heights = self._mb_current_heights[fl_id]
date = utils.floatyear_to_date(year)
if self.mb_elev_feedback in ['annual', 'never']:
# ignore month changes
date = (date[0], date[0])
if self._mb_current_date == date:
if fl_id not in self._mb_current_out:
# We need to reset just this tributary
self._mb_current_out[fl_id] = self._mb_call(heights,
year=year,
fl_id=fl_id,
fls=fls)
else:
# We need to reset all
self._mb_current_date = date
self._mb_current_out = dict()
self._mb_current_out[fl_id] = self._mb_call(heights,
year=year,
fl_id=fl_id,
fls=fls)
return self._mb_current_out[fl_id]
def to_netcdf(self, path):
"""Creates a netcdf group file storing the state of the model."""
flows_to_id = []
for trib in self._tributary_indices:
flows_to_id.append(trib[0] if trib[0] is not None else -1)
ds = xr.Dataset()
try:
ds.attrs['description'] = 'OGGM model output'
ds.attrs['oggm_version'] = __version__
ds.attrs['calendar'] = '365-day no leap'
ds.attrs['creation_date'] = strftime("%Y-%m-%d %H:%M:%S", gmtime())
ds['flowlines'] = ('flowlines', np.arange(len(flows_to_id)))
ds['flows_to_id'] = ('flowlines', flows_to_id)
ds.to_netcdf(path)
for i, fl in enumerate(self.fls):
ds = fl.to_dataset()
ds.to_netcdf(path, 'a', group='fl_{}'.format(i))
finally:
ds.close()
def check_domain_end(self):
"""Returns False if the glacier reaches the domains bound."""
return np.isclose(self.fls[-1].thick[-1], 0)
def step(self, dt):
"""Advance the numerical simulation of one single step.
Important: the step dt is a maximum boundary that is *not* guaranteed
to be met if dt is too large for the underlying numerical
implementation. However, ``step(dt)`` should never cross the desired
time step, i.e. if dt is small enough to ensure stability, step
should match it.
The caller will know how much has been actually advanced by looking
        at the output of ``step()`` or by monitoring ``self.t`` or ``self.yr``
Parameters
----------
dt : float
the step length in seconds
Returns
-------
the actual dt chosen by the numerical implementation. Guaranteed to
be dt or lower.
"""
raise NotImplementedError
def run_until(self, y1):
"""Runs the model from the current year up to a given year date y1.
This function runs the model for the time difference y1-self.y0
If self.y0 has not been specified at some point, it is 0 and y1 will
be the time span in years to run the model for.
Parameters
----------
y1 : float
Upper time span for how long the model should run
"""
# We force timesteps to monthly frequencies for consistent results
# among use cases (monthly or yearly output) and also to prevent
# "too large" steps in the adaptive scheme.
ts = utils.monthly_timeseries(self.yr, y1)
# Add the last date to be sure we end on it
ts = np.append(ts, y1)
# Loop over the steps we want to meet
for y in ts:
t = (y - self.y0) * SEC_IN_YEAR
# because of CFL, step() doesn't ensure that the end date is met
# lets run the steps until we reach our desired date
while self.t < t:
self.step(t-self.t)
# Check for domain bounds
if self.check_for_boundaries:
if self.fls[-1].thick[-1] > 10:
raise RuntimeError('Glacier exceeds domain boundaries, '
'at year: {}'.format(self.yr))
# Check for NaNs
for fl in self.fls:
if np.any(~np.isfinite(fl.thick)):
raise FloatingPointError('NaN in numerical solution, '
'at year: {}'.format(self.yr))
def run_until_and_store(self, y1,
run_path=None,
geom_path=None,
diag_path=None,
store_monthly_step=None):
"""Runs the model and returns intermediate steps in xarray datasets.
This function repeatedly calls FlowlineModel.run_until for either
monthly or yearly time steps up till the upper time boundary y1.
Parameters
----------
y1 : int
Upper time span for how long the model should run (needs to be
a full year)
run_path : str
Deprecated and renamed to geom_path
geom_path : str or bool
Path and filename where to store the model geometry dataset. This
dataset contains all necessary info to retrieve the full glacier
geometry after the run, with a FileModel. This is stored
on an annual basis.
The default (None) will not store the dataset to disk but return
the dataset to the user after execution.
Set this to False to prevent creating this dataset altogether
(for optimisation).
diag_path : str
Path and filename where to store the model diagnostics dataset
store_monthly_step : Bool
If True (False) model diagnostics will be stored monthly (yearly).
If unspecified, we follow the update of the MB model, which
defaults to yearly (see __init__).
Returns
-------
geom_ds : xarray.Dataset or None
stores the entire glacier geometry. It is useful to visualize the
glacier geometry or to restart a new run from a modelled geometry.
The glacier state is stored at the beginning of each hydrological
year (not in between in order to spare disk space).
diag_ds : xarray.Dataset
stores a few diagnostic variables such as the volume, area, length
and ELA of the glacier.
"""
if int(y1) != y1:
raise InvalidParamsError('run_until_and_store only accepts '
'integer year dates.')
if not self.mb_model.hemisphere:
raise InvalidParamsError('run_until_and_store needs a '
'mass-balance model with an unambiguous '
'hemisphere.')
if run_path is not None:
warnings.warn("`run_path` has been renamed to `geom_path` and "
"will be deleted in the future.", FutureWarning)
geom_path = run_path
# Do we need to create a geometry dataset?
do_geom = geom_path is None or geom_path
# time
yearly_time = np.arange(np.floor(self.yr), np.floor(y1)+1)
if store_monthly_step is None:
store_monthly_step = self.mb_step == 'monthly'
if store_monthly_step:
monthly_time = utils.monthly_timeseries(self.yr, y1)
else:
monthly_time = np.arange(np.floor(self.yr), np.floor(y1)+1)
sm = cfg.PARAMS['hydro_month_' + self.mb_model.hemisphere]
yrs, months = utils.floatyear_to_date(monthly_time)
cyrs, cmonths = utils.hydrodate_to_calendardate(yrs, months,
start_month=sm)
# init output
if geom_path:
self.to_netcdf(geom_path)
ny = len(yearly_time)
if ny == 1:
yrs = [yrs]
cyrs = [cyrs]
months = [months]
cmonths = [cmonths]
nm = len(monthly_time)
if do_geom:
sects = [(np.zeros((ny, fl.nx)) * np.NaN) for fl in self.fls]
widths = [(np.zeros((ny, fl.nx)) * np.NaN) for fl in self.fls]
bucket = [np.zeros(ny) for _ in self.fls]
# Diagnostics dataset
diag_ds = xr.Dataset()
# Global attributes
diag_ds.attrs['description'] = 'OGGM model output'
diag_ds.attrs['oggm_version'] = __version__
diag_ds.attrs['calendar'] = '365-day no leap'
diag_ds.attrs['creation_date'] = strftime("%Y-%m-%d %H:%M:%S",
gmtime())
diag_ds.attrs['water_level'] = self.water_level
diag_ds.attrs['glen_a'] = self.glen_a
diag_ds.attrs['fs'] = self.fs
# Add MB model attributes
diag_ds.attrs['mb_model_class'] = self.mb_model.__class__.__name__
for k, v in self.mb_model.__dict__.items():
if np.isscalar(v) and not k.startswith('_'):
diag_ds.attrs['mb_model_{}'.format(k)] = v
# Coordinates
diag_ds.coords['time'] = ('time', monthly_time)
diag_ds.coords['hydro_year'] = ('time', yrs)
diag_ds.coords['hydro_month'] = ('time', months)
diag_ds.coords['calendar_year'] = ('time', cyrs)
diag_ds.coords['calendar_month'] = ('time', cmonths)
diag_ds['time'].attrs['description'] = 'Floating hydrological year'
diag_ds['hydro_year'].attrs['description'] = 'Hydrological year'
diag_ds['hydro_month'].attrs['description'] = 'Hydrological month'
diag_ds['calendar_year'].attrs['description'] = 'Calendar year'
diag_ds['calendar_month'].attrs['description'] = 'Calendar month'
# Variables and attributes
ovars = cfg.PARAMS['store_diagnostic_variables']
if 'volume' in ovars:
diag_ds['volume_m3'] = ('time', np.zeros(nm) * np.NaN)
diag_ds['volume_m3'].attrs['description'] = 'Total glacier volume'
diag_ds['volume_m3'].attrs['unit'] = 'm 3'
if 'volume_bsl' in ovars:
diag_ds['volume_bsl_m3'] = ('time', np.zeros(nm) * np.NaN)
diag_ds['volume_bsl_m3'].attrs['description'] = ('Glacier volume '
'below '
'sea-level')
diag_ds['volume_bsl_m3'].attrs['unit'] = 'm 3'
if 'volume_bwl' in ovars:
diag_ds['volume_bwl_m3'] = ('time', np.zeros(nm) * np.NaN)
diag_ds['volume_bwl_m3'].attrs['description'] = ('Glacier volume '
'below '
'water-level')
diag_ds['volume_bwl_m3'].attrs['unit'] = 'm 3'
if 'area' in ovars:
diag_ds['area_m2'] = ('time', np.zeros(nm) * np.NaN)
diag_ds['area_m2'].attrs['description'] = 'Total glacier area'
diag_ds['area_m2'].attrs['unit'] = 'm 2'
if 'length' in ovars:
diag_ds['length_m'] = ('time', np.zeros(nm) * np.NaN)
diag_ds['length_m'].attrs['description'] = 'Glacier length'
diag_ds['length_m'].attrs['unit'] = 'm'
if 'calving' in ovars:
diag_ds['calving_m3'] = ('time', np.zeros(nm) * np.NaN)
diag_ds['calving_m3'].attrs['description'] = ('Total accumulated '
'calving flux')
diag_ds['calving_m3'].attrs['unit'] = 'm 3'
if 'calving_rate' in ovars:
diag_ds['calving_rate_myr'] = ('time', np.zeros(nm) * np.NaN)
diag_ds['calving_rate_myr'].attrs['description'] = 'Calving rate'
diag_ds['calving_rate_myr'].attrs['unit'] = 'm yr-1'
for gi in range(10):
vn = f'terminus_thick_{gi}'
if vn in ovars:
diag_ds[vn] = ('time', np.zeros(nm) * np.NaN)
diag_ds[vn].attrs['description'] = ('Thickness of grid point '
f'{gi} from terminus.')
diag_ds[vn].attrs['unit'] = 'm'
# Run
j = 0
for i, (yr, mo) in enumerate(zip(monthly_time, months)):
# Model run
self.run_until(yr)
# Glacier geometry
if do_geom and mo == 1:
for s, w, b, fl in zip(sects, widths, bucket, self.fls):
s[j, :] = fl.section
w[j, :] = fl.widths_m
if self.is_tidewater:
try:
b[j] = fl.calving_bucket_m3
except AttributeError:
pass
j += 1
# Diagnostics
if 'volume' in ovars:
diag_ds['volume_m3'].data[i] = self.volume_m3
if 'area' in ovars:
diag_ds['area_m2'].data[i] = self.area_m2
if 'length' in ovars:
diag_ds['length_m'].data[i] = self.length_m
if 'calving' in ovars:
diag_ds['calving_m3'].data[i] = self.calving_m3_since_y0
if 'calving_rate' in ovars:
diag_ds['calving_rate_myr'].data[i] = self.calving_rate_myr
if 'volume_bsl' in ovars:
diag_ds['volume_bsl_m3'].data[i] = self.volume_bsl_m3
if 'volume_bwl' in ovars:
diag_ds['volume_bwl_m3'].data[i] = self.volume_bwl_m3
# Terminus thick is a bit more logic
ti = None
for gi in range(10):
vn = f'terminus_thick_{gi}'
if vn in ovars:
if ti is None:
ti = self.fls[-1].terminus_index
diag_ds[vn].data[i] = self.fls[-1].thick[ti - gi]
# to datasets
geom_ds = None
if do_geom:
geom_ds = []
for (s, w, b) in zip(sects, widths, bucket):
ds = xr.Dataset()
ds.attrs['description'] = 'OGGM model output'
ds.attrs['oggm_version'] = __version__
ds.attrs['calendar'] = '365-day no leap'
ds.attrs['creation_date'] = strftime("%Y-%m-%d %H:%M:%S",
gmtime())
ds.attrs['water_level'] = self.water_level
ds.attrs['glen_a'] = self.glen_a
ds.attrs['fs'] = self.fs
# Add MB model attributes
ds.attrs['mb_model_class'] = self.mb_model.__class__.__name__
for k, v in self.mb_model.__dict__.items():
if np.isscalar(v) and not k.startswith('_'):
ds.attrs['mb_model_{}'.format(k)] = v
ds.coords['time'] = yearly_time
ds['time'].attrs['description'] = 'Floating hydrological year'
varcoords = OrderedDict(time=('time', yearly_time),
year=('time', yearly_time))
ds['ts_section'] = xr.DataArray(s, dims=('time', 'x'),
coords=varcoords)
ds['ts_width_m'] = xr.DataArray(w, dims=('time', 'x'),
coords=varcoords)
ds['ts_calving_bucket_m3'] = xr.DataArray(b, dims=('time', ),
coords=varcoords)
geom_ds.append(ds)
# write output?
if do_geom and geom_path is not None:
encode = {'ts_section': {'zlib': True, 'complevel': 5},
'ts_width_m': {'zlib': True, 'complevel': 5},
}
for i, ds in enumerate(geom_ds):
ds.to_netcdf(geom_path, 'a', group='fl_{}'.format(i),
encoding=encode)
# Add other diagnostics (Fabien in 2021: why?)
diag_ds.to_netcdf(geom_path, 'a')
if diag_path is not None:
diag_ds.to_netcdf(diag_path)
return geom_ds, diag_ds
def run_until_equilibrium(self, rate=0.001, ystep=5, max_ite=200):
""" Runs the model until an equilibrium state is reached.
Be careful: This only works for CONSTANT (not time-dependant)
mass-balance models.
Otherwise the returned state will not be in equilibrium! Don't try to
calculate an equilibrium state with a RandomMassBalance model!
"""
ite = 0
was_close_zero = 0
t_rate = 1
while (t_rate > rate) and (ite <= max_ite) and (was_close_zero < 5):
ite += 1
v_bef = self.volume_m3
self.run_until(self.yr + ystep)
v_af = self.volume_m3
if np.isclose(v_bef, 0., atol=1):
t_rate = 1
was_close_zero += 1
else:
t_rate = np.abs(v_af - v_bef) / v_bef
if ite > max_ite:
raise RuntimeError('Did not find equilibrium.')
def flux_gate_with_build_up(year, flux_value=None, flux_gate_yr=None):
"""Default scalar flux gate with build up period"""
fac = 1 - (flux_gate_yr - year) / flux_gate_yr
return flux_value * utils.clip_scalar(fac, 0, 1)
class FluxBasedModel(FlowlineModel):
"""The flowline model used by OGGM in production.
It solves for the SIA along the flowline(s) using a staggered grid. It
computes the *ice flux* between grid points and transports the mass
accordingly (also between flowlines).
This model is numerically less stable than fancier schemes, but it
is fast and works with multiple flowlines of any bed shape (rectangular,
parabolic, trapeze, and any combination of them).
We test that it conserves mass in most cases, but not on very stiff cliffs.
"""
def __init__(self, flowlines, mb_model=None, y0=0., glen_a=None,
fs=0., inplace=False, fixed_dt=None, cfl_number=None,
min_dt=None, flux_gate_thickness=None,
flux_gate=None, flux_gate_build_up=100,
do_kcalving=None, calving_k=None, calving_use_limiter=None,
calving_limiter_frac=None, water_level=None,
**kwargs):
"""Instanciate the model.
Parameters
----------
flowlines : list
the glacier flowlines
mb_model : MassBalanceModel
the mass-balance model
y0 : int
initial year of the simulation
glen_a : float
Glen's creep parameter
fs : float
Oerlemans sliding parameter
inplace : bool
whether or not to make a copy of the flowline objects for the run
setting to True implies that your objects will be modified at run
time by the model (can help to spare memory)
fixed_dt : float
set to a value (in seconds) to prevent adaptive time-stepping.
cfl_number : float
Defaults to cfg.PARAMS['cfl_number'].
For adaptive time stepping (the default), dt is chosen from the
CFL criterion (dt = cfl_number * dx / max_u).
To choose the "best" CFL number we would need a stability
analysis - we used an empirical analysis (see blog post) and
settled on 0.02 for the default cfg.PARAMS['cfl_number'].
min_dt : float
Defaults to cfg.PARAMS['cfl_min_dt'].
At high velocities, time steps can become very small and your
model might run very slowly. In production, it might be useful to
set a limit below which the model will just error.
is_tidewater: bool, default: False
is this a tidewater glacier?
is_lake_terminating: bool, default: False
is this a lake terminating glacier?
mb_elev_feedback : str, default: 'annual'
'never', 'always', 'annual', or 'monthly': how often the
mass-balance should be recomputed from the mass balance model.
'Never' is equivalent to 'annual' but without elevation feedback
at all (the heights are taken from the first call).
check_for_boundaries: bool, default: True
raise an error when the glacier grows bigger than the domain
boundaries
flux_gate_thickness : float or array
flux of ice from the left domain boundary (and tributaries).
Units of m of ice thickness. Note that unrealistic values won't be
met by the model, so this is really just a rough guidance.
It's better to use `flux_gate` instead.
flux_gate : float or function or array of floats or array of functions
flux of ice from the left domain boundary (and tributaries)
(unit: m3 of ice per second). If set to a high value, consider
            changing the flux_gate_build_up time. You can also provide
a function (or an array of functions) returning the flux
(unit: m3 of ice per second) as a function of time.
            This is overridden by `flux_gate_thickness` if provided.
        flux_gate_build_up : int
number of years used to build up the flux gate to full value
do_kcalving : bool
switch on the k-calving parameterisation. Ignored if not a
tidewater glacier. Use the option from PARAMS per default
calving_k : float
the calving proportionality constant (units: yr-1). Use the
one from PARAMS per default
calving_use_limiter : bool
            whether to switch on the calving limiter in the parameterisation;
            it makes the calving fronts thicker but the model is more stable
calving_limiter_frac : float
limit the front slope to a fraction of the calving front.
"3" means 1/3. Setting it to 0 limits the slope to sea-level.
water_level : float
the water level. It should be zero m a.s.l, but:
- sometimes the frontal elevation is unrealistically high (or low).
- lake terminating glaciers
- other uncertainties
The default is 0. For lake terminating glaciers,
it is inferred from PARAMS['free_board_lake_terminating'].
The best way to set the water level for real glaciers is to use
the same as used for the inversion (this is what
`flowline_model_run` does for you)
"""
super(FluxBasedModel, self).__init__(flowlines, mb_model=mb_model,
y0=y0, glen_a=glen_a, fs=fs,
inplace=inplace,
water_level=water_level,
**kwargs)
self.fixed_dt = fixed_dt
if min_dt is None:
min_dt = cfg.PARAMS['cfl_min_dt']
if cfl_number is None:
cfl_number = cfg.PARAMS['cfl_number']
self.min_dt = min_dt
self.cfl_number = cfl_number
# Do we want to use shape factors?
self.sf_func = None
use_sf = cfg.PARAMS.get('use_shape_factor_for_fluxbasedmodel')
if use_sf == 'Adhikari' or use_sf == 'Nye':
self.sf_func = utils.shape_factor_adhikari
elif use_sf == 'Huss':
self.sf_func = utils.shape_factor_huss
# Calving params
if do_kcalving is None:
do_kcalving = cfg.PARAMS['use_kcalving_for_run']
self.do_calving = do_kcalving and self.is_tidewater
if calving_k is None:
calving_k = cfg.PARAMS['calving_k']
self.calving_k = calving_k / cfg.SEC_IN_YEAR
if calving_use_limiter is None:
calving_use_limiter = cfg.PARAMS['calving_use_limiter']
self.calving_use_limiter = calving_use_limiter
if calving_limiter_frac is None:
calving_limiter_frac = cfg.PARAMS['calving_limiter_frac']
if calving_limiter_frac > 0:
raise NotImplementedError('calving limiter other than 0 not '
'implemented yet')
self.calving_limiter_frac = calving_limiter_frac
# Flux gate
self.flux_gate = utils.tolist(flux_gate, length=len(self.fls))
self.flux_gate_m3_since_y0 = 0.
if flux_gate_thickness is not None:
# Compute the theoretical ice flux from the slope at the top
flux_gate_thickness = utils.tolist(flux_gate_thickness,
length=len(self.fls))
self.flux_gate = []
for fl, fgt in zip(self.fls, flux_gate_thickness):
# We set the thickness to the desired value so that
# the widths work ok
fl = copy.deepcopy(fl)
fl.thick = fl.thick * 0 + fgt
slope = (fl.surface_h[0] - fl.surface_h[1]) / fl.dx_meter
if slope == 0:
raise ValueError('I need a slope to compute the flux')
flux = find_sia_flux_from_thickness(slope,
fl.widths_m[0],
fgt,
shape=fl.shape_str[0],
glen_a=self.glen_a,
fs=self.fs)
self.flux_gate.append(flux)
# convert the floats to function calls
for i, fg in enumerate(self.flux_gate):
if fg is None:
continue
try:
# Do we have a function? If yes all good
fg(self.yr)
except TypeError:
# If not, make one
self.flux_gate[i] = partial(flux_gate_with_build_up,
flux_value=fg,
flux_gate_yr=(flux_gate_build_up +
self.y0))
# Optim
self.slope_stag = []
self.thick_stag = []
self.section_stag = []
self.u_stag = []
self.shapefac_stag = []
self.flux_stag = []
self.trib_flux = []
for fl, trib in zip(self.fls, self._tributary_indices):
nx = fl.nx
# This is not staggered
self.trib_flux.append(np.zeros(nx))
# We add an additional fake grid point at the end of tributaries
if trib[0] is not None:
nx = fl.nx + 1
# +1 is for the staggered grid
self.slope_stag.append(np.zeros(nx+1))
self.thick_stag.append(np.zeros(nx+1))
self.section_stag.append(np.zeros(nx+1))
self.u_stag.append(np.zeros(nx+1))
self.shapefac_stag.append(np.ones(nx+1)) # beware the ones!
self.flux_stag.append(np.zeros(nx+1))
def step(self, dt):
"""Advance one step."""
# Just a check to avoid useless computations
if dt <= 0:
raise InvalidParamsError('dt needs to be strictly positive')
# Simple container
mbs = []
# Loop over tributaries to determine the flux rate
for fl_id, fl in enumerate(self.fls):
# This is possibly less efficient than zip() but much clearer
trib = self._tributary_indices[fl_id]
slope_stag = self.slope_stag[fl_id]
thick_stag = self.thick_stag[fl_id]
section_stag = self.section_stag[fl_id]
sf_stag = self.shapefac_stag[fl_id]
flux_stag = self.flux_stag[fl_id]
trib_flux = self.trib_flux[fl_id]
u_stag = self.u_stag[fl_id]
flux_gate = self.flux_gate[fl_id]
# Flowline state
surface_h = fl.surface_h
thick = fl.thick
section = fl.section
dx = fl.dx_meter
# If it is a tributary, we use the branch it flows into to compute
# the slope of the last grid point
is_trib = trib[0] is not None
if is_trib:
fl_to = self.fls[trib[0]]
ide = fl.flows_to_indice
surface_h = np.append(surface_h, fl_to.surface_h[ide])
thick = np.append(thick, thick[-1])
section = np.append(section, section[-1])
elif self.do_calving and self.calving_use_limiter:
# We lower the max possible ice deformation
# by clipping the surface slope here. It is completely
# arbitrary but reduces ice deformation at the calving front.
# I think that in essence, it is also partly
# a "calving process", because this ice deformation must
# be less at the calving front. The result is that calving
# front "free boards" are quite high.
                # Note that the clip level is somewhat arbitrary, it could
                # be any value at or below the water level
surface_h = utils.clip_min(surface_h, self.water_level)
# Staggered gradient
slope_stag[0] = 0
slope_stag[1:-1] = (surface_h[0:-1] - surface_h[1:]) / dx
slope_stag[-1] = slope_stag[-2]
# Staggered thick
thick_stag[1:-1] = (thick[0:-1] + thick[1:]) / 2.
thick_stag[[0, -1]] = thick[[0, -1]]
if self.sf_func is not None:
# TODO: maybe compute new shape factors only every year?
sf = self.sf_func(fl.widths_m, fl.thick, fl.is_rectangular)
if is_trib:
# for inflowing tributary, the sf makes no sense
sf = np.append(sf, 1.)
sf_stag[1:-1] = (sf[0:-1] + sf[1:]) / 2.
sf_stag[[0, -1]] = sf[[0, -1]]
# Staggered velocity (Deformation + Sliding)
# _fd = 2/(N+2) * self.glen_a
N = self.glen_n
rhogh = (self.rho*G*slope_stag)**N
u_stag[:] = (thick_stag**(N+1)) * self._fd * rhogh * sf_stag**N + \
(thick_stag**(N-1)) * self.fs * rhogh
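            # i.e. depth-integrated SIA velocities: deformation
            # u_d = _fd * thick**(n+1) * (rho*g*|dS/dx|)**n (scaled by the
            # shape factor) plus sliding u_s = fs * thick**(n-1) * (rho*g*|dS/dx|)**n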
# Staggered section
section_stag[1:-1] = (section[0:-1] + section[1:]) / 2.
section_stag[[0, -1]] = section[[0, -1]]
# Staggered flux rate
flux_stag[:] = u_stag * section_stag
# Add boundary condition
if flux_gate is not None:
flux_stag[0] = flux_gate(self.yr)
# CFL condition
if not self.fixed_dt:
maxu = np.max(np.abs(u_stag))
if maxu > cfg.FLOAT_EPS:
cfl_dt = self.cfl_number * dx / maxu
else:
cfl_dt = dt
# Update dt only if necessary
if cfl_dt < dt:
dt = cfl_dt
if cfl_dt < self.min_dt:
raise RuntimeError(
'CFL error: required time step smaller '
'than the minimum allowed: '
'{:.1f}s vs {:.1f}s. Happening at '
'simulation year {:.1f}, fl_id {}, '
'bin_id {} and max_u {:.3f} m yr-1.'
''.format(cfl_dt, self.min_dt, self.yr, fl_id,
np.argmax(np.abs(u_stag)),
maxu * cfg.SEC_IN_YEAR))
# Since we are in this loop, reset the tributary flux
trib_flux[:] = 0
# We compute MB in this loop, before mass-redistribution occurs,
# so that MB models which rely on glacier geometry to decide things
            # (like PyGEM) can do so with a clean glacier state
mbs.append(self.get_mb(fl.surface_h, self.yr,
fl_id=fl_id, fls=self.fls))
# Time step
if self.fixed_dt:
# change only if step dt is larger than the chosen dt
if self.fixed_dt < dt:
dt = self.fixed_dt
# A second loop for the mass exchange
for fl_id, fl in enumerate(self.fls):
flx_stag = self.flux_stag[fl_id]
trib_flux = self.trib_flux[fl_id]
tr = self._tributary_indices[fl_id]
dx = fl.dx_meter
is_trib = tr[0] is not None
# For these we had an additional grid point
if is_trib:
flx_stag = flx_stag[:-1]
# Mass-balance
widths = fl.widths_m
mb = mbs[fl_id]
# Allow parabolic beds to grow
mb = dt * mb * np.where((mb > 0.) & (widths == 0), 10., widths)
# Update section with ice flow and mass balance
new_section = (fl.section + (flx_stag[0:-1] - flx_stag[1:])*dt/dx +
trib_flux*dt/dx + mb)
# Keep positive values only and store
fl.section = utils.clip_min(new_section, 0)
# If we use a flux-gate, store the total volume that came in
self.flux_gate_m3_since_y0 += flx_stag[0] * dt
# Add the last flux to the tributary
# this works because the lines are sorted in order
if is_trib:
# tr tuple: line_index, start, stop, gaussian_kernel
self.trib_flux[tr[0]][tr[1]:tr[2]] += \
utils.clip_min(flx_stag[-1], 0) * tr[3]
# --- The rest is for calving only ---
self.calving_rate_myr = 0.
# If tributary, do calving only if we are not transferring mass
if is_trib and flx_stag[-1] > 0:
continue
# No need to do calving in these cases either
if not self.do_calving or not fl.has_ice():
continue
# We do calving only if the last glacier bed pixel is below water
# (this is to avoid calving elsewhere than at the front)
if fl.bed_h[fl.thick > 0][-1] > self.water_level:
continue
# We do calving only if there is some ice above wl
last_above_wl = np.nonzero((fl.surface_h > self.water_level) &
(fl.thick > 0))[0][-1]
if fl.bed_h[last_above_wl] > self.water_level:
continue
# OK, we're really calving
section = fl.section
# Calving law
h = fl.thick[last_above_wl]
d = h - (fl.surface_h[last_above_wl] - self.water_level)
k = self.calving_k
q_calving = k * d * h * fl.widths_m[last_above_wl]
# Add to the bucket and the diagnostics
fl.calving_bucket_m3 += q_calving * dt
self.calving_m3_since_y0 += q_calving * dt
self.calving_rate_myr = (q_calving / section[last_above_wl] *
cfg.SEC_IN_YEAR)
# See if we have ice below sea-water to clean out first
below_sl = (fl.surface_h < self.water_level) & (fl.thick > 0)
to_remove = np.sum(section[below_sl]) * fl.dx_meter
if 0 < to_remove < fl.calving_bucket_m3:
# This is easy, we remove everything
section[below_sl] = 0
fl.calving_bucket_m3 -= to_remove
elif to_remove > 0:
                # We can only remove part of it
section[below_sl] = 0
section[last_above_wl+1] = ((to_remove - fl.calving_bucket_m3)
/ fl.dx_meter)
fl.calving_bucket_m3 = 0
# The rest of the bucket might calve an entire grid point (or more?)
vol_last = section[last_above_wl] * fl.dx_meter
while fl.calving_bucket_m3 > vol_last:
fl.calving_bucket_m3 -= vol_last
section[last_above_wl] = 0
# OK check if we need to continue (unlikely)
last_above_wl -= 1
vol_last = section[last_above_wl] * fl.dx_meter
# We update the glacier with our changes
fl.section = section
# Next step
self.t += dt
return dt
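    # Worked example for the CFL criterion above (illustrative numbers,
    # assuming a CFL number of 0.02): with dx = 100 m and a maximum staggered
    # velocity of 100 m yr-1 (~3.2e-6 m s-1), cfl_dt = 0.02 * 100 / 3.2e-6
    # ~ 6.3e5 s, i.e. roughly one week of model time per dynamical step.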
def get_diagnostics(self, fl_id=-1):
"""Obtain model diagnostics in a pandas DataFrame.
Parameters
----------
fl_id : int
the index of the flowline of interest, from 0 to n_flowline-1.
Default is to take the last (main) one
Returns
-------
        a pandas DataFrame, whose index is the distance along the flowline
        (m). Units:
        - surface_h, bed_h, ice_thick, section_width: m
- section_area: m2
- slope: -
- ice_flux, tributary_flux: m3 of *ice* per second
- ice_velocity: m per second (depth-section integrated)
"""
import pandas as pd
fl = self.fls[fl_id]
nx = fl.nx
df = pd.DataFrame(index=fl.dx_meter * np.arange(nx))
df.index.name = 'distance_along_flowline'
df['surface_h'] = fl.surface_h
df['bed_h'] = fl.bed_h
df['ice_thick'] = fl.thick
df['section_width'] = fl.widths_m
df['section_area'] = fl.section
# Staggered
var = self.slope_stag[fl_id]
df['slope'] = (var[1:nx+1] + var[:nx])/2
var = self.flux_stag[fl_id]
df['ice_flux'] = (var[1:nx+1] + var[:nx])/2
var = self.u_stag[fl_id]
df['ice_velocity'] = (var[1:nx+1] + var[:nx])/2
var = self.shapefac_stag[fl_id]
df['shape_fac'] = (var[1:nx+1] + var[:nx])/2
# Not Staggered
df['tributary_flux'] = self.trib_flux[fl_id]
return df
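# Usage sketch (names are illustrative): after evolving a FluxBasedModel
# instance called `model`, the flowline diagnostics can be inspected as a
# pandas DataFrame:
#
#     df = model.get_diagnostics(fl_id=-1)
#     df[['surface_h', 'bed_h', 'ice_thick', 'ice_velocity']].plot()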
class MassConservationChecker(FluxBasedModel):
"""This checks if the FluxBasedModel is conserving mass."""
def __init__(self, flowlines, **kwargs):
""" Instanciate.
Parameters
----------
Properties
----------
#TODO: document properties
"""
super(MassConservationChecker, self).__init__(flowlines, **kwargs)
self.total_mass = 0.
def step(self, dt):
mbs = []
sections = []
for fl in self.fls:
# Mass balance
widths = fl.widths_m
mb = self.get_mb(fl.surface_h, self.yr, fl_id=id(fl))
mbs.append(mb * widths)
sections.append(np.copy(fl.section))
dx = fl.dx_meter
dt = super(MassConservationChecker, self).step(dt)
for mb, sec in zip(mbs, sections):
mb = dt * mb
# there can't be more negative mb than there is section
# this isn't an exact solution unfortunately
# TODO: exact solution for mass conservation
mb = utils.clip_min(mb, -sec)
self.total_mass += np.sum(mb * dx)
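# Usage sketch (hedged, names illustrative): run the checker like any other
# flowline model and compare the accumulated mass-balance volume with the
# modelled volume change:
#
#     checker = MassConservationChecker(fls, mb_model=mb, y0=0)
#     v0 = checker.volume_m3
#     checker.run_until(100)
#     # checker.total_mass should be close to (checker.volume_m3 - v0)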
class KarthausModel(FlowlineModel):
"""The actual model"""
def __init__(self, flowlines, mb_model=None, y0=0., glen_a=None, fs=0.,
fixed_dt=None, min_dt=SEC_IN_DAY, max_dt=31*SEC_IN_DAY,
inplace=False, **kwargs):
""" Instanciate.
Parameters
----------
Properties
----------
#TODO: document properties
#TODO: Changed from assumed N=3 to N
"""
if len(flowlines) > 1:
raise ValueError('Karthaus model does not work with tributaries.')
super(KarthausModel, self).__init__(flowlines, mb_model=mb_model,
y0=y0, glen_a=glen_a, fs=fs,
inplace=inplace, **kwargs)
        self.dt_warning = False
if fixed_dt is not None:
min_dt = fixed_dt
max_dt = fixed_dt
self.min_dt = min_dt
self.max_dt = max_dt
def step(self, dt):
"""Advance one step."""
# Just a check to avoid useless computations
if dt <= 0:
raise InvalidParamsError('dt needs to be strictly positive')
# This is to guarantee a precise arrival on a specific date if asked
min_dt = dt if dt < self.min_dt else self.min_dt
dt = utils.clip_scalar(dt, min_dt, self.max_dt)
fl = self.fls[0]
dx = fl.dx_meter
width = fl.widths_m
thick = fl.thick
MassBalance = self.get_mb(fl.surface_h, self.yr, fl_id=id(fl))
SurfaceHeight = fl.surface_h
# Surface gradient
SurfaceGradient = np.zeros(fl.nx)
SurfaceGradient[1:fl.nx-1] = (SurfaceHeight[2:] -
SurfaceHeight[:fl.nx-2])/(2*dx)
SurfaceGradient[-1] = 0
SurfaceGradient[0] = 0
# Diffusivity
N = self.glen_n
Diffusivity = width * (self.rho*G)**3 * thick**3 * SurfaceGradient**2
Diffusivity *= 2/(N+2) * self.glen_a * thick**2 + self.fs
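        # For n = 3 this is the standard SIA diffusivity, i.e.
        # D = width * [2*A/(n+2) * (rho*g)**n * thick**(n+2) * |dS/dx|**(n-1)
        #              + fs * (rho*g)**n * thick**n * |dS/dx|**(n-1)]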
# on stagger
DiffusivityStaggered = np.zeros(fl.nx)
SurfaceGradientStaggered = np.zeros(fl.nx)
DiffusivityStaggered[1:] = (Diffusivity[:fl.nx-1] + Diffusivity[1:])/2.
DiffusivityStaggered[0] = Diffusivity[0]
SurfaceGradientStaggered[1:] = (SurfaceHeight[1:] -
SurfaceHeight[:fl.nx-1])/dx
SurfaceGradientStaggered[0] = 0
GradxDiff = SurfaceGradientStaggered * DiffusivityStaggered
# Yo
NewIceThickness = np.zeros(fl.nx)
NewIceThickness[:fl.nx-1] = (thick[:fl.nx-1] + (dt/width[0:fl.nx-1]) *
(GradxDiff[1:]-GradxDiff[:fl.nx-1])/dx +
dt * MassBalance[:fl.nx-1])
NewIceThickness[-1] = thick[fl.nx-2]
fl.thick = utils.clip_min(NewIceThickness, 0)
# Next step
self.t += dt
return dt
class FileModel(object):
"""Duck FlowlineModel which actually reads data out of a nc file."""
def __init__(self, path):
""" Instanciate.
Parameters
----------
Properties
----------
#TODO: document properties
"""
self.fls = glacier_from_netcdf(path)
fl_tss = []
for flid, fl in enumerate(self.fls):
with xr.open_dataset(path, group='fl_{}'.format(flid)) as ds:
if flid == 0:
# Populate time
self.time = ds.time.values
try:
self.years = ds.year.values
except AttributeError:
raise InvalidWorkflowError('The provided model output '
'file is incomplete (likely '
                                                   'because the previous '
'run failed) or corrupt.')
try:
self.months = ds.month.values
except AttributeError:
self.months = self.years * 0 + 1
# Read out the data
fl_data = {
'ts_section': ds.ts_section.values,
'ts_width_m': ds.ts_width_m.values,
}
try:
fl_data['ts_calving_bucket_m3'] = ds.ts_calving_bucket_m3.values
except AttributeError:
fl_data['ts_calving_bucket_m3'] = self.years * 0
fl_tss.append(fl_data)
self.fl_tss = fl_tss
self.last_yr = float(ds.time[-1])
# Calving diags
try:
with xr.open_dataset(path) as ds:
self._calving_m3_since_y0 = ds.calving_m3.values
self.do_calving = True
except AttributeError:
self._calving_m3_since_y0 = 0
self.do_calving = False
# time
self.reset_y0()
def __enter__(self):
warnings.warn('FileModel no longer needs to be run as a '
'context manager. You can safely remove the '
'`with` statement.', FutureWarning)
return self
def __exit__(self, exc_type, exc_value, traceback):
pass
def reset_y0(self, y0=None):
"""Reset the initial model time"""
if y0 is None:
y0 = float(self.time[0])
self.y0 = y0
self.yr = y0
self._current_index = 0
@property
def area_m2(self):
return np.sum([f.area_m2 for f in self.fls])
@property
def volume_m3(self):
return np.sum([f.volume_m3 for f in self.fls])
@property
def volume_km3(self):
return self.volume_m3 * 1e-9
@property
def area_km2(self):
return self.area_m2 * 1e-6
@property
def length_m(self):
return self.fls[-1].length_m
@property
def calving_m3_since_y0(self):
if self.do_calving:
return self._calving_m3_since_y0[self._current_index]
else:
return 0
def run_until(self, year=None, month=None):
"""Mimics the model's behavior.
        Note that this is quite slow.
"""
try:
if month is not None:
pok = np.nonzero((self.years == year) & (self.months == month))[0][0]
else:
pok = np.nonzero(self.time == year)[0][0]
except IndexError as err:
raise IndexError('Index year={}, month={} not available in '
'FileModel.'.format(year, month)) from err
self.yr = self.time[pok]
self._current_index = pok
for fl, fl_ts in zip(self.fls, self.fl_tss):
fl.section = fl_ts['ts_section'][pok, :]
fl.calving_bucket_m3 = fl_ts['ts_calving_bucket_m3'][pok]
def area_m2_ts(self, rollmin=0):
"""rollmin is the number of years you want to smooth onto"""
sel = 0
for fl, fl_ts in zip(self.fls, self.fl_tss):
widths = np.where(fl_ts['ts_section'] > 0., fl_ts['ts_width_m'], 0.)
sel += widths.sum(axis=1) * fl.dx_meter
sel = pd.Series(data=sel, index=self.time, name='area_m2')
if rollmin != 0:
sel = sel.rolling(rollmin).min()
sel.iloc[0:rollmin] = sel.iloc[rollmin]
return sel
def area_km2_ts(self, **kwargs):
return self.area_m2_ts(**kwargs) * 1e-6
def volume_m3_ts(self):
sel = 0
for fl, fl_ts in zip(self.fls, self.fl_tss):
sel += fl_ts['ts_section'].sum(axis=1) * fl.dx_meter
sel -= fl_ts['ts_calving_bucket_m3']
return pd.Series(data=sel, index=self.time, name='volume_m3')
def volume_km3_ts(self):
return self.volume_m3_ts() * 1e-9
def length_m_ts(self, rollmin=0):
raise NotImplementedError('length_m_ts is no longer available in the '
'full output files. To obtain the length '
'time series, refer to the diagnostic '
'output file.')
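# Usage sketch (illustrative): read a stored geometry run back in and query
# its time series:
#
#     fmod = FileModel(gdir.get_filepath('model_geometry'))
#     fmod.run_until(fmod.last_yr)
#     volume = fmod.volume_km3_ts()
#     area = fmod.area_km2_ts()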
def flowline_from_dataset(ds):
"""Instanciates a flowline from an xarray Dataset."""
cl = globals()[ds.attrs['class']]
line = shpg.LineString(ds['linecoords'].values)
args = dict(line=line, dx=ds.dx, map_dx=ds.map_dx,
surface_h=ds['surface_h'].values,
bed_h=ds['bed_h'].values)
have = {'c', 'x', 'surface_h', 'linecoords', 'bed_h', 'z', 'p', 'n',
'time', 'month', 'year', 'ts_width_m', 'ts_section',
'ts_calving_bucket_m3'}
missing_vars = set(ds.variables.keys()).difference(have)
for k in missing_vars:
data = ds[k].values
if ds[k].dims[0] == 'z':
data = data[0]
args[k] = data
return cl(**args)
def glacier_from_netcdf(path):
"""Instanciates a list of flowlines from an xarray Dataset."""
with xr.open_dataset(path) as ds:
fls = []
for flid in ds['flowlines'].values:
with xr.open_dataset(path, group='fl_{}'.format(flid)) as _ds:
fls.append(flowline_from_dataset(_ds))
for i, fid in enumerate(ds['flows_to_id'].values):
if fid != -1:
fls[i].set_flows_to(fls[fid])
# Adds the line level
for fl in fls:
fl.order = line_order(fl)
return fls
def calving_glacier_downstream_line(line, n_points):
"""Extends a calving glacier flowline past the terminus."""
if line is None:
return None
x, y = line.coords.xy
dx = x[-1] - x[-2]
dy = y[-1] - y[-2]
x = np.append(x, x[-1] + dx * np.arange(1, n_points+1))
y = np.append(y, y[-1] + dy * np.arange(1, n_points+1))
return shpg.LineString(np.array([x, y]).T)
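# Worked example: extending a straight two-point line (0, 0)-(1, 0) with
# n_points=2 appends the points (2, 0) and (3, 0), i.e. the line is simply
# prolonged along the direction of its last segment.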
def old_init_present_time_glacier(gdir):
"""Init_present_time_glacier when trapezoid inversion was not possible."""
# Some vars
map_dx = gdir.grid.dx
def_lambda = cfg.PARAMS['trapezoid_lambdas']
min_shape = cfg.PARAMS['mixed_min_shape']
cls = gdir.read_pickle('inversion_flowlines')
invs = gdir.read_pickle('inversion_output')
# Fill the tributaries
new_fls = []
flows_to_ids = []
for cl, inv in zip(cls, invs):
# Get the data to make the model flowlines
line = cl.line
section = inv['volume'] / (cl.dx * map_dx)
surface_h = cl.surface_h
bed_h = surface_h - inv['thick']
widths_m = cl.widths * map_dx
assert np.all(widths_m > 0)
bed_shape = 4 * inv['thick'] / (cl.widths * map_dx) ** 2
lambdas = inv['thick'] * np.NaN
lambdas[bed_shape < min_shape] = def_lambda
lambdas[inv['is_rectangular']] = 0.
        # Last pixels of non-tidewater glaciers are always parabolic (see below)
if not gdir.is_tidewater and inv['is_last']:
lambdas[-5:] = np.nan
# Update bed_h where we now have a trapeze
w0_m = cl.widths * map_dx - lambdas * inv['thick']
b = 2 * w0_m
a = 2 * lambdas
with np.errstate(divide='ignore', invalid='ignore'):
thick = (np.sqrt(b ** 2 + 4 * a * section) - b) / a
ptrap = (lambdas != 0) & np.isfinite(lambdas)
bed_h[ptrap] = cl.surface_h[ptrap] - thick[ptrap]
        # For the very last pixels of a glacier, the section might be zero after
# the inversion, and the bedshapes are chaotic. We interpolate from
# the downstream. This is not volume conservative
if not gdir.is_tidewater and inv['is_last']:
dic_ds = gdir.read_pickle('downstream_line')
bed_shape[-5:] = np.nan
# Interpolate
bed_shape = utils.interp_nans(np.append(bed_shape,
dic_ds['bedshapes'][0]))
bed_shape = utils.clip_min(bed_shape[:-1], min_shape)
# Correct the section volume
h = inv['thick']
section[-5:] = (2 / 3 * h * np.sqrt(4 * h / bed_shape))[-5:]
# Add the downstream
bed_shape = np.append(bed_shape, dic_ds['bedshapes'])
lambdas = np.append(lambdas, dic_ds['bedshapes'] * np.NaN)
section = np.append(section, dic_ds['bedshapes'] * 0.)
surface_h = np.append(surface_h, dic_ds['surface_h'])
bed_h = np.append(bed_h, dic_ds['surface_h'])
widths_m = np.append(widths_m, dic_ds['bedshapes'] * 0.)
line = dic_ds['full_line']
if gdir.is_tidewater and inv['is_last']:
# Continue the bed a little
n_points = cfg.PARAMS['calving_line_extension']
cf_slope = cfg.PARAMS['calving_front_slope']
deepening = n_points * cl.dx * map_dx * cf_slope
line = calving_glacier_downstream_line(line, n_points=n_points)
bed_shape = np.append(bed_shape, np.zeros(n_points))
lambdas = np.append(lambdas, np.zeros(n_points))
section = np.append(section, np.zeros(n_points))
# The bed slowly deepens
bed_down = np.linspace(bed_h[-1], bed_h[-1]-deepening, n_points)
bed_h = np.append(bed_h, bed_down)
surface_h = np.append(surface_h, bed_down)
widths_m = np.append(widths_m,
np.zeros(n_points) + np.mean(widths_m[-5:]))
nfl = MixedBedFlowline(line=line, dx=cl.dx, map_dx=map_dx,
surface_h=surface_h, bed_h=bed_h,
section=section, bed_shape=bed_shape,
is_trapezoid=np.isfinite(lambdas),
lambdas=lambdas,
widths_m=widths_m,
rgi_id=cl.rgi_id)
# Update attrs
nfl.mu_star = cl.mu_star
if cl.flows_to:
flows_to_ids.append(cls.index(cl.flows_to))
else:
flows_to_ids.append(None)
new_fls.append(nfl)
# Finalize the linkages
for fl, fid in zip(new_fls, flows_to_ids):
if fid:
fl.set_flows_to(new_fls[fid])
# Adds the line level
for fl in new_fls:
fl.order = line_order(fl)
# Write the data
gdir.write_pickle(new_fls, 'model_flowlines')
@entity_task(log, writes=['model_flowlines'])
def init_present_time_glacier(gdir):
"""Merges data from preprocessing tasks. First task after inversion!
    This updates the `model_flowlines` file and creates a stand-alone numerical
glacier ready to run.
Parameters
----------
gdir : :py:class:`oggm.GlacierDirectory`
the glacier directory to process
"""
# Some vars
invs = gdir.read_pickle('inversion_output')
if invs[0].get('is_trapezoid', None) is None:
return old_init_present_time_glacier(gdir)
map_dx = gdir.grid.dx
def_lambda = cfg.PARAMS['trapezoid_lambdas']
cls = gdir.read_pickle('inversion_flowlines')
# Fill the tributaries
new_fls = []
flows_to_ids = []
for cl, inv in zip(cls, invs):
# Get the data to make the model flowlines
line = cl.line
section = inv['volume'] / (cl.dx * map_dx)
surface_h = cl.surface_h
bed_h = surface_h - inv['thick']
widths_m = cl.widths * map_dx
assert np.all(widths_m > 0)
bed_shape = 4 * inv['thick'] / (cl.widths * map_dx) ** 2
lambdas = inv['thick'] * np.NaN
lambdas[inv['is_trapezoid']] = def_lambda
lambdas[inv['is_rectangular']] = 0.
        # Where the flux and the thickness are zero we just assume trapezoid:
lambdas[bed_shape == 0] = def_lambda
if not gdir.is_tidewater and inv['is_last']:
# for valley glaciers, simply add the downstream line
dic_ds = gdir.read_pickle('downstream_line')
bed_shape = np.append(bed_shape, dic_ds['bedshapes'])
lambdas = np.append(lambdas, dic_ds['bedshapes'] * np.NaN)
section = np.append(section, dic_ds['bedshapes'] * 0.)
surface_h = np.append(surface_h, dic_ds['surface_h'])
bed_h = np.append(bed_h, dic_ds['surface_h'])
widths_m = np.append(widths_m, dic_ds['bedshapes'] * 0.)
line = dic_ds['full_line']
if gdir.is_tidewater and inv['is_last']:
# Continue the bed a little
n_points = cfg.PARAMS['calving_line_extension']
cf_slope = cfg.PARAMS['calving_front_slope']
deepening = n_points * cl.dx * map_dx * cf_slope
line = calving_glacier_downstream_line(line, n_points=n_points)
bed_shape = np.append(bed_shape, np.zeros(n_points))
lambdas = np.append(lambdas, np.zeros(n_points))
section = np.append(section, np.zeros(n_points))
# The bed slowly deepens
bed_down = np.linspace(bed_h[-1], bed_h[-1]-deepening, n_points)
bed_h = np.append(bed_h, bed_down)
surface_h = np.append(surface_h, bed_down)
widths_m = np.append(widths_m,
np.zeros(n_points) + np.mean(widths_m[-5:]))
nfl = MixedBedFlowline(line=line, dx=cl.dx, map_dx=map_dx,
surface_h=surface_h, bed_h=bed_h,
section=section, bed_shape=bed_shape,
is_trapezoid=np.isfinite(lambdas),
lambdas=lambdas,
widths_m=widths_m,
rgi_id=cl.rgi_id)
# Update attrs
nfl.mu_star = cl.mu_star
if cl.flows_to:
flows_to_ids.append(cls.index(cl.flows_to))
else:
flows_to_ids.append(None)
new_fls.append(nfl)
# Finalize the linkages
for fl, fid in zip(new_fls, flows_to_ids):
if fid:
fl.set_flows_to(new_fls[fid])
# Adds the line level
for fl in new_fls:
fl.order = line_order(fl)
# Write the data
gdir.write_pickle(new_fls, 'model_flowlines')
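# Usage sketch (hedged): like other entity tasks, this is typically applied
# to many glacier directories at once via the workflow helpers, with `gdirs`
# being a list of GlacierDirectory objects:
#
#     from oggm import tasks, workflow
#     workflow.execute_entity_task(tasks.init_present_time_glacier, gdirs)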
def robust_model_run(*args, **kwargs):
warnings.warn('The task `robust_model_run` is deprecated.', FutureWarning)
return flowline_model_run(*args, **kwargs)
@entity_task(log)
def flowline_model_run(gdir, output_filesuffix=None, mb_model=None,
ys=None, ye=None, zero_initial_glacier=False,
init_model_fls=None, store_monthly_step=False,
store_model_geometry=None, water_level=None,
**kwargs):
"""Runs a model simulation with the default time stepping scheme.
Parameters
----------
gdir : :py:class:`oggm.GlacierDirectory`
the glacier directory to process
output_filesuffix : str
        this adds a suffix to the output file (useful to avoid overwriting
        previous experiments)
mb_model : :py:class:`core.MassBalanceModel`
a MassBalanceModel instance
ys : int
start year of the model run (default: from the config file)
ye : int
end year of the model run (default: from the config file)
zero_initial_glacier : bool
if true, the ice thickness is set to zero before the simulation
init_model_fls : []
list of flowlines to use to initialise the model (the default is the
present_time_glacier file from the glacier directory)
store_monthly_step : bool
whether to store the diagnostic data at a monthly time step or not
(default is yearly)
store_model_geometry : bool
whether to store the full model geometry run file to disk or not.
(new in OGGM v1.4.1: default is to follow
cfg.PARAMS['store_model_geometry'])
water_level : float
the water level. It should be zero m a.s.l, but:
- sometimes the frontal elevation is unrealistically high (or low).
- lake terminating glaciers
- other uncertainties
The default is to take the water level obtained from the ice
thickness inversion.
kwargs : dict
kwargs to pass to the FluxBasedModel instance
"""
mb_elev_feedback = kwargs.get('mb_elev_feedback', 'annual')
if store_monthly_step and (mb_elev_feedback == 'annual'):
warnings.warn("The mass-balance used to drive the ice dynamics model "
"is updated yearly. If you want the output to be stored "
"monthly and also reflect reflect monthly processes,"
"set store_monthly_step=True and "
"mb_elev_feedback='monthly'. This is not recommended "
"though: for monthly MB applications, we recommend to "
"use the `run_with_hydro` task.")
if cfg.PARAMS['use_inversion_params_for_run']:
diag = gdir.get_diagnostics()
fs = diag.get('inversion_fs', cfg.PARAMS['fs'])
glen_a = diag.get('inversion_glen_a', cfg.PARAMS['glen_a'])
else:
fs = cfg.PARAMS['fs']
glen_a = cfg.PARAMS['glen_a']
kwargs.setdefault('fs', fs)
kwargs.setdefault('glen_a', glen_a)
if store_model_geometry is None:
store_model_geometry = cfg.PARAMS['store_model_geometry']
if store_model_geometry:
geom_path = gdir.get_filepath('model_geometry',
filesuffix=output_filesuffix,
delete=True)
else:
geom_path = False
diag_path = gdir.get_filepath('model_diagnostics',
filesuffix=output_filesuffix,
delete=True)
if init_model_fls is None:
fls = gdir.read_pickle('model_flowlines')
else:
fls = copy.deepcopy(init_model_fls)
if zero_initial_glacier:
for fl in fls:
fl.thick = fl.thick * 0.
if (cfg.PARAMS['use_kcalving_for_run'] and gdir.is_tidewater and
water_level is None):
# check for water level
water_level = gdir.get_diagnostics().get('calving_water_level', None)
if water_level is None:
raise InvalidWorkflowError('This tidewater glacier seems to not '
'have been inverted with the '
'`find_inversion_calving` task. Set '
"PARAMS['use_kcalving_for_run'] to "
'`False` or set `water_level` '
'to prevent this error.')
model = FluxBasedModel(fls, mb_model=mb_model, y0=ys,
inplace=True,
is_tidewater=gdir.is_tidewater,
is_lake_terminating=gdir.is_lake_terminating,
water_level=water_level,
**kwargs)
with np.warnings.catch_warnings():
# For operational runs we ignore the warnings
np.warnings.filterwarnings('ignore', category=RuntimeWarning)
model.run_until_and_store(ye,
geom_path=geom_path,
diag_path=diag_path,
store_monthly_step=store_monthly_step)
return model
@entity_task(log)
def run_random_climate(gdir, nyears=1000, y0=None, halfsize=15,
bias=None, seed=None, temperature_bias=None,
precipitation_factor=None,
store_monthly_step=False,
store_model_geometry=None,
climate_filename='climate_historical',
climate_input_filesuffix='',
output_filesuffix='', init_model_fls=None,
zero_initial_glacier=False,
unique_samples=False, **kwargs):
"""Runs the random mass-balance model for a given number of years.
This will initialize a
:py:class:`oggm.core.massbalance.MultipleFlowlineMassBalance`,
and run a :py:func:`oggm.core.flowline.flowline_model_run`.
Parameters
----------
gdir : :py:class:`oggm.GlacierDirectory`
the glacier directory to process
nyears : int
length of the simulation
y0 : int, optional
central year of the random climate period. The default is to be
centred on t*.
halfsize : int, optional
the half-size of the time window (window size = 2 * halfsize + 1)
bias : float
bias of the mb model. Default is to use the calibrated one, which
is often a better idea. For t* experiments it can be useful to set it
to zero
seed : int
seed for the random generator. If you ignore this, the runs will be
different each time. Setting it to a fixed seed across glaciers can
be useful if you want to have the same climate years for all of them
temperature_bias : float
add a bias to the temperature timeseries
precipitation_factor: float
        multiply the precipitation time series by this factor.
        The default (None) applies the precipitation factor from the
        calibration, i.e. cfg.PARAMS['prcp_scaling_factor'].
store_monthly_step : bool
whether to store the diagnostic data at a monthly time step or not
(default is yearly)
store_model_geometry : bool
whether to store the full model geometry run file to disk or not.
(new in OGGM v1.4.1: default is to follow
cfg.PARAMS['store_model_geometry'])
climate_filename : str
name of the climate file, e.g. 'climate_historical' (default) or
'gcm_data'
climate_input_filesuffix: str
filesuffix for the input climate file
output_filesuffix : str
        this adds a suffix to the output file (useful to avoid overwriting
        previous experiments)
init_model_fls : []
list of flowlines to use to initialise the model (the default is the
present_time_glacier file from the glacier directory)
zero_initial_glacier : bool
if true, the ice thickness is set to zero before the simulation
unique_samples: bool
if true, chosen random mass-balance years will only be available once
per random climate period-length
if false, every model year will be chosen from the random climate
period with the same probability
kwargs : dict
kwargs to pass to the FluxBasedModel instance
"""
mb = MultipleFlowlineMassBalance(gdir, mb_model_class=RandomMassBalance,
y0=y0, halfsize=halfsize,
bias=bias, seed=seed,
filename=climate_filename,
input_filesuffix=climate_input_filesuffix,
unique_samples=unique_samples)
if temperature_bias is not None:
mb.temp_bias = temperature_bias
if precipitation_factor is not None:
mb.prcp_fac = precipitation_factor
return flowline_model_run(gdir, output_filesuffix=output_filesuffix,
mb_model=mb, ys=0, ye=nyears,
store_monthly_step=store_monthly_step,
store_model_geometry=store_model_geometry,
init_model_fls=init_model_fls,
zero_initial_glacier=zero_initial_glacier,
**kwargs)
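# Example call (hedged; the parameter values are illustrative only):
#
#     workflow.execute_entity_task(tasks.run_random_climate, gdirs,
#                                  nyears=300, seed=1,
#                                  output_filesuffix='_random')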
@entity_task(log)
def run_constant_climate(gdir, nyears=1000, y0=None, halfsize=15,
bias=None, temperature_bias=None,
precipitation_factor=None,
store_monthly_step=False,
store_model_geometry=None,
init_model_filesuffix=None,
init_model_yr=None,
output_filesuffix='',
climate_filename='climate_historical',
climate_input_filesuffix='',
init_model_fls=None,
zero_initial_glacier=False, **kwargs):
"""Runs the constant mass-balance model for a given number of years.
This will initialize a
:py:class:`oggm.core.massbalance.MultipleFlowlineMassBalance`,
and run a :py:func:`oggm.core.flowline.flowline_model_run`.
Parameters
----------
gdir : :py:class:`oggm.GlacierDirectory`
the glacier directory to process
nyears : int
        length of the simulation (default: 1000 years)
y0 : int
central year of the requested climate period. The default is to be
centred on t*.
halfsize : int, optional
the half-size of the time window (window size = 2 * halfsize + 1)
bias : float
bias of the mb model. Default is to use the calibrated one, which
is often a better idea. For t* experiments it can be useful to set it
to zero
temperature_bias : float
add a bias to the temperature timeseries
precipitation_factor: float
        multiply the precipitation time series by this factor.
        The default (None) applies the precipitation factor from the
        calibration, i.e. cfg.PARAMS['prcp_scaling_factor'].
store_monthly_step : bool
whether to store the diagnostic data at a monthly time step or not
(default is yearly)
store_model_geometry : bool
whether to store the full model geometry run file to disk or not.
(new in OGGM v1.4.1: default is to follow
cfg.PARAMS['store_model_geometry'])
init_model_filesuffix : str
if you want to start from a previous model run state. Can be
combined with `init_model_yr`
init_model_yr : int
the year of the initial run you want to start from. The default
is to take the last year of the simulation.
climate_filename : str
name of the climate file, e.g. 'climate_historical' (default) or
'gcm_data'
climate_input_filesuffix: str
filesuffix for the input climate file
output_filesuffix : str
        this adds a suffix to the output file (useful to avoid overwriting
        previous experiments)
zero_initial_glacier : bool
if true, the ice thickness is set to zero before the simulation
init_model_fls : []
list of flowlines to use to initialise the model (the default is the
present_time_glacier file from the glacier directory)
kwargs : dict
kwargs to pass to the FluxBasedModel instance
"""
if init_model_filesuffix is not None:
fp = gdir.get_filepath('model_geometry',
filesuffix=init_model_filesuffix)
fmod = FileModel(fp)
if init_model_yr is None:
init_model_yr = fmod.last_yr
fmod.run_until(init_model_yr)
init_model_fls = fmod.fls
mb = MultipleFlowlineMassBalance(gdir, mb_model_class=ConstantMassBalance,
y0=y0, halfsize=halfsize,
bias=bias, filename=climate_filename,
input_filesuffix=climate_input_filesuffix)
if temperature_bias is not None:
mb.temp_bias = temperature_bias
if precipitation_factor is not None:
mb.prcp_fac = precipitation_factor
return flowline_model_run(gdir, output_filesuffix=output_filesuffix,
mb_model=mb, ys=0, ye=nyears,
store_monthly_step=store_monthly_step,
store_model_geometry=store_model_geometry,
init_model_fls=init_model_fls,
zero_initial_glacier=zero_initial_glacier,
**kwargs)
@entity_task(log)
def run_from_climate_data(gdir, ys=None, ye=None, min_ys=None, max_ys=None,
store_monthly_step=False,
store_model_geometry=None,
climate_filename='climate_historical',
climate_input_filesuffix='', output_filesuffix='',
init_model_filesuffix=None, init_model_yr=None,
init_model_fls=None, zero_initial_glacier=False,
bias=None, temperature_bias=None,
precipitation_factor=None, **kwargs):
""" Runs a glacier with climate input from e.g. CRU or a GCM.
This will initialize a
:py:class:`oggm.core.massbalance.MultipleFlowlineMassBalance`,
and run a :py:func:`oggm.core.flowline.flowline_model_run`.
Parameters
----------
gdir : :py:class:`oggm.GlacierDirectory`
the glacier directory to process
ys : int
start year of the model run (default: from the glacier geometry
date if init_model_filesuffix is None, else init_model_yr)
ye : int
end year of the model run (default: last year of the provided
climate file)
min_ys : int
        if you want to impose a minimum start year, regardless of whether the
        glacier inventory date is earlier (e.g. if the climate data do not
        reach back that far).
max_ys : int
        if you want to impose a maximum start year, regardless of whether the
        glacier inventory date is later (e.g. if the climate data do not
        extend that far).
store_monthly_step : bool
whether to store the diagnostic data at a monthly time step or not
(default is yearly)
store_model_geometry : bool
whether to store the full model geometry run file to disk or not.
(new in OGGM v1.4.1: default is to follow
cfg.PARAMS['store_model_geometry'])
climate_filename : str
name of the climate file, e.g. 'climate_historical' (default) or
'gcm_data'
climate_input_filesuffix: str
filesuffix for the input climate file
output_filesuffix : str
for the output file
init_model_filesuffix : str
if you want to start from a previous model run state. Can be
combined with `init_model_yr`
init_model_yr : int
the year of the initial run you want to start from. The default
is to take the last year of the simulation.
init_model_fls : []
list of flowlines to use to initialise the model (the default is the
present_time_glacier file from the glacier directory).
Ignored if `init_model_filesuffix` is set
zero_initial_glacier : bool
if true, the ice thickness is set to zero before the simulation
bias : float
bias of the mb model. Default is to use the calibrated one, which
is often a better idea. For t* experiments it can be useful to set it
to zero
temperature_bias : float
add a bias to the temperature timeseries
precipitation_factor: float
        multiply the precipitation time series by this factor.
        The default (None) applies the precipitation factor from the
        calibration, i.e. cfg.PARAMS['prcp_scaling_factor'].
kwargs : dict
kwargs to pass to the FluxBasedModel instance
"""
if init_model_filesuffix is not None:
fp = gdir.get_filepath('model_geometry',
filesuffix=init_model_filesuffix)
fmod = FileModel(fp)
if init_model_yr is None:
init_model_yr = fmod.last_yr
fmod.run_until(init_model_yr)
init_model_fls = fmod.fls
if ys is None:
ys = init_model_yr
# Take from rgi date if not set yet
if ys is None:
try:
ys = gdir.rgi_date.year
except AttributeError:
ys = gdir.rgi_date
# The RGI timestamp is in calendar date - we convert to hydro date,
# i.e. 2003 becomes 2004 if hydro_month is not 1 (January)
# (so that we don't count the MB year 2003 in the simulation)
# See also: https://github.com/OGGM/oggm/issues/1020
# even if hydro_month is 1, we prefer to start from Jan 2004
        # as in the Alps the RGI date is from Aug 2003
ys += 1
# Final crop
if min_ys is not None:
ys = ys if ys > min_ys else min_ys
if max_ys is not None:
ys = ys if ys < max_ys else max_ys
mb = MultipleFlowlineMassBalance(gdir, mb_model_class=PastMassBalance,
filename=climate_filename, bias=bias,
input_filesuffix=climate_input_filesuffix)
if temperature_bias is not None:
mb.temp_bias = temperature_bias
if precipitation_factor is not None:
mb.prcp_fac = precipitation_factor
if ye is None:
# Decide from climate (we can run the last year with data as well)
ye = mb.flowline_mb_models[0].ye + 1
return flowline_model_run(gdir, output_filesuffix=output_filesuffix,
mb_model=mb, ys=ys, ye=ye,
store_monthly_step=store_monthly_step,
store_model_geometry=store_model_geometry,
init_model_fls=init_model_fls,
zero_initial_glacier=zero_initial_glacier,
**kwargs)
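# Example call (hedged; values illustrative):
#
#     workflow.execute_entity_task(tasks.run_from_climate_data, gdirs,
#                                  climate_filename='climate_historical',
#                                  output_filesuffix='_hist')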
@entity_task(log)
def run_with_hydro(gdir, run_task=None, store_monthly_hydro=False,
ref_area_from_y0=False, **kwargs):
"""Run the flowline model and add hydro diagnostics (experimental!).
TODOs:
- Add the possibility to merge with previous model runs
- Add the possibility to prescribe glacier area (e.g. with starting area)
- Add the possibility to record MB during run to improve performance
(requires change in API)
- ...
Parameters
----------
run_task : func
        any of the ``run_*`` tasks in the oggm.flowline module.
The mass-balance model used needs to have the `add_climate` output
kwarg available though.
store_monthly_hydro : bool
        also compute monthly hydrological diagnostics. The monthly outputs
        are stored in 2D fields (years, months)
ref_area_from_y0 : bool
the hydrological output is computed over a reference area, which
        by default is the largest area covered by the glacier during the
        simulation period. Use this kwarg to fix the reference area to the
        state of the glacier at y0 (the first year of the simulation) instead.
**kwargs : all valid kwargs for ``run_task``
"""
# Make sure it'll return something
kwargs['return_value'] = True
# Check that kwargs are compatible
if kwargs.get('store_monthly_step', False):
raise InvalidParamsError('run_with_hydro only compatible with '
'store_monthly_step=False.')
if kwargs.get('mb_elev_feedback', 'annual') != 'annual':
raise InvalidParamsError('run_with_hydro only compatible with '
"mb_elev_feedback='annual' (yes, even "
"when asked for monthly hydro output).")
out = run_task(gdir, **kwargs)
if out is None:
raise InvalidWorkflowError('The run task ({}) did not run '
'successfully.'.format(run_task.__name__))
# Mass balance model used during the run
mb_mod = out.mb_model
# Glacier geometry during the run
suffix = kwargs.get('output_filesuffix', '')
# We start by fetching mass balance data and geometry for all years
# model_geometry files always retrieve yearly timesteps
fmod = FileModel(gdir.get_filepath('model_geometry', filesuffix=suffix))
# The last one is the final state - we can't compute MB for that
years = fmod.years[:-1]
# Geometry at y0 to start with + off-glacier snow bucket
bin_area_2ds = []
bin_elev_2ds = []
ref_areas = []
snow_buckets = []
for fl in fmod.fls:
# Glacier area on bins
bin_area = fl.bin_area_m2
ref_areas.append(bin_area)
snow_buckets.append(bin_area * 0)
# Output 2d data
shape = len(years), len(bin_area)
bin_area_2ds.append(np.empty(shape, np.float64))
bin_elev_2ds.append(np.empty(shape, np.float64))
# Ok now fetch all geometry data in a first loop
# We do that because we might want to get the largest possible area (default)
# and we want to minimize the number of calls to run_until
for i, yr in enumerate(years):
fmod.run_until(yr)
for fl_id, (fl, bin_area_2d, bin_elev_2d) in \
enumerate(zip(fmod.fls, bin_area_2ds, bin_elev_2ds)):
# Time varying bins
bin_area_2d[i, :] = fl.bin_area_m2
bin_elev_2d[i, :] = fl.surface_h
if not ref_area_from_y0:
# Ok we get the max area instead
for ref_area, bin_area_2d in zip(ref_areas, bin_area_2ds):
ref_area[:] = bin_area_2d.max(axis=0)
# Ok now we have arrays, we can work with that
# -> second time varying loop is for mass-balance
months = [1]
seconds = cfg.SEC_IN_YEAR
ntime = len(years) + 1
oshape = (ntime, 1)
if store_monthly_hydro:
months = np.arange(1, 13)
seconds = cfg.SEC_IN_MONTH
oshape = (ntime, 12)
out = {
'off_area': {
'description': 'Off-glacier area',
'unit': 'm 2',
'data': np.zeros(ntime),
},
'on_area': {
'description': 'On-glacier area',
'unit': 'm 2',
'data': np.zeros(ntime),
},
'melt_off_glacier': {
'description': 'Off-glacier melt',
'unit': 'kg yr-1',
'data': np.zeros(oshape),
},
'melt_on_glacier': {
'description': 'On-glacier melt',
'unit': 'kg yr-1',
'data': np.zeros(oshape),
},
'melt_residual_off_glacier': {
'description': 'Off-glacier melt due to MB model residual',
'unit': 'kg yr-1',
'data': np.zeros(oshape),
},
'melt_residual_on_glacier': {
'description': 'On-glacier melt due to MB model residual',
'unit': 'kg yr-1',
'data': np.zeros(oshape),
},
'liq_prcp_off_glacier': {
'description': 'Off-glacier liquid precipitation',
'unit': 'kg yr-1',
'data': np.zeros(oshape),
},
'liq_prcp_on_glacier': {
'description': 'On-glacier liquid precipitation',
'unit': 'kg yr-1',
'data': np.zeros(oshape),
},
'snowfall_off_glacier': {
'description': 'Off-glacier solid precipitation',
'unit': 'kg yr-1',
'data': np.zeros(oshape),
},
'snowfall_on_glacier': {
'description': 'On-glacier solid precipitation',
'unit': 'kg yr-1',
'data': np.zeros(oshape),
},
'snow_bucket': {
'description': 'Off-glacier snow reservoir (state variable)',
'unit': 'kg',
'data': np.zeros(oshape),
},
'model_mb': {
'description': 'Annual mass-balance from dynamical model',
'unit': 'kg yr-1',
'data': np.zeros(ntime),
},
'residual_mb': {
'description': 'Difference (before correction) between mb model and dyn model melt',
'unit': 'kg yr-1',
'data': np.zeros(oshape),
},
}
# Initialize
fmod.run_until(years[0])
prev_model_vol = fmod.volume_m3
for i, yr in enumerate(years):
# Now the loop over the months
for m in months:
# A bit silly but avoid double counting in monthly ts
off_area_out = 0
on_area_out = 0
for fl_id, (ref_area, snow_bucket, bin_area_2d, bin_elev_2d) in \
enumerate(zip(ref_areas, snow_buckets, bin_area_2ds, bin_elev_2ds)):
bin_area = bin_area_2d[i, :]
bin_elev = bin_elev_2d[i, :]
                # Make sure we have no negative contribution when the
                # glacier grows larger than the reference area
off_area = utils.clip_min(ref_area - bin_area, 0)
try:
if store_monthly_hydro:
flt_yr = utils.date_to_floatyear(int(yr), m)
mb_out = mb_mod.get_monthly_mb(bin_elev, fl_id=fl_id,
year=flt_yr,
add_climate=True)
mb, _, _, prcp, prcpsol = mb_out
else:
mb_out = mb_mod.get_annual_mb(bin_elev, fl_id=fl_id,
year=yr, add_climate=True)
mb, _, _, prcp, prcpsol = mb_out
except ValueError as e:
if 'too many values to unpack' in str(e):
raise InvalidWorkflowError('Run with hydro needs a MB '
'model able to add climate '
'info to `get_annual_mb`.')
raise
# Here we use mass (kg yr-1) not ice volume
mb *= seconds * cfg.PARAMS['ice_density']
# Bias of the mb model is a fake melt term that we need to deal with
mb_bias = mb_mod.bias * seconds / cfg.SEC_IN_YEAR
liq_prcp_on_g = (prcp - prcpsol) * bin_area
liq_prcp_off_g = (prcp - prcpsol) * off_area
prcpsol_on_g = prcpsol * bin_area
prcpsol_off_g = prcpsol * off_area
# IMPORTANT: this does not guarantee that melt cannot be negative
# the reason is the MB residual that here can only be understood
# as a fake melt process.
# In particular at the monthly scale this can lead to negative
# or winter positive melt - we try to mitigate this
# issue at the end of the year
melt_on_g = (prcpsol - mb) * bin_area
melt_off_g = (prcpsol - mb) * off_area
                # This is the problematic term
bias_on_g = mb_bias * bin_area
bias_off_g = mb_bias * off_area
# Update bucket with accumulation and melt
snow_bucket += prcpsol_off_g
# It can only melt that much
melt_off_g = np.where((snow_bucket - melt_off_g) >= 0, melt_off_g, snow_bucket)
# Update bucket
snow_bucket -= melt_off_g
                # This is recomputed each month, which is redundant but fine
off_area_out += np.sum(off_area)
on_area_out += np.sum(bin_area)
# Monthly out
out['melt_off_glacier']['data'][i, m-1] += np.sum(melt_off_g)
out['melt_on_glacier']['data'][i, m-1] += np.sum(melt_on_g)
out['melt_residual_off_glacier']['data'][i, m-1] += np.sum(bias_off_g)
out['melt_residual_on_glacier']['data'][i, m-1] += np.sum(bias_on_g)
out['liq_prcp_off_glacier']['data'][i, m-1] += np.sum(liq_prcp_off_g)
out['liq_prcp_on_glacier']['data'][i, m-1] += np.sum(liq_prcp_on_g)
out['snowfall_off_glacier']['data'][i, m-1] += np.sum(prcpsol_off_g)
out['snowfall_on_glacier']['data'][i, m-1] += np.sum(prcpsol_on_g)
                # Snow bucket is a state variable - stored at the end of the time step
if store_monthly_hydro:
if m == 12:
out['snow_bucket']['data'][i+1, 0] += np.sum(snow_bucket)
else:
out['snow_bucket']['data'][i, m] += np.sum(snow_bucket)
else:
out['snow_bucket']['data'][i+1, m-1] += np.sum(snow_bucket)
# Update the annual data
out['off_area']['data'][i] = off_area_out
out['on_area']['data'][i] = on_area_out
# If monthly, put the residual where we can
if store_monthly_hydro:
for melt, bias in zip(
[
out['melt_on_glacier']['data'][i, :],
out['melt_off_glacier']['data'][i, :],
],
[
out['melt_residual_on_glacier']['data'][i, :],
out['melt_residual_off_glacier']['data'][i, :],
],
):
real_melt = melt - bias
real_melt_sum = np.sum(real_melt)
bias_sum = np.sum(bias)
if real_melt_sum > 0:
# Ok we correct the positive melt instead
fac = 1 + bias_sum / real_melt_sum
melt[:] = real_melt * fac
# Correct for mass-conservation and match the ice-dynamics model
fmod.run_until(yr + 1)
model_mb = (fmod.volume_m3 - prev_model_vol) * cfg.PARAMS['ice_density']
prev_model_vol = fmod.volume_m3
reconstructed_mb = (out['snowfall_on_glacier']['data'][i, :].sum() -
out['melt_on_glacier']['data'][i, :].sum())
residual_mb = model_mb - reconstructed_mb
# Now correct
if store_monthly_hydro:
# We try to correct the melt only where there is some
asum = out['melt_on_glacier']['data'][i, :].sum()
if asum > 1e-7 and (residual_mb / asum < 1):
# try to find a fac
fac = 1 - residual_mb / asum
corr = out['melt_on_glacier']['data'][i, :] * fac
residual_mb = out['melt_on_glacier']['data'][i, :] - corr
out['melt_on_glacier']['data'][i, :] = corr
else:
# We simply spread over the months
residual_mb /= 12
out['melt_on_glacier']['data'][i, :] = (out['melt_on_glacier']['data'][i, :] -
residual_mb)
else:
# We simply apply the residual - no choice here
out['melt_on_glacier']['data'][i, :] = (out['melt_on_glacier']['data'][i, :] -
residual_mb)
out['model_mb']['data'][i] = model_mb
out['residual_mb']['data'][i] = residual_mb
# Convert to xarray
out_vars = cfg.PARAMS['store_diagnostic_variables']
ods = xr.Dataset()
ods.coords['time'] = fmod.years
if store_monthly_hydro:
ods.coords['month_2d'] = ('month_2d', np.arange(1, 13))
# For the user later
sm = cfg.PARAMS['hydro_month_' + mb_mod.hemisphere]
ods.coords['calendar_month_2d'] = ('month_2d', (np.arange(12) + sm - 1) % 12 + 1)
for varname, d in out.items():
data = d.pop('data')
if varname not in out_vars:
continue
if len(data.shape) == 2:
# First the annual agg
if varname == 'snow_bucket':
# Snowbucket is a state variable
ods[varname] = ('time', data[:, 0])
else:
                # The last time step is the final state (no fluxes computed), mask it
data[-1, :] = np.NaN
ods[varname] = ('time', np.sum(data, axis=1))
# Then the monthly ones
if store_monthly_hydro:
ods[varname + '_monthly'] = (('time', 'month_2d'), data)
else:
assert varname != 'snow_bucket'
data[-1] = np.NaN
ods[varname] = ('time', data)
for k, v in d.items():
ods[varname].attrs[k] = v
# Append the output to the existing diagnostics
fpath = gdir.get_filepath('model_diagnostics', filesuffix=suffix)
ods.to_netcdf(fpath, mode='a')
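# Example call (hedged; values illustrative). `run_task` must be one of the
# `run_*` tasks above and its kwargs are passed through:
#
#     workflow.execute_entity_task(tasks.run_with_hydro, gdirs,
#                                  run_task=tasks.run_from_climate_data,
#                                  store_monthly_hydro=True,
#                                  output_filesuffix='_hydro')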
def merge_to_one_glacier(main, tribs, filename='climate_historical',
input_filesuffix=''):
"""Merge multiple tributary glacier flowlines to a main glacier
This function will merge multiple tributary glaciers to a main glacier
and write modified `model_flowlines` to the main GlacierDirectory.
The provided tributaries must have an intersecting downstream line.
To be sure about this, use `intersect_downstream_lines` first.
This function is mainly responsible to reproject the flowlines, set
flowline attributes and to copy additional files, like the necessary climate
files.
Parameters
----------
main : oggm.GlacierDirectory
The new GDir of the glacier of interest
tribs : list or dictionary containing oggm.GlacierDirectories
true tributary glaciers to the main glacier
filename: str
Baseline climate file
input_filesuffix: str
Filesuffix to the climate file
"""
# read flowlines of the Main glacier
fls = main.read_pickle('model_flowlines')
    mfl = fls.pop(-1)  # remove main line from list and treat separately
for trib in tribs:
# read tributary flowlines and append to list
tfls = trib.read_pickle('model_flowlines')
# copy climate file and local_mustar to new gdir
# if we have a merge-merge situation we need to copy multiple files
rgiids = set([fl.rgi_id for fl in tfls])
for uid in rgiids:
if len(rgiids) == 1:
# we do not have a merge-merge situation
in_id = ''
out_id = trib.rgi_id
else:
in_id = '_' + uid
out_id = uid
climfile_in = filename + in_id + input_filesuffix + '.nc'
climfile_out = filename + '_' + out_id + input_filesuffix + '.nc'
shutil.copyfile(os.path.join(trib.dir, climfile_in),
os.path.join(main.dir, climfile_out))
_m = os.path.basename(trib.get_filepath('local_mustar')).split('.')
muin = _m[0] + in_id + '.' + _m[1]
muout = _m[0] + '_' + out_id + '.' + _m[1]
shutil.copyfile(os.path.join(trib.dir, muin),
os.path.join(main.dir, muout))
# sort flowlines descending
tfls.sort(key=lambda x: x.order, reverse=True)
# loop over tributaries and reproject to main glacier
for nr, tfl in enumerate(tfls):
            # 1. Change projection to the main glacier's grid
_line = salem.transform_geometry(tfl.line,
crs=trib.grid, to_crs=main.grid)
# 2. set new line
tfl.set_line(_line)
# 3. set map attributes
dx = [shpg.Point(tfl.line.coords[i]).distance(
shpg.Point(tfl.line.coords[i+1]))
for i, pt in enumerate(tfl.line.coords[:-1])] # get distance
# and check if equally spaced
if not np.allclose(dx, np.mean(dx), atol=1e-2):
raise RuntimeError('Flowline is not evenly spaced.')
tfl.dx = np.mean(dx).round(2)
tfl.map_dx = mfl.map_dx
tfl.dx_meter = tfl.map_dx * tfl.dx
            # 4. remove attributes, they will be set again later
tfl.inflow_points = []
tfl.inflows = []
            # 5. set flows to, mainly to update flows_to_point coordinates
if tfl.flows_to is not None:
tfl.set_flows_to(tfl.flows_to)
# append tributary flowlines to list
fls += tfls
# add main flowline to the end
fls = fls + [mfl]
# Finally write the flowlines
main.write_pickle(fls, 'model_flowlines')
def clean_merged_flowlines(gdir, buffer=None):
"""Order and cut merged flowlines to size.
After matching flowlines were found and merged to one glacier directory
this function makes them nice:
    There should only be one flowline per bed, so overlapping lines have to
    be cut, attributed to another flowline and ordered.
Parameters
----------
gdir : oggm.GlacierDirectory
The GDir of the glacier of interest
buffer: float
Buffer around the flowlines to find overlaps
"""
    # A buffer of None is not allowed: fall back to the default
if buffer is None:
buffer = cfg.PARAMS['kbuffer']
# Number of pixels to arbitrarily remove at junctions
lid = int(cfg.PARAMS['flowline_junction_pix'])
fls = gdir.read_pickle('model_flowlines')
    # separate the main main flowline
mainfl = fls.pop(-1)
# split fls in main and tribs
mfls = [fl for fl in fls if fl.flows_to is None]
tfls = [fl for fl in fls if fl not in mfls]
# --- first treat the main flowlines ---
# sort by order and length as a second choice
mfls.sort(key=lambda x: (x.order, len(x.inflows), x.length_m),
reverse=False)
merged = []
# for fl1 in mfls:
while len(mfls) > 0:
fl1 = mfls.pop(0)
        ol_index = []  # list of indices of the first overlap
# loop over other main lines and main main line
for fl2 in mfls + [mainfl]:
# calculate overlap, maybe use larger buffer here only to find it
_overlap = fl1.line.intersection(fl2.line.buffer(buffer*2))
            # calculate the index of the first overlap if overlap length > 0
oix = 9999
if _overlap.length > 0 and fl1 != fl2 and fl2.flows_to != fl1:
if isinstance(_overlap, shpg.MultiLineString):
if _overlap[0].coords[0] == fl1.line.coords[0]:
                        # if the head of the overlap is the same as the first
                        # line, best guess is that the heads are close together!
_ov1 = _overlap[1].coords[1]
else:
_ov1 = _overlap[0].coords[1]
else:
_ov1 = _overlap.coords[1]
for _i, _p in enumerate(fl1.line.coords):
if _p == _ov1:
oix = _i
            # low indices are more likely due to a wrong overlap
if oix < 10:
oix = 9999
ol_index.append(oix)
ol_index = np.array(ol_index)
if np.all(ol_index == 9999):
log.warning('Glacier %s could not be merged, removed!' %
fl1.rgi_id)
# remove possible tributary flowlines
tfls = [fl for fl in tfls if fl.rgi_id != fl1.rgi_id]
# skip rest of this while loop
continue
        # make this based on the first overlap, but consider order and/or length
minx = ol_index[ol_index <= ol_index.min()+10][-1]
i = np.where(ol_index == minx)[0][-1]
_olline = (mfls + [mainfl])[i]
# 1. cut line to size
_line = fl1.line
bufferuse = buffer
while bufferuse > 0:
_overlap = _line.intersection(_olline.line.buffer(bufferuse))
_linediff = _line.difference(_overlap) # cut to new line
# if the tributary flowline is longer than the main line,
# _line will contain multiple LineStrings: only keep the first
if isinstance(_linediff, shpg.MultiLineString):
_linediff = _linediff[0]
if len(_linediff.coords) < 10:
bufferuse -= 1
else:
break
if bufferuse <= 0:
            log.warning('Glacier %s would be too short after merge, removed!' %
fl1.rgi_id)
# remove possible tributary flowlines
tfls = [fl for fl in tfls if fl.rgi_id != fl1.rgi_id]
# skip rest of this while loop
continue
# remove cfg.PARAMS['flowline_junction_pix'] from the _line
# gives a bigger gap at the junction and makes sure the last
# point is not corrupted in terms of spacing
_line = shpg.LineString(_linediff.coords[:-lid])
# 2. set new line
fl1.set_line(_line)
        # 3. set flows_to attributes. This also adds inflow values to the other line
fl1.set_flows_to(_olline)
        # change the array size of tributary flowline attributes
for atr, value in fl1.__dict__.items():
if atr in ['_ptrap', '_prec']:
# those are indices, remove those above nx
fl1.__setattr__(atr, value[value < fl1.nx])
elif isinstance(value, np.ndarray) and (len(value) > fl1.nx):
# those are actual parameters on the grid
fl1.__setattr__(atr, value[:fl1.nx])
merged.append(fl1)
allfls = merged + tfls
# now check all lines for possible cut offs
for fl in allfls:
try:
fl.flows_to_indice
except AssertionError:
mfl = fl.flows_to
# remove it from original
mfl.inflow_points.remove(fl.flows_to_point)
mfl.inflows.remove(fl)
prdis = mfl.line.project(fl.tail)
mfl_keep = mfl
while mfl.flows_to is not None:
prdis2 = mfl.flows_to.line.project(fl.tail)
if prdis2 < prdis:
mfl_keep = mfl
prdis = prdis2
mfl = mfl.flows_to
# we should be good to add this line here
fl.set_flows_to(mfl_keep.flows_to)
allfls = allfls + [mainfl]
for fl in allfls:
fl.inflows = []
fl.inflow_points = []
if hasattr(fl, '_lazy_flows_to_indice'):
delattr(fl, '_lazy_flows_to_indice')
if hasattr(fl, '_lazy_inflow_indices'):
delattr(fl, '_lazy_inflow_indices')
for fl in allfls:
if fl.flows_to is not None:
fl.set_flows_to(fl.flows_to)
for fl in allfls:
fl.order = line_order(fl)
    # sort flowlines by order, ascending (main flowline last)
allfls.sort(key=lambda x: x.order, reverse=False)
# assert last flowline is main flowline
assert allfls[-1] == mainfl
# Finally write the flowlines
gdir.write_pickle(allfls, 'model_flowlines')
| bsd-3-clause |
mikebenfield/scikit-learn | sklearn/cluster/tests/test_mean_shift.py | 48 | 3653 | """
Testing for mean shift clustering methods
"""
import numpy as np
import warnings
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.cluster import MeanShift
from sklearn.cluster import mean_shift
from sklearn.cluster import estimate_bandwidth
from sklearn.cluster import get_bin_seeds
from sklearn.datasets.samples_generator import make_blobs
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=300, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=11)
def test_estimate_bandwidth():
# Test estimate_bandwidth
bandwidth = estimate_bandwidth(X, n_samples=200)
assert_true(0.9 <= bandwidth <= 1.5)
def test_mean_shift():
# Test MeanShift algorithm
bandwidth = 1.2
ms = MeanShift(bandwidth=bandwidth)
labels = ms.fit(X).labels_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
cluster_centers, labels = mean_shift(X, bandwidth=bandwidth)
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
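# Typical usage outside of the tests (sketch): estimate the bandwidth first,
# then cluster.
#
#     bandwidth = estimate_bandwidth(X, quantile=0.3, n_samples=200)
#     labels = MeanShift(bandwidth=bandwidth).fit(X).labels_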
def test_parallel():
ms1 = MeanShift(n_jobs=2)
ms1.fit(X)
ms2 = MeanShift()
ms2.fit(X)
assert_array_equal(ms1.cluster_centers_, ms2.cluster_centers_)
assert_array_equal(ms1.labels_, ms2.labels_)
def test_meanshift_predict():
# Test MeanShift.predict
ms = MeanShift(bandwidth=1.2)
labels = ms.fit_predict(X)
labels2 = ms.predict(X)
assert_array_equal(labels, labels2)
def test_meanshift_all_orphans():
    # init away from the data, should fail with a sensible error message
ms = MeanShift(bandwidth=0.1, seeds=[[-9, -9], [-10, -10]])
msg = "No point was within bandwidth=0.1"
assert_raise_message(ValueError, msg, ms.fit, X,)
def test_unfitted():
    # Non-regression: before fit, there should be no fitted attributes.
ms = MeanShift()
assert_false(hasattr(ms, "cluster_centers_"))
assert_false(hasattr(ms, "labels_"))
def test_bin_seeds():
# Test the bin seeding technique which can be used in the mean shift
# algorithm
# Data is just 6 points in the plane
X = np.array([[1., 1.], [1.4, 1.4], [1.8, 1.2],
[2., 1.], [2.1, 1.1], [0., 0.]])
# With a bin coarseness of 1.0 and min_bin_freq of 1, 3 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.), (0., 0.)])
test_bins = get_bin_seeds(X, 1, 1)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin coarseness of 1.0 and min_bin_freq of 2, 2 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.)])
test_bins = get_bin_seeds(X, 1, 2)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin size of 0.01 and min_bin_freq of 1, 6 bins should be found
# we bail and use the whole data here.
with warnings.catch_warnings(record=True):
test_bins = get_bin_seeds(X, 0.01, 1)
assert_array_equal(test_bins, X)
# tight clusters around [0, 0] and [1, 1], only get two bins
X, _ = make_blobs(n_samples=100, n_features=2, centers=[[0, 0], [1, 1]],
cluster_std=0.1, random_state=0)
test_bins = get_bin_seeds(X, 1)
assert_array_equal(test_bins, [[0, 0], [1, 1]])
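# Hedged usage sketch (not part of the original scikit-learn test suite): the
# bin-seeding technique exercised above is normally enabled through the
# MeanShift estimator itself via its `bin_seeding` flag, which seeds the search
# with the binned points returned by get_bin_seeds(). `X` is the module-level
# blobs data defined at the top of this file.
def _example_bin_seeding_usage():
    ms = MeanShift(bandwidth=1.2, bin_seeding=True)
    ms.fit(X)
    return ms.cluster_centers_, ms.labels_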
| bsd-3-clause |
ogrisel/scipy | scipy/signal/ltisys.py | 5 | 30979 | """
ltisys -- a collection of classes and functions for modeling linear
time invariant systems.
"""
from __future__ import division, print_function, absolute_import
#
# Author: Travis Oliphant 2001
#
# Feb 2010: Warren Weckesser
# Rewrote lsim2 and added impulse2.
# Aug 2013: Juan Luis Cano
# Rewrote abcd_normalize.
#
from .filter_design import tf2zpk, zpk2tf, normalize, freqs
import numpy
from numpy import product, zeros, array, dot, transpose, ones, \
nan_to_num, zeros_like, linspace
import scipy.interpolate as interpolate
import scipy.integrate as integrate
import scipy.linalg as linalg
from scipy.lib.six import xrange
from numpy import r_, eye, real, atleast_1d, atleast_2d, poly, \
squeeze, diag, asarray
__all__ = ['tf2ss', 'ss2tf', 'abcd_normalize', 'zpk2ss', 'ss2zpk', 'lti',
'lsim', 'lsim2', 'impulse', 'impulse2', 'step', 'step2', 'bode',
'freqresp']
def tf2ss(num, den):
"""Transfer function to state-space representation.
Parameters
----------
num, den : array_like
Sequences representing the numerator and denominator polynomials.
The denominator needs to be at least as long as the numerator.
Returns
-------
A, B, C, D : ndarray
State space representation of the system, in controller canonical
form.
"""
# Controller canonical state-space representation.
# if M+1 = len(num) and K+1 = len(den) then we must have M <= K
# states are found by asserting that X(s) = U(s) / D(s)
# then Y(s) = N(s) * X(s)
#
# A, B, C, and D follow quite naturally.
#
num, den = normalize(num, den) # Strips zeros, checks arrays
nn = len(num.shape)
if nn == 1:
num = asarray([num], num.dtype)
M = num.shape[1]
K = len(den)
if M > K:
msg = "Improper transfer function. `num` is longer than `den`."
raise ValueError(msg)
if M == 0 or K == 0: # Null system
return array([], float), array([], float), array([], float), \
array([], float)
    # pad numerator to have same number of columns as denominator
num = r_['-1', zeros((num.shape[0], K - M), num.dtype), num]
if num.shape[-1] > 0:
D = num[:, 0]
else:
D = array([], float)
if K == 1:
return array([], float), array([], float), array([], float), D
frow = -array([den[1:]])
A = r_[frow, eye(K - 2, K - 1)]
B = eye(K - 1, 1)
C = num[:, 1:] - num[:, 0] * den[1:]
return A, B, C, D
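# Hedged usage sketch (added for illustration; not part of the original SciPy
# source). It shows the controller-canonical matrices tf2ss() produces for
# H(s) = (s + 3) / (s**2 + 2*s + 1); the expected values in the comments were
# worked out by hand from the algorithm above.
def _example_tf2ss():
    A, B, C, D = tf2ss([1.0, 3.0], [1.0, 2.0, 1.0])
    # Expected (controller canonical form):
    #   A = [[-2., -1.], [ 1.,  0.]]
    #   B = [[1.], [0.]]
    #   C = [[1., 3.]]
    #   D = [0.]
    return A, B, C, D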
def _none_to_empty_2d(arg):
if arg is None:
return zeros((0, 0))
else:
return arg
def _atleast_2d_or_none(arg):
if arg is not None:
return atleast_2d(arg)
def _shape_or_none(M):
if M is not None:
return M.shape
else:
return (None,) * 2
def _choice_not_none(*args):
for arg in args:
if arg is not None:
return arg
def _restore(M, shape):
if M.shape == (0, 0):
return zeros(shape)
else:
if M.shape != shape:
raise ValueError("The input arrays have incompatible shapes.")
return M
def abcd_normalize(A=None, B=None, C=None, D=None):
"""Check state-space matrices and ensure they are rank-2.
If enough information on the system is provided, that is, enough
properly-shaped arrays are passed to the function, the missing ones
are built from this information, ensuring the correct number of
rows and columns. Otherwise a ValueError is raised.
Parameters
----------
A, B, C, D : array_like, optional
State-space matrices. All of them are None (missing) by default.
Returns
-------
A, B, C, D : array
Properly shaped state-space matrices.
Raises
------
ValueError
If not enough information on the system was provided.
"""
A, B, C, D = map(_atleast_2d_or_none, (A, B, C, D))
MA, NA = _shape_or_none(A)
MB, NB = _shape_or_none(B)
MC, NC = _shape_or_none(C)
MD, ND = _shape_or_none(D)
p = _choice_not_none(MA, MB, NC)
q = _choice_not_none(NB, ND)
r = _choice_not_none(MC, MD)
if p is None or q is None or r is None:
raise ValueError("Not enough information on the system.")
A, B, C, D = map(_none_to_empty_2d, (A, B, C, D))
A = _restore(A, (p, p))
B = _restore(B, (p, q))
C = _restore(C, (r, p))
D = _restore(D, (r, q))
return A, B, C, D
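# Hedged usage sketch (not part of the original SciPy source): given A, B and
# C, abcd_normalize() fills in the missing D with a zero matrix of the
# consistent shape (n_outputs x n_inputs), as described in the docstring above.
def _example_abcd_normalize():
    A = [[-2.0, -1.0], [1.0, 0.0]]
    B = [[1.0], [0.0]]
    C = [[1.0, 3.0]]
    A2, B2, C2, D2 = abcd_normalize(A=A, B=B, C=C)
    # D2 is built as zeros((1, 1)): one output (rows of C), one input (cols of B)
    return A2, B2, C2, D2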
def ss2tf(A, B, C, D, input=0):
"""State-space to transfer function.
Parameters
----------
A, B, C, D : ndarray
State-space representation of linear system.
input : int, optional
For multiple-input systems, the input to use.
Returns
-------
num : 2-D ndarray
Numerator(s) of the resulting transfer function(s). `num` has one row
for each of the system's outputs. Each row is a sequence representation
of the numerator polynomial.
den : 1-D ndarray
Denominator of the resulting transfer function(s). `den` is a sequence
representation of the denominator polynomial.
"""
# transfer function is C (sI - A)**(-1) B + D
A, B, C, D = map(asarray, (A, B, C, D))
# Check consistency and make them all rank-2 arrays
A, B, C, D = abcd_normalize(A, B, C, D)
nout, nin = D.shape
if input >= nin:
raise ValueError("System does not have the input specified.")
# make MOSI from possibly MOMI system.
if B.shape[-1] != 0:
B = B[:, input]
B.shape = (B.shape[0], 1)
if D.shape[-1] != 0:
D = D[:, input]
try:
den = poly(A)
except ValueError:
den = 1
if (product(B.shape, axis=0) == 0) and (product(C.shape, axis=0) == 0):
num = numpy.ravel(D)
if (product(D.shape, axis=0) == 0) and (product(A.shape, axis=0) == 0):
den = []
return num, den
num_states = A.shape[0]
type_test = A[:, 0] + B[:, 0] + C[0, :] + D
num = numpy.zeros((nout, num_states + 1), type_test.dtype)
for k in range(nout):
Ck = atleast_2d(C[k, :])
num[k] = poly(A - dot(B, Ck)) + (D[k] - 1) * den
return num, den
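# Hedged round-trip sketch (not part of the original SciPy source): converting
# the state-space matrices from the tf2ss() example above back with ss2tf()
# should recover the original polynomials, up to a leading zero in `num`.
def _example_ss2tf_roundtrip():
    A, B, C, D = tf2ss([1.0, 3.0], [1.0, 2.0, 1.0])
    num, den = ss2tf(A, B, C, D)
    # Expected: num ~ [[0., 1., 3.]] and den ~ [1., 2., 1.]
    return num, den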
def zpk2ss(z, p, k):
"""Zero-pole-gain representation to state-space representation
Parameters
----------
z, p : sequence
Zeros and poles.
k : float
System gain.
Returns
-------
A, B, C, D : ndarray
State space representation of the system, in controller canonical
form.
"""
return tf2ss(*zpk2tf(z, p, k))
def ss2zpk(A, B, C, D, input=0):
"""State-space representation to zero-pole-gain representation.
Parameters
----------
A, B, C, D : ndarray
State-space representation of linear system.
input : int, optional
For multiple-input systems, the input to use.
Returns
-------
z, p : sequence
Zeros and poles.
k : float
System gain.
"""
return tf2zpk(*ss2tf(A, B, C, D, input=input))
class lti(object):
"""Linear Time Invariant class which simplifies representation.
Parameters
----------
args : arguments
The `lti` class can be instantiated with either 2, 3 or 4 arguments.
The following gives the number of elements in the tuple and the
interpretation:
* 2: (numerator, denominator)
* 3: (zeros, poles, gain)
* 4: (A, B, C, D)
Each argument can be an array or sequence.
Notes
-----
`lti` instances have all types of representations available; for example
after creating an instance s with ``(zeros, poles, gain)`` the transfer
function representation (numerator, denominator) can be accessed as
``s.num`` and ``s.den``.
"""
def __init__(self, *args, **kwords):
"""
Initialize the LTI system using either:
- (numerator, denominator)
- (zeros, poles, gain)
- (A, B, C, D) : state-space.
"""
N = len(args)
if N == 2: # Numerator denominator transfer function input
self._num, self._den = normalize(*args)
self._update(N)
self.inputs = 1
if len(self.num.shape) > 1:
self.outputs = self.num.shape[0]
else:
self.outputs = 1
elif N == 3: # Zero-pole-gain form
self._zeros, self._poles, self._gain = args
self._update(N)
# make sure we have numpy arrays
self.zeros = numpy.asarray(self.zeros)
self.poles = numpy.asarray(self.poles)
self.inputs = 1
if len(self.zeros.shape) > 1:
self.outputs = self.zeros.shape[0]
else:
self.outputs = 1
elif N == 4: # State-space form
self._A, self._B, self._C, self._D = abcd_normalize(*args)
self._update(N)
self.inputs = self.B.shape[-1]
self.outputs = self.C.shape[0]
else:
raise ValueError("Needs 2, 3, or 4 arguments.")
def __repr__(self):
"""
Canonical representation using state-space to preserve numerical
precision and any MIMO information
"""
return '{0}(\n{1},\n{2},\n{3},\n{4}\n)'.format(
self.__class__.__name__,
repr(self.A),
repr(self.B),
repr(self.C),
repr(self.D),
)
@property
def num(self):
return self._num
@num.setter
def num(self, value):
self._num = value
self._update(2)
@property
def den(self):
return self._den
@den.setter
def den(self, value):
self._den = value
self._update(2)
@property
def zeros(self):
return self._zeros
@zeros.setter
def zeros(self, value):
self._zeros = value
self._update(3)
@property
def poles(self):
return self._poles
@poles.setter
def poles(self, value):
self._poles = value
self._update(3)
@property
def gain(self):
return self._gain
@gain.setter
def gain(self, value):
self._gain = value
self._update(3)
@property
def A(self):
return self._A
@A.setter
def A(self, value):
self._A = value
self._update(4)
@property
def B(self):
return self._B
@B.setter
def B(self, value):
self._B = value
self._update(4)
@property
def C(self):
return self._C
@C.setter
def C(self, value):
self._C = value
self._update(4)
@property
def D(self):
return self._D
@D.setter
def D(self, value):
self._D = value
self._update(4)
def _update(self, N):
if N == 2:
self._zeros, self._poles, self._gain = tf2zpk(self.num, self.den)
self._A, self._B, self._C, self._D = tf2ss(self.num, self.den)
if N == 3:
self._num, self._den = zpk2tf(self.zeros, self.poles, self.gain)
self._A, self._B, self._C, self._D = zpk2ss(self.zeros,
self.poles, self.gain)
if N == 4:
self._num, self._den = ss2tf(self.A, self.B, self.C, self.D)
self._zeros, self._poles, self._gain = ss2zpk(self.A, self.B,
self.C, self.D)
def impulse(self, X0=None, T=None, N=None):
return impulse(self, X0=X0, T=T, N=N)
def step(self, X0=None, T=None, N=None):
return step(self, X0=X0, T=T, N=N)
def output(self, U, T, X0=None):
return lsim(self, U, T, X0=X0)
def bode(self, w=None, n=100):
"""
Calculate Bode magnitude and phase data.
Returns a 3-tuple containing arrays of frequencies [rad/s], magnitude
[dB] and phase [deg]. See scipy.signal.bode for details.
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> s1 = signal.lti([1], [1, 1])
>>> w, mag, phase = s1.bode()
>>> plt.figure()
>>> plt.semilogx(w, mag) # Bode magnitude plot
>>> plt.figure()
>>> plt.semilogx(w, phase) # Bode phase plot
>>> plt.show()
"""
return bode(self, w=w, n=n)
def freqresp(self, w=None, n=10000):
"""Calculate the frequency response of a continuous-time system.
Returns a 2-tuple containing arrays of frequencies [rad/s] and
complex magnitude.
See scipy.signal.freqresp for details.
"""
return freqresp(self, w=w, n=n)
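# Hedged usage sketch (not part of the original SciPy source): after building
# an `lti` instance from one representation, the other representations are
# available as attributes, as described in the class docstring above.
def _example_lti_representations():
    sys = lti([1.0], [1.0, 2.0])  # transfer function 1 / (s + 2)
    # sys.zeros -> [] ; sys.poles -> [-2.] ; sys.gain -> 1.0
    # sys.A -> [[-2.]] ; sys.B -> [[1.]] ; sys.C -> [[1.]] ; sys.D -> [0.]
    return sys.zeros, sys.poles, sys.gain, sys.A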
def lsim2(system, U=None, T=None, X0=None, **kwargs):
"""
Simulate output of a continuous-time linear system, by using
the ODE solver `scipy.integrate.odeint`.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2: (num, den)
* 3: (zeros, poles, gain)
* 4: (A, B, C, D)
U : array_like (1D or 2D), optional
An input array describing the input at each time T. Linear
interpolation is used between given times. If there are
multiple inputs, then each column of the rank-2 array
represents an input. If U is not given, the input is assumed
to be zero.
T : array_like (1D or 2D), optional
The time steps at which the input is defined and at which the
output is desired. The default is 101 evenly spaced points on
the interval [0,10.0].
X0 : array_like (1D), optional
The initial condition of the state vector. If `X0` is not
given, the initial conditions are assumed to be 0.
kwargs : dict
Additional keyword arguments are passed on to the function
`odeint`. See the notes below for more details.
Returns
-------
T : 1D ndarray
The time values for the output.
yout : ndarray
The response of the system.
xout : ndarray
The time-evolution of the state-vector.
Notes
-----
This function uses `scipy.integrate.odeint` to solve the
system's differential equations. Additional keyword arguments
given to `lsim2` are passed on to `odeint`. See the documentation
for `scipy.integrate.odeint` for the full list of arguments.
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
if X0 is None:
X0 = zeros(sys.B.shape[0], sys.A.dtype)
if T is None:
# XXX T should really be a required argument, but U was
# changed from a required positional argument to a keyword,
# and T is after U in the argument list. So we either: change
# the API and move T in front of U; check here for T being
# None and raise an exception; or assign a default value to T
# here. This code implements the latter.
T = linspace(0, 10.0, 101)
T = atleast_1d(T)
if len(T.shape) != 1:
raise ValueError("T must be a rank-1 array.")
if U is not None:
U = atleast_1d(U)
if len(U.shape) == 1:
U = U.reshape(-1, 1)
sU = U.shape
if sU[0] != len(T):
raise ValueError("U must have the same number of rows "
"as elements in T.")
if sU[1] != sys.inputs:
raise ValueError("The number of inputs in U (%d) is not "
"compatible with the number of system "
"inputs (%d)" % (sU[1], sys.inputs))
# Create a callable that uses linear interpolation to
# calculate the input at any time.
ufunc = interpolate.interp1d(T, U, kind='linear',
axis=0, bounds_error=False)
def fprime(x, t, sys, ufunc):
"""The vector field of the linear system."""
return dot(sys.A, x) + squeeze(dot(sys.B, nan_to_num(ufunc([t]))))
xout = integrate.odeint(fprime, X0, T, args=(sys, ufunc), **kwargs)
yout = dot(sys.C, transpose(xout)) + dot(sys.D, transpose(U))
else:
def fprime(x, t, sys):
"""The vector field of the linear system."""
return dot(sys.A, x)
xout = integrate.odeint(fprime, X0, T, args=(sys,), **kwargs)
yout = dot(sys.C, transpose(xout))
return T, squeeze(transpose(yout)), xout
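# Hedged usage sketch (not part of the original SciPy source): zero-input
# response of the first-order system 1 / (s + 1) starting from X0 = [1.0];
# the output should decay roughly like exp(-t).
def _example_lsim2():
    t = linspace(0.0, 5.0, 51)
    tout, yout, xout = lsim2(([1.0], [1.0, 1.0]), U=None, T=t, X0=[1.0])
    return tout, yout, xout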
def _cast_to_array_dtype(in1, in2):
"""Cast array to dtype of other array, while avoiding ComplexWarning.
Those can be raised when casting complex to real.
"""
if numpy.issubdtype(in2.dtype, numpy.float):
# dtype to cast to is not complex, so use .real
in1 = in1.real.astype(in2.dtype)
else:
in1 = in1.astype(in2.dtype)
return in1
def lsim(system, U, T, X0=None, interp=1):
"""
Simulate output of a continuous-time linear system.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2: (num, den)
* 3: (zeros, poles, gain)
* 4: (A, B, C, D)
U : array_like
An input array describing the input at each time `T`
(interpolation is assumed between given times). If there are
multiple inputs, then each column of the rank-2 array
represents an input.
T : array_like
The time steps at which the input is defined and at which the
output is desired.
X0 :
The initial conditions on the state vector (zero by default).
interp : {1, 0}
Whether to use linear (1) or zero-order hold (0) interpolation.
Returns
-------
T : 1D ndarray
Time values for the output.
yout : 1D ndarray
System response.
xout : ndarray
Time-evolution of the state-vector.
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
U = atleast_1d(U)
T = atleast_1d(T)
if len(U.shape) == 1:
U = U.reshape((U.shape[0], 1))
sU = U.shape
if len(T.shape) != 1:
raise ValueError("T must be a rank-1 array.")
if sU[0] != len(T):
raise ValueError("U must have the same number of rows "
"as elements in T.")
if sU[1] != sys.inputs:
raise ValueError("System does not define that many inputs.")
if X0 is None:
X0 = zeros(sys.B.shape[0], sys.A.dtype)
xout = zeros((len(T), sys.B.shape[0]), sys.A.dtype)
xout[0] = X0
A = sys.A
AT, BT = transpose(sys.A), transpose(sys.B)
dt = T[1] - T[0]
lam, v = linalg.eig(A)
vt = transpose(v)
vti = linalg.inv(vt)
GT = dot(dot(vti, diag(numpy.exp(dt * lam))), vt)
GT = _cast_to_array_dtype(GT, xout)
ATm1 = linalg.inv(AT)
ATm2 = dot(ATm1, ATm1)
I = eye(A.shape[0], dtype=A.dtype)
GTmI = GT - I
F1T = dot(dot(BT, GTmI), ATm1)
if interp:
F2T = dot(BT, dot(GTmI, ATm2) / dt - ATm1)
for k in xrange(1, len(T)):
dt1 = T[k] - T[k - 1]
if dt1 != dt:
dt = dt1
GT = dot(dot(vti, diag(numpy.exp(dt * lam))), vt)
GT = _cast_to_array_dtype(GT, xout)
GTmI = GT - I
F1T = dot(dot(BT, GTmI), ATm1)
if interp:
F2T = dot(BT, dot(GTmI, ATm2) / dt - ATm1)
xout[k] = dot(xout[k - 1], GT) + dot(U[k - 1], F1T)
if interp:
xout[k] = xout[k] + dot((U[k] - U[k - 1]), F2T)
yout = (squeeze(dot(U, transpose(sys.D))) +
squeeze(dot(xout, transpose(sys.C))))
return T, squeeze(yout), squeeze(xout)
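# Hedged usage sketch (not part of the original SciPy source): step response of
# 1 / (s + 1) computed with lsim() by feeding a constant unit input; yout should
# approach 1 - exp(-t).
def _example_lsim():
    t = linspace(0.0, 5.0, 101)
    u = ones(t.shape)
    tout, yout, xout = lsim(([1.0], [1.0, 1.0]), u, t)
    return tout, yout, xout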
def _default_response_times(A, n):
"""Compute a reasonable set of time samples for the response time.
This function is used by `impulse`, `impulse2`, `step` and `step2`
to compute the response time when the `T` argument to the function
is None.
Parameters
----------
A : ndarray
The system matrix, which is square.
n : int
The number of time samples to generate.
Returns
-------
t : ndarray
The 1-D array of length `n` of time samples at which the response
is to be computed.
"""
# Create a reasonable time interval.
# TODO: This could use some more work.
# For example, what is expected when the system is unstable?
vals = linalg.eigvals(A)
r = min(abs(real(vals)))
if r == 0.0:
r = 1.0
tc = 1.0 / r
t = linspace(0.0, 7 * tc, n)
return t
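# Hedged worked example (not part of the original SciPy source): for
# A = [[-0.5, 0], [0, -2]] the slowest eigenvalue magnitude is 0.5, so the
# heuristic above uses tc = 1 / 0.5 = 2.0 and returns n samples spanning
# [0, 7 * tc], i.e. roughly [0, 14].
def _example_default_response_times():
    A = array([[-0.5, 0.0], [0.0, -2.0]])
    t = _default_response_times(A, 50)
    # t[0] == 0.0 and t[-1] == 14.0 (approximately)
    return t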
def impulse(system, X0=None, T=None, N=None):
"""Impulse response of continuous-time system.
Parameters
----------
system : an instance of the LTI class or a tuple of array_like
describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
X0 : array_like, optional
Initial state-vector. Defaults to zero.
T : array_like, optional
Time points. Computed if not given.
N : int, optional
The number of time points to compute (if `T` is not given).
Returns
-------
T : ndarray
A 1-D array of time points.
yout : ndarray
A 1-D array containing the impulse response of the system (except for
singularities at zero).
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
if X0 is None:
B = sys.B
else:
B = sys.B + X0
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
else:
T = asarray(T)
h = zeros(T.shape, sys.A.dtype)
s, v = linalg.eig(sys.A)
vi = linalg.inv(v)
C = sys.C
for k in range(len(h)):
es = diag(numpy.exp(s * T[k]))
eA = dot(dot(v, es), vi)
eA = _cast_to_array_dtype(eA, h)
h[k] = squeeze(dot(dot(C, eA), B))
return T, h
def impulse2(system, X0=None, T=None, N=None, **kwargs):
"""
Impulse response of a single-input, continuous-time linear system.
Parameters
----------
system : an instance of the LTI class or a tuple of array_like
describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
X0 : 1-D array_like, optional
The initial condition of the state vector. Default: 0 (the
zero vector).
T : 1-D array_like, optional
The time steps at which the input is defined and at which the
output is desired. If `T` is not given, the function will
generate a set of time samples automatically.
N : int, optional
Number of time points to compute. Default: 100.
kwargs : various types
Additional keyword arguments are passed on to the function
`scipy.signal.lsim2`, which in turn passes them on to
`scipy.integrate.odeint`; see the latter's documentation for
information about these arguments.
Returns
-------
T : ndarray
The time values for the output.
yout : ndarray
The output response of the system.
See Also
--------
impulse, lsim2, integrate.odeint
Notes
-----
The solution is generated by calling `scipy.signal.lsim2`, which uses
the differential equation solver `scipy.integrate.odeint`.
.. versionadded:: 0.8.0
Examples
--------
    Second order system with a repeated root: x''(t) + 2*x'(t) + x(t) = u(t)
>>> from scipy import signal
>>> system = ([1.0], [1.0, 2.0, 1.0])
>>> t, y = signal.impulse2(system)
>>> import matplotlib.pyplot as plt
>>> plt.plot(t, y)
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
B = sys.B
if B.shape[-1] != 1:
raise ValueError("impulse2() requires a single-input system.")
B = B.squeeze()
if X0 is None:
X0 = zeros_like(B)
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
# Move the impulse in the input to the initial conditions, and then
# solve using lsim2().
ic = B + X0
Tr, Yr, Xr = lsim2(sys, T=T, X0=ic, **kwargs)
return Tr, Yr
def step(system, X0=None, T=None, N=None):
"""Step response of continuous-time system.
Parameters
----------
system : an instance of the LTI class or a tuple of array_like
describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
X0 : array_like, optional
Initial state-vector (default is zero).
T : array_like, optional
Time points (computed if not given).
N : int
Number of time points to compute if `T` is not given.
Returns
-------
T : 1D ndarray
Output time points.
yout : 1D ndarray
Step response of system.
See also
--------
scipy.signal.step2
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
else:
T = asarray(T)
U = ones(T.shape, sys.A.dtype)
vals = lsim(sys, U, T, X0=X0)
return vals[0], vals[1]
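# Hedged usage sketch (not part of the original SciPy source): the step
# response of 1 / (s + 1) should approach 1 - exp(-t) over the automatically
# chosen time samples.
def _example_step():
    t, y = step(([1.0], [1.0, 1.0]))
    return t, y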
def step2(system, X0=None, T=None, N=None, **kwargs):
"""Step response of continuous-time system.
This function is functionally the same as `scipy.signal.step`, but
it uses the function `scipy.signal.lsim2` to compute the step
response.
Parameters
----------
system : an instance of the LTI class or a tuple of array_like
describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
X0 : array_like, optional
Initial state-vector (default is zero).
T : array_like, optional
Time points (computed if not given).
N : int
Number of time points to compute if `T` is not given.
kwargs : various types
        Additional keyword arguments are passed on to the function
`scipy.signal.lsim2`, which in turn passes them on to
`scipy.integrate.odeint`. See the documentation for
`scipy.integrate.odeint` for information about these arguments.
Returns
-------
T : 1D ndarray
Output time points.
yout : 1D ndarray
Step response of system.
See also
--------
scipy.signal.step
Notes
-----
.. versionadded:: 0.8.0
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
else:
T = asarray(T)
U = ones(T.shape, sys.A.dtype)
vals = lsim2(sys, U, T, X0=X0, **kwargs)
return vals[0], vals[1]
def bode(system, w=None, n=100):
"""
Calculate Bode magnitude and phase data of a continuous-time system.
.. versionadded:: 0.11.0
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
w : array_like, optional
Array of frequencies (in rad/s). Magnitude and phase data is calculated
        for every value in this array. If not given, a reasonable set will be
calculated.
n : int, optional
Number of frequency points to compute if `w` is not given. The `n`
frequencies are logarithmically spaced in an interval chosen to
include the influence of the poles and zeros of the system.
Returns
-------
w : 1D ndarray
Frequency array [rad/s]
mag : 1D ndarray
Magnitude array [dB]
phase : 1D ndarray
Phase array [deg]
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> s1 = signal.lti([1], [1, 1])
>>> w, mag, phase = signal.bode(s1)
>>> plt.figure()
>>> plt.semilogx(w, mag) # Bode magnitude plot
>>> plt.figure()
>>> plt.semilogx(w, phase) # Bode phase plot
>>> plt.show()
"""
w, y = freqresp(system, w=w, n=n)
mag = 20.0 * numpy.log10(abs(y))
phase = numpy.unwrap(numpy.arctan2(y.imag, y.real)) * 180.0 / numpy.pi
return w, mag, phase
def freqresp(system, w=None, n=10000):
"""Calculate the frequency response of a continuous-time system.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
w : array_like, optional
Array of frequencies (in rad/s). Magnitude and phase data is
        calculated for every value in this array. If not given, a reasonable
set will be calculated.
n : int, optional
Number of frequency points to compute if `w` is not given. The `n`
frequencies are logarithmically spaced in an interval chosen to
include the influence of the poles and zeros of the system.
Returns
-------
w : 1D ndarray
Frequency array [rad/s]
H : 1D ndarray
Array of complex magnitude values
Examples
--------
# Generating the Nyquist plot of a transfer function
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> s1 = signal.lti([], [1, 1, 1], [5])
# transfer function: H(s) = 5 / (s-1)^3
>>> w, H = signal.freqresp(s1)
>>> plt.figure()
>>> plt.plot(H.real, H.imag, "b")
>>> plt.plot(H.real, -H.imag, "r")
>>> plt.show()
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
if sys.inputs != 1 or sys.outputs != 1:
raise ValueError("freqresp() requires a SISO (single input, single "
"output) system.")
if w is not None:
worN = w
else:
worN = n
# In the call to freqs(), sys.num.ravel() is used because there are
# cases where sys.num is a 2-D array with a single row.
w, h = freqs(sys.num.ravel(), sys.den, worN=worN)
return w, h
| bsd-3-clause |
Scapogo/zipline | zipline/algorithm_live.py | 1 | 10219 | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import time
import os.path
import logbook
import pandas as pd
import zipline.protocol as zp
from zipline.algorithm import TradingAlgorithm
from zipline.gens.realtimeclock import RealtimeClock
from zipline.gens.tradesimulation import AlgorithmSimulator
from zipline.errors import (OrderInBeforeTradingStart,
ScheduleFunctionOutsideTradingStart)
from zipline.utils.input_validation import error_keywords
from zipline.utils.api_support import (
ZiplineAPI,
api_method,
disallowed_in_before_trading_start,
allowed_only_in_before_trading_start)
from zipline.utils.calendars.trading_calendar import days_at_time
from zipline.utils.serialization_utils import load_context, store_context
log = logbook.Logger("Live Trading")
class LiveAlgorithmExecutor(AlgorithmSimulator):
def __init__(self, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
class LiveTradingAlgorithm(TradingAlgorithm):
def __init__(self, *args, **kwargs):
self.broker = kwargs.pop('broker', None)
self.orders = {}
self.algo_filename = kwargs.get('algo_filename', "<algorithm>")
self.state_filename = kwargs.pop('state_filename', None)
self.realtime_bar_target = kwargs.pop('realtime_bar_target', None)
self._context_persistence_excludes = []
super(self.__class__, self).__init__(*args, **kwargs)
log.info("initialization done")
def initialize(self, *args, **kwargs):
self._context_persistence_excludes = (list(self.__dict__.keys()) +
['trading_client'])
if os.path.isfile(self.state_filename):
log.info("Loading state from {}".format(self.state_filename))
load_context(self.state_filename,
context=self,
checksum=self.algo_filename)
return
with ZiplineAPI(self):
super(self.__class__, self).initialize(*args, **kwargs)
store_context(self.state_filename,
context=self,
checksum=self.algo_filename,
exclude_list=self._context_persistence_excludes)
def handle_data(self, data):
super(self.__class__, self).handle_data(data)
store_context(self.state_filename,
context=self,
checksum=self.algo_filename,
exclude_list=self._context_persistence_excludes)
def _create_clock(self):
# This method is taken from TradingAlgorithm.
# The clock has been replaced to use RealtimeClock
trading_o_and_c = self.trading_calendar.schedule.ix[
self.sim_params.sessions]
assert self.sim_params.emission_rate == 'minute'
minutely_emission = True
market_opens = trading_o_and_c['market_open']
market_closes = trading_o_and_c['market_close']
# The calendar's execution times are the minutes over which we actually
# want to run the clock. Typically the execution times simply adhere to
# the market open and close times. In the case of the futures calendar,
# for example, we only want to simulate over a subset of the full 24
# hour calendar, so the execution times dictate a market open time of
# 6:31am US/Eastern and a close of 5:00pm US/Eastern.
execution_opens = \
self.trading_calendar.execution_time_from_open(market_opens)
execution_closes = \
self.trading_calendar.execution_time_from_close(market_closes)
# FIXME generalize these values
before_trading_start_minutes = days_at_time(
self.sim_params.sessions,
time(8, 45),
"US/Eastern"
)
return RealtimeClock(
self.sim_params.sessions,
execution_opens,
execution_closes,
before_trading_start_minutes,
minute_emission=minutely_emission,
time_skew=self.broker.time_skew
)
def _create_generator(self, sim_params):
# Call the simulation trading algorithm for side-effects:
# it creates the perf tracker
TradingAlgorithm._create_generator(self, sim_params)
self.trading_client = LiveAlgorithmExecutor(
self,
sim_params,
self.data_portal,
self._create_clock(),
self._create_benchmark_source(),
self.restrictions,
universe_func=self._calculate_universe
)
return self.trading_client.transform()
def updated_portfolio(self):
return self.broker.portfolio
def updated_account(self):
return self.broker.account
@api_method
@allowed_only_in_before_trading_start(
ScheduleFunctionOutsideTradingStart())
def schedule_function(self,
func,
date_rule=None,
time_rule=None,
half_days=True,
calendar=None):
        # If schedule_function() is called from initialize()
        # then the state persistence would need to take care of storing and
        # restoring the scheduled functions too (as initialize() is only called
        # once in the algorithm's life). Persisting scheduled functions is
# difficult as they are not serializable by default.
# We enforce scheduled functions to be called only from
# before_trading_start() in live trading with a decorator.
super(self.__class__, self).schedule_function(func,
date_rule,
time_rule,
half_days,
calendar)
@api_method
def symbol(self, symbol_str):
# This method works around the problem of not being able to trade
        # assets which do not have ingested data for the day of trade.
# Normally historical data is loaded to bundle and the asset's
# end_date and auto_close_date is set based on the last entry from
# the bundle db. LiveTradingAlgorithm does not override order_value(),
# order_percent() & order_target(). Those higher level ordering
        # functions provide a safety net against trading de-listed assets.
# If the asset is returned as it was ingested (end_date=yesterday)
# then CannotOrderDelistedAsset exception will be raised from the
# higher level order functions.
#
# Hence, we are increasing the asset's end_date by 10,000 days.
# The ample buffer is provided for two reasons:
# 1) assets are often stored in algo's context through initialize(),
# which is called once and persisted at live trading. 10,000 days
# enables 27+ years of trading, which is more than enough.
# 2) Tool - 10,000 Days is brilliant!
asset = super(self.__class__, self).symbol(symbol_str)
tradeable_asset = asset.to_dict()
tradeable_asset['end_date'] = (pd.Timestamp('now', tz='UTC') +
pd.Timedelta('10000 days'))
tradeable_asset['auto_close_date'] = tradeable_asset['end_date']
return asset.from_dict(tradeable_asset)
@api_method
@disallowed_in_before_trading_start(OrderInBeforeTradingStart())
def order(self,
asset,
amount,
limit_price=None,
stop_price=None,
style=None):
amount, style = self._calculate_order(asset, amount,
limit_price, stop_price, style)
return self.broker.order(asset, amount, limit_price, stop_price, style)
@api_method
def batch_market_order(self, share_counts):
raise NotImplementedError()
@error_keywords(sid='Keyword argument `sid` is no longer supported for '
'get_open_orders. Use `asset` instead.')
@api_method
def get_open_orders(self, asset=None):
return self.broker.get_open_orders(asset)
@api_method
def get_order(self, order_id):
return self.broker.get_order(order_id)
@api_method
def cancel_order(self, order_param):
order_id = order_param
if isinstance(order_param, zp.Order):
order_id = order_param.id
self.broker.cancel_order(order_id)
def run(self, *args, **kwargs):
daily_stats = super(self.__class__, self).run(*args, **kwargs)
self.on_exit()
return daily_stats
def on_exit(self):
if not self.realtime_bar_target:
return
log.info("Storing realtime bars to: {}".format(
self.realtime_bar_target))
today = str(pd.to_datetime('today').date())
subscribed_assets = self.broker.subscribed_assets
realtime_history = self.broker.get_realtime_bars(subscribed_assets,
'1m')
if not os.path.exists(self.realtime_bar_target):
os.mkdir(self.realtime_bar_target)
for asset in subscribed_assets:
filename = "zipline-live-%s-%s.csv" % (asset.symbol, today)
path = os.path.join(self.realtime_bar_target, filename)
realtime_history[asset].to_csv(path, mode='a',
index_label='datetime',
header=not os.path.exists(path))
| apache-2.0 |
Canpio/Paddle | python/paddle/v2/plot/plot.py | 7 | 2729 | # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
class PlotData(object):
def __init__(self):
self.step = []
self.value = []
def append(self, step, value):
self.step.append(step)
self.value.append(value)
def reset(self):
self.step = []
self.value = []
class Ploter(object):
def __init__(self, *args):
self.__args__ = args
self.__plot_data__ = {}
for title in args:
self.__plot_data__[title] = PlotData()
# demo in notebooks will use Ploter to plot figure, but when we convert
        # the notebook (.ipynb) to a .py file for testing, the import of matplotlib
        # will make the script crash. So we can use `export DISABLE_PLOT=True` to
        # disable importing these libs
self.__disable_plot__ = os.environ.get("DISABLE_PLOT")
if not self.__plot_is_disabled__():
import matplotlib.pyplot as plt
from IPython import display
self.plt = plt
self.display = display
def __plot_is_disabled__(self):
return self.__disable_plot__ == "True"
def append(self, title, step, value):
assert isinstance(title, basestring)
assert self.__plot_data__.has_key(title)
data = self.__plot_data__[title]
assert isinstance(data, PlotData)
data.append(step, value)
def plot(self, path=None):
if self.__plot_is_disabled__():
return
titles = []
for title in self.__args__:
data = self.__plot_data__[title]
assert isinstance(data, PlotData)
if len(data.step) > 0:
titles.append(title)
self.plt.plot(data.step, data.value)
self.plt.legend(titles, loc='upper left')
if path is None:
self.display.clear_output(wait=True)
self.display.display(self.plt.gcf())
else:
self.plt.savefig(path)
self.plt.gcf().clear()
def reset(self):
for key in self.__plot_data__:
data = self.__plot_data__[key]
assert isinstance(data, PlotData)
data.reset()
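# Hedged usage sketch (not part of the original Paddle source): a typical
# training loop creates one Ploter with a curve title per metric, appends
# (step, value) pairs as training progresses, and periodically redraws or saves
# the figure. The curve titles, dummy values and output path below are made up
# for illustration.
def _example_ploter_usage():
    plotter = Ploter("train cost", "test cost")
    for step in range(3):
        plotter.append("train cost", step, 1.0 / (step + 1))
        plotter.append("test cost", step, 1.2 / (step + 1))
    plotter.plot("/tmp/plot_curve.png")  # pass a path to save instead of display
    plotter.reset()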
| apache-2.0 |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/pandas/tests/io/msgpack/test_except.py | 7 | 1068 | # coding: utf-8
import pytest
from pandas.io.msgpack import packb, unpackb
class DummyException(Exception):
pass
class TestExceptions(object):
def test_raise_on_find_unsupported_value(self):
import datetime
pytest.raises(TypeError, packb, datetime.datetime.now())
def test_raise_from_object_hook(self):
def hook(obj):
raise DummyException
pytest.raises(DummyException, unpackb, packb({}), object_hook=hook)
pytest.raises(DummyException, unpackb, packb({'fizz': 'buzz'}),
object_hook=hook)
pytest.raises(DummyException, unpackb, packb({'fizz': 'buzz'}),
object_pairs_hook=hook)
pytest.raises(DummyException, unpackb,
packb({'fizz': {'buzz': 'spam'}}), object_hook=hook)
pytest.raises(DummyException, unpackb,
packb({'fizz': {'buzz': 'spam'}}),
object_pairs_hook=hook)
def test_invalidvalue(self):
pytest.raises(ValueError, unpackb, b'\xd9\x97#DL_')
| mit |
ccasotto/rmtk | rmtk/plotting/hazard_outputs/plot_hazard_outputs.py | 3 | 12351 | #!/usr/bin/env python
# LICENSE
#
# Copyright (c) 2015, GEM Foundation.
#
# The nrml_convertes is free software: you can redistribute
# it and/or modify it under the terms of the GNU Affero General Public
# License as published by the Free Software Foundation, either version
# 3 of the License, or (at your option) any later version.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>
#
# DISCLAIMER
#
# The software nrml_convertes provided herein is released as a prototype
# implementation on behalf of scientists and engineers working within the GEM
# Foundation (Global Earthquake Model).
#
# It is distributed for the purpose of open collaboration and in the
# hope that it will be useful to the scientific, engineering, disaster
# risk and software design communities.
#
# The software is NOT distributed as part of GEM's OpenQuake suite
# (http://www.globalquakemodel.org/openquake) and must be considered as a
# separate entity. The software provided herein is designed and implemented
# by scientific staff. It is not developed to the design standards, nor
# subject to same level of critical review by professional software
# developers, as GEM's OpenQuake software suite.
#
# Feedback and contribution to the software is welcome, and can be
# directed to the hazard scientific staff of the GEM Model Facility
# ([email protected]).
#
# The nrml_convertes is therefore distributed WITHOUT ANY WARRANTY; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# The GEM Foundation, and the authors of the software, assume no liability for
# use of the software.
"""
RMTK Tools for the parsing and visualisation of hazard data
"""
import os
import numpy as np
from lxml import etree
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
from matplotlib.colors import LogNorm, Normalize
from rmtk.parsers.hazard_parsers import HazardCurveXMLParser
NRML='{http://openquake.org/xmlns/nrml/0.5}'
GML='{http://www.opengis.net/gml}'
def _set_curves_matrix(hcm):
"""
Store locations and poes in :class:`openquake.nrml.models.HazardCurveModel`
in numpy array.
"""
curves = []
for loc, poes in hcm:
row = [loc.x, loc.y]
row.extend(poes)
curves.append(row)
return np.array(curves)
def _set_header(hcm):
"""
Save metadata in :class:`openquake.nrml.models.HazardCurveModel`
in a string to be used as header
"""
header = ','.join(
['%s=%s' % (k,v) for k,v in hcm.metadata.items()
if v is not None and k != 'imls']
)
header = '# ' + header
header += '\nlon,lat,'+','.join([str(iml) for iml in hcm.metadata['imls']])
return header
class HazardCurve(object):
"""
Class to hold hazard curve information
"""
def __init__(self, input_filename):
"""
Read in the hazard curve from the filename
"""
self.hcm = HazardCurveXMLParser(input_filename).parse()
self.data = _set_curves_matrix(self.hcm)
self.loc_list = ["{:.6f}|{:.6f}".format(row[0], row[1])
for row in self.data]
def plot(self, idx, output_file=None, dpi=300, fmt="png", papertype="a4"):
"""
Creates the hazard curve plot
"""
if ("PGA" in self.hcm.metadata["imt"]) or\
("SA" in self.hcm.metadata["imt"]):
imt_units = "g"
else:
imt_units = "cm/s"
if isinstance(idx, int):
longitude, latitude, curve = self._get_curve_from_id(idx)
elif isinstance(idx, str):
longitude, latitude, curve = self._get_curve_from_string(idx)
else:
raise ValueError("Index not recognised!")
fig = plt.figure(figsize=(7, 5))
#fig.set_tight_layout(True)
plt.loglog(self.hcm.metadata["imls"], curve, 'bo-', linewidth=2.0)
plt.xlabel("%s (%s)" %(self.hcm.metadata["imt"], imt_units),
fontsize=14)
plt.ylabel("Probability of Being Exceeded in %s years" %
self.hcm.metadata["investigation_time"], fontsize=14)
if longitude < 0.0:
long_ind = "W"
else:
long_ind = "E"
if latitude < 0.0:
lat_ind = "S"
else:
lat_ind = "N"
plt.title("Location: %12.6f %s, %12.6f %s" %(
np.abs(longitude), long_ind, np.abs(latitude), lat_ind))
if output_file:
plt.savefig(output_file, dpi=dpi, format=fmt, papertype="a4")
def _get_curve_from_id(self, idx):
"""
Returns the curve based on the location in the array
"""
return self.data[idx,0], self.data[idx, 1], self.data[idx, 2:]
def _get_curve_from_string(self, idx):
"""
Returns the curve based on the location defined by a string
"""
if idx in self.loc_list:
idx = self.loc_list.index(idx)
else:
raise ValueError("Location index %s not in curve list" % idx)
return self.data[idx,0], self.data[idx, 1], self.data[idx, 2:]
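# Hedged usage sketch (not part of the original RMTK source): parse an OpenQuake
# hazard-curve NRML file and plot one location either by row index or by the
# "lon|lat" string key built in __init__. The input and output file names are
# assumptions made for illustration only.
def _example_hazard_curve_usage():
    curves = HazardCurve("hazard_curve.xml")
    curves.plot(0, output_file="curve_0.png")   # plot by row index
    curves.plot(curves.loc_list[0])             # or by "lon|lat" key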
def parse_nrml_uhs_curves(nrml_uhs_map):
"""
Parse NRML uhs file.
"""
metadata = {}
periods = None
values = []
parse_args = dict(source=nrml_uhs_map)
for _, element in etree.iterparse(**parse_args):
if element.tag == '%suniformHazardSpectra' % NRML:
a = element.attrib
metadata['statistics'] = a.get('statistics')
metadata['quantile_value'] = a.get('quantileValue')
metadata['smlt_path'] = a.get('sourceModelTreePath')
metadata['gsimlt_path'] = a.get('gsimTreePath')
metadata['investigation_time'] = a['investigationTime']
metadata['poe'] = a.get('poE')
elif element.tag == '%speriods' % NRML:
periods = map(float, element.text.split())
elif element.tag == '%suhs' % NRML:
lon, lat = map(
float, element.find('%sPoint/%spos' % (GML, GML)).text.split()
)
imls = map(float, element.find('%sIMLs' % NRML).text.split())
uhs = [lon, lat]
uhs.extend(imls)
values.append(uhs)
return metadata, periods, np.array(values)
class UniformHazardSpectra(HazardCurve):
"""
Class to hold and plot uniform hazard spectra information
"""
def __init__(self, input_filename):
"""
Instantiation and parsing
"""
self.metadata, self.periods, self.data = parse_nrml_uhs_curves(
input_filename)
self.loc_list = ["{:.6f}|{:.6f}".format(row[0], row[1])
for row in self.data]
def plot(self, idx, output_file=None, dpi=300, fmt="png",
papertype="a4"):
"""
Creates the UHS plot
"""
if not self.metadata["statistics"]:
self.metadata["statistics"] = ""
if isinstance(idx, int):
longitude, latitude, spectrum = self._get_curve_from_id(idx)
elif isinstance(idx, str):
longitude, latitude, spectrum = self._get_curve_from_string(idx)
else:
raise ValueError("Index not recognised!")
fig = plt.figure(figsize=(7, 5))
#fig.set_tight_layout(True)
plt.plot(self.periods, spectrum, 'bo-', linewidth=2.0)
plt.xlabel("Period (s)", fontsize=14)
plt.ylabel("Spectral Acceleration (g)", fontsize=14)
plt.grid(b=True, color='0.66', linestyle="--")
if longitude < 0.0:
long_ind = "W"
else:
long_ind = "E"
if latitude < 0.0:
lat_ind = "S"
else:
lat_ind = "N"
title_string_upper = "{:s} UHS with a {:s} PoE in {:s} Years\n".format(
self.metadata["statistics"],
self.metadata["poe"],
self.metadata["investigation_time"])
title_string_lower = "Location: {:.6f}{:s}, {:.6f}{:s}".format(
np.abs(longitude), long_ind, np.abs(latitude), lat_ind)
plt.title(title_string_upper + title_string_lower, fontsize=16)
if output_file:
plt.savefig(output_file, dpi=dpi, format=fmt, papertype="a4")
def parse_nrml_hazard_map(nrml_hazard_map):
"""
Parse NRML hazard map file.
"""
metadata = {}
values = []
parse_args = dict(source=nrml_hazard_map)
for _, element in etree.iterparse(**parse_args):
if element.tag == '%shazardMap' % NRML:
a = element.attrib
metadata['smlt_path'] = a.get('sourceModelTreePath')
metadata['gsimlt_path'] = a.get('gsimTreePath')
metadata['imt'] = a['IMT']
metadata['investigation_time'] = a['investigationTime']
metadata['poe'] = a.get('poE')
metadata['sa_period'] = a.get('saPeriod')
metadata['sa_damping'] = a.get('saDamping')
metadata['statistics'] = a.get('statistics')
metadata['quantile_value'] = a.get('quantileValue')
elif element.tag == '%snode' % NRML:
a = element.attrib
values.append(
map(float, [a.get('lon'), a.get('lat'), a.get('iml')])
)
return metadata, np.array(values)
class HazardMap(object):
"""
Class to hold and plot hazard map information
"""
def __init__(self, input_filename):
"""
Instantiate and parse input file
"""
self.metadata, self.data = parse_nrml_hazard_map(input_filename)
self.box = {}
self.box["lon_1"] = min(self.data[:,0])
self.box["lon_2"] = max(self.data[:,0])
self.box["lat_1"] = min(self.data[:,1])
self.box["lat_2"] = max(self.data[:,1])
self.box["lat_length"] = abs(self.box["lat_2"] - self.box["lat_1"])
self.box["lon_length"] = abs(self.box["lon_2"] - self.box["lon_1"])
    def plot(self, log_scale=False, marker_size=20,
             output_file=None, dpi=300, fmt="png", papertype="a4"):
        """
        Plot the hazard map as a colour-mapped scatter of IMLs on a Basemap.
        """
plt.figure(figsize=(8, 6), dpi=300, facecolor='w',
edgecolor='k')
map_func = Basemap(llcrnrlon=self.box["lon_1"],
llcrnrlat=self.box["lat_1"],
urcrnrlon=self.box["lon_2"],
urcrnrlat=self.box["lat_2"],
projection='mill',
resolution='i')
x, y = map_func(self.data[:, 0], self.data[:, 1])
#map_func.shadedrelief()
map_func.drawcoastlines(linewidth = 0.25, color = "gray")
map_func.drawcountries(linewidth = 1.00, color = "gray")
map_func.drawstates(linewidth = 0.25, color = "gray")
map_func.drawmapboundary(fill_color = 'lightblue')
map_func.fillcontinents(color = 'white', lake_color = 'lightblue')
if log_scale:
scale = LogNorm()
else:
scale = Normalize()
plt.scatter(x, y , s=marker_size, c=self.data[:, 2], zorder=4,
cmap='bwr',edgecolor='None',norm = scale)
cbar = map_func.colorbar(location='right',pad="5%")
if self.metadata["imt"] == "PGV":
imt_units = "cm/s"
else:
imt_units = "g"
cbar.set_label("{:s} ({:s})".format(
self.metadata["imt"],
imt_units))
if self.box["lat_length"] < 2:
parallels = np.arange(0., 81, 0.25)
else:
parallels = np.arange(0., 81, 1.00)
# labels = [left,right,top,bottom]
map_func.drawparallels(parallels,labels=[True,False,True,False])
if self.box["lon_length"] < 2:
meridians = np.arange(0., 360, 0.25)
else:
meridians = np.arange(0., 360, 1.00)
map_func.drawmeridians(meridians,labels=[True,False,False,True])
title_string = "Hazard Map with a {:s} PoE in {:s} Years\n".format(
self.metadata["poe"],
self.metadata["investigation_time"])
plt.title(title_string, fontsize=16)
plt.show()
if output_file:
plt.savefig(output_file, dpi=dpi, format=fmt, papertype="a4")
| agpl-3.0 |
crackhopper/TFS-toolbox | tfs/data_processor/sklearn_processor.py | 1 | 2638 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import preprocessing
from tfs.data_processor.base import *
from tfs.dataset.subset import DataSubset
class _SKLearnType(object):
undecide = 0
data = 1
labels = 2
both = 3
class SKLearnTransformer(BaseProcessor):
_type=_SKLearnType.undecide
def _apply(self,op,dataset):
if self._type ==_SKLearnType.undecide:
raise RuntimeError("%s does not define _type variable"%type(self).__name__)
elif self._type ==_SKLearnType.data:
res = op(dataset.data)
return DataSubset(res,dataset.labels)
elif self._type ==_SKLearnType.labels:
res=op(dataset.labels)
return DataSubset(dataset.data,res)
elif self._type ==_SKLearnType.both:
res=op(dataset.data,dataset.labels)
return DataSubset(res,dataset.labels)
else:
raise RuntimeError("%s define an unsupported _type variable"%type(self).__name__)
def fit_transform(self,dataset):
return self._apply(self.p.fit_transform,dataset)
def transform(self,dataset):
return self._apply(self.p.transform,dataset)
def inverse_transform(self,dataset):
return self._apply(self.p.inverse_transform,dataset)
class LabelBinarizer(SKLearnTransformer):
""" a wrapper for sklearn.preprocessing.LabelBinarizer
see http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.LabelBinarizer.html
"""
_type=_SKLearnType.labels
def __init__(self,neg_label=0, pos_label=1, sparse_output=False):
self.p = preprocessing.LabelBinarizer(neg_label, pos_label, sparse_output)
class StandardScaler(SKLearnTransformer):
""" a wrapper for sklearn.preprocessing.StandardScaler
see http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html
"""
_type=_SKLearnType.data
def __init__(self,copy=True, with_mean=True, with_std=True):
self.p = preprocessing.StandardScaler(copy, with_mean, with_std)
class MinMaxScaler(SKLearnTransformer):
""" a wrapper for sklearn.preprocessing.MinMaxScaler
see http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MinMaxScaler.html
"""
_type=_SKLearnType.data
def __init__(self,feature_range=(0, 1), copy=True):
self.p = preprocessing.MinMaxScaler(feature_range, copy)
class Normalizer(SKLearnTransformer):
""" a wrapper for sklearn.preprocessing.Normalizer
see http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.Normalizer.html
"""
_type=_SKLearnType.data
def __init__(self,norm='l2', copy=True):
self.p = preprocessing.Normalizer(norm,copy)
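# Hedged usage sketch (not part of the original TFS source): the transformers
# above operate on DataSubset objects, dispatching on `_type` so that, e.g.,
# StandardScaler rescales `data` while LabelBinarizer encodes `labels`. The
# constructor call DataSubset(data, labels) mirrors how _apply() rebuilds
# subsets above and is assumed here for illustration.
def _example_sklearn_processor_usage():
    import numpy as np
    subset = DataSubset(np.random.randn(10, 3), np.arange(10) % 2)
    scaler = StandardScaler()
    scaled = scaler.fit_transform(subset)       # data is standardised
    binarizer = LabelBinarizer()
    encoded = binarizer.fit_transform(subset)   # labels are binarised
    return scaled, encoded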
| mit |
lazywei/scikit-learn | examples/model_selection/plot_train_error_vs_test_error.py | 349 | 2577 | """
=========================
Train error vs Test error
=========================
Illustration of how the performance of an estimator on unseen data (test data)
is not the same as the performance on training data. As the regularization
increases the performance on train decreases while the performance on test
is optimal within a range of values of the regularization parameter.
The example with an Elastic-Net regression model and the performance is
measured using the explained variance a.k.a. R^2.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from sklearn import linear_model
###############################################################################
# Generate sample data
n_samples_train, n_samples_test, n_features = 75, 150, 500
np.random.seed(0)
coef = np.random.randn(n_features)
coef[50:] = 0.0 # only the top 10 features are impacting the model
X = np.random.randn(n_samples_train + n_samples_test, n_features)
y = np.dot(X, coef)
# Split train and test data
X_train, X_test = X[:n_samples_train], X[n_samples_train:]
y_train, y_test = y[:n_samples_train], y[n_samples_train:]
###############################################################################
# Compute train and test errors
alphas = np.logspace(-5, 1, 60)
enet = linear_model.ElasticNet(l1_ratio=0.7)
train_errors = list()
test_errors = list()
for alpha in alphas:
enet.set_params(alpha=alpha)
enet.fit(X_train, y_train)
train_errors.append(enet.score(X_train, y_train))
test_errors.append(enet.score(X_test, y_test))
i_alpha_optim = np.argmax(test_errors)
alpha_optim = alphas[i_alpha_optim]
print("Optimal regularization parameter : %s" % alpha_optim)
# Estimate the coef_ on full data with optimal regularization parameter
enet.set_params(alpha=alpha_optim)
coef_ = enet.fit(X, y).coef_
###############################################################################
# Plot results functions
import matplotlib.pyplot as plt
plt.subplot(2, 1, 1)
plt.semilogx(alphas, train_errors, label='Train')
plt.semilogx(alphas, test_errors, label='Test')
plt.vlines(alpha_optim, plt.ylim()[0], np.max(test_errors), color='k',
linewidth=3, label='Optimum on test')
plt.legend(loc='lower left')
plt.ylim([0, 1.2])
plt.xlabel('Regularization parameter')
plt.ylabel('Performance')
# Show estimated coef_ vs true coef
plt.subplot(2, 1, 2)
plt.plot(coef, label='True coef')
plt.plot(coef_, label='Estimated coef')
plt.legend()
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.26)
plt.show()
| bsd-3-clause |
aneeshusa/servo | tests/heartbeats/process_logs.py | 139 | 16143 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import argparse
import matplotlib.pyplot as plt
import numpy as np
import os
from os import path
import sys
import warnings
HB_LOG_IDX_START_TIME = 7
HB_LOG_IDX_END_TIME = HB_LOG_IDX_START_TIME + 1
HB_LOG_IDX_START_ENERGY = 14
HB_LOG_IDX_END_ENERGY = HB_LOG_IDX_START_ENERGY + 1
ENERGY_PROFILER_NAME = 'ApplicationHeartbeat'
SUMMARY_OUTPUT = "summary.txt"
SUMMARY_TIME_IDX = 8
SUMMARY_ENERGY_IDX = SUMMARY_TIME_IDX + 1
SUMMARY_POWER_IDX = SUMMARY_ENERGY_IDX + 1
def autolabel(rects, ax):
"""Attach some text labels.
"""
for rect in rects:
ax.text(rect.get_x() + rect.get_width() / 2., 1.05 * rect.get_height(), '', ha='center', va='bottom')
def plot_raw_totals(config, plot_data, max_time, max_time_std, max_energy, max_energy_std, output_dir, normalize):
"""Plot the raw totals for a configuration.
Keyword arguments:
config -- configuration name
plot_data -- (profiler name, total_time, total_time_std, total_energy, total_energy_std)
max_time, max_time_std, max_energy, max_energy_std -- single values
normalize -- True/False
"""
plot_data = sorted(plot_data)
keys = [p for (p, tt, tts, te, tes) in plot_data]
total_times = [tt for (p, tt, tts, te, tes) in plot_data]
total_times_std = [tts for (p, tt, tts, te, tes) in plot_data]
total_energies = [te for (p, tt, tts, te, tes) in plot_data]
total_energies_std = [tes for (p, tt, tts, te, tes) in plot_data]
fig, ax1 = plt.subplots()
ind = np.arange(len(keys)) # the x locations for the groups
width = 0.35 # the width of the bars
# add some text for labels, title and axes ticks
ax1.set_title('Time/Energy Data for Configuration ' + config)
ax1.set_xticks(ind + width)
ax1.set_xticklabels(keys, rotation=45)
fig.set_tight_layout(True)
fig.set_size_inches(len(plot_data) / 1.5, 8)
ax2 = ax1.twinx()
# Normalize
if normalize:
total_times_std /= np.sum(total_times)
total_times /= np.sum(total_times)
total_energies_std /= np.sum(total_energies)
total_energies /= np.sum(total_energies)
ax1.set_ylabel('Time (Normalized)')
ax2.set_ylabel('Energy (Normalized)')
else:
# set time in us instead of ns
total_times_std /= np.array(1000000.0)
total_times /= np.array(1000000.0)
total_energies_std /= np.array(1000000.0)
total_energies /= np.array(1000000.0)
ax1.set_ylabel('Time (ms)')
ax2.set_ylabel('Energy (Joules)')
rects1 = ax1.bar(ind, total_times, width, color='r', yerr=total_times_std)
rects2 = ax2.bar(ind + width, total_energies, width, color='y', yerr=total_energies_std)
ax1.legend([rects1[0], rects2[0]], ['Time', 'Energy'])
# set axis
x1, x2, y1, y2 = plt.axis()
if normalize:
ax1.set_ylim(ymin=0, ymax=1)
ax2.set_ylim(ymin=0, ymax=1)
else:
ax1.set_ylim(ymin=0, ymax=((max_time + max_time_std) * 1.25 / 1000000.0))
ax2.set_ylim(ymin=0, ymax=((max_energy + max_energy_std) * 1.25 / 1000000.0))
autolabel(rects1, ax1)
autolabel(rects2, ax2)
# plt.show()
plt.savefig(path.join(output_dir, config + ".png"))
plt.close(fig)
def create_raw_total_data(config_data):
"""Get the raw data to plot for a configuration
Return: [(profiler, time_mean, time_stddev, energy_mean, energy_stddev)]
Keyword arguments:
config_data -- (trial, trial_data)
"""
# We can't assume that the same number of heartbeats are always issued across trials
# key: profiler name; value: list of timing sums for each trial
profiler_total_times = {}
# key: profiler name; value: list of energy sums for each trial
profiler_total_energies = {}
for (t, td) in config_data:
for (profiler, ts, te, es, ee) in td:
# sum the total times and energies for each profiler in this trial
total_time = np.sum(te - ts)
total_energy = np.sum(ee - es)
# add to list to be averaged later
time_list = profiler_total_times.get(profiler, [])
time_list.append(total_time)
profiler_total_times[profiler] = time_list
energy_list = profiler_total_energies.get(profiler, [])
energy_list.append(total_energy)
profiler_total_energies[profiler] = energy_list
# Get mean and stddev for time and energy totals
return [(profiler,
np.mean(profiler_total_times[profiler]),
np.std(profiler_total_times[profiler]),
np.mean(profiler_total_energies[profiler]),
np.std(profiler_total_energies[profiler]))
for profiler in profiler_total_times.keys()]
def plot_all_raw_totals(config_list, output_dir):
"""Plot column charts of the raw total time/energy spent in each profiler category.
Keyword arguments:
config_list -- [(config, result of process_config_dir(...))]
output_dir -- where to write plots to
"""
raw_total_norm_out_dir = path.join(output_dir, 'raw_totals_normalized')
os.makedirs(raw_total_norm_out_dir)
raw_total_out_dir = path.join(output_dir, 'raw_totals')
os.makedirs(raw_total_out_dir)
# (name, (profiler, (time_mean, time_stddev, energy_mean, energy_stddev)))
raw_totals_data = [(config, create_raw_total_data(config_data)) for (config, config_data) in config_list]
mean_times = []
mean_times_std = []
mean_energies = []
mean_energies_std = []
for profiler_tup in [config_tup[1] for config_tup in raw_totals_data]:
for (p, tt, tts, te, tes) in profiler_tup:
mean_times.append(tt)
mean_times_std.append(tts)
mean_energies.append(te)
mean_energies_std.append(tes)
# get consistent max time/energy values across plots
max_t = np.max(mean_times)
max_t_std = np.max(mean_times_std)
max_e = np.max(mean_energies)
max_e_std = np.max(mean_energies_std)
[plot_raw_totals(data[0], data[1], max_t, max_t_std, max_e, max_e_std, raw_total_norm_out_dir, True)
for data in raw_totals_data]
[plot_raw_totals(data[0], data[1], max_t, max_t_std, max_e, max_e_std, raw_total_out_dir, False)
for data in raw_totals_data]
def plot_trial_time_series(config, trial, trial_data, max_end_time, max_power, output_dir):
"""Plot time series for a single trial.
Keyword arguments:
config -- the config name
trial -- the trial name
trial_data -- [(profiler, [start times], [end times], [start energies], [end energies])]
max_end_time -- single value to use as max X axis value (for consistency across trials)
max_power -- single value to use as max Y axis value for power (for consistency across trials)
output_dir -- the output directory
"""
# TODO: Some profilers may have parallel tasks - need to identify this on plots
max_end_time = max_end_time / 1000000.0
trial_data = sorted(trial_data)
fig, ax1 = plt.subplots()
keys = [p for (p, ts, te, es, ee) in trial_data]
# add some text for labels, title and axes ticks
ax1.set_title('Profiler Activity for ' + config + ', ' + trial)
ax1.set_xlabel('Time (ms)')
ax1.grid(True)
width = 8 # the width of the bars
ax1.set_yticks(10 * np.arange(1, len(keys) + 2))
ax1.set_yticklabels(keys)
ax1.set_ylim(ymin=0, ymax=((len(trial_data) + 1) * 10))
ax1.set_xlim(xmin=0, xmax=max_end_time)
fig.set_tight_layout(True)
fig.set_size_inches(16, len(trial_data) / 3)
i = 10
for (p, ts, te, es, ee) in trial_data:
xranges = [(ts[j] / 1000000.0, (te[j] - ts[j]) / 1000000.0) for j in xrange(len(ts))]
ax1.broken_barh(xranges, (i - 0.5 * width, width))
i += 10
# place a vbar at the final time for this trial
last_profiler_times = map(np.nanmax, filter(lambda x: len(x) > 0, [te for (p, ts, te, es, ee) in trial_data]))
plt.axvline(np.max(last_profiler_times) / 1000000.0, color='black')
power_times = []
power_values = []
for (p, ts, te, es, ee) in trial_data:
if p == ENERGY_PROFILER_NAME:
power_times = te / 1000000.0
power_values = (ee - es) / ((te - ts) / 1000.0)
ax2 = ax1.twinx()
ax2.set_xlim(xmin=0, xmax=max_end_time)
ax2.set_ylim(ymin=0, ymax=max_power)
ax2.set_ylabel('Power (Watts)')
ax2.plot(power_times, power_values, color='r')
# plt.show()
plt.savefig(path.join(output_dir, "ts_" + config + "_" + trial + ".png"))
plt.close(fig)
def hb_energy_times_to_power(es, ee, ts, te):
"""Compute power from start and end energy and times.
Return: power values
"""
return (ee - es) / ((te - ts) / 1000.0)
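# Minimal usage sketch (made-up values; assumes times in nanoseconds and energies
# in microjoules, consistent with the 'Power (Watts)' axis used in
# plot_trial_time_series above):
#   ts = np.array([0, 1000000]) # start times (ns)
#   te = np.array([1000000, 2000000]) # end times (ns)
#   es = np.array([0, 2000]) # start energies (uJ)
#   ee = np.array([2000, 5000]) # end energies (uJ)
#   hb_energy_times_to_power(es, ee, ts, te) # -> array([2., 3.])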
def plot_all_time_series(config_list, output_dir):
"""Plot column charts of the raw total time/energy spent in each profiler category.
Keyword arguments:
config_list -- [(config, result of process_config_dir(...))]
output_dir -- where to write plots to
"""
time_series_out_dir = path.join(output_dir, 'time_series')
os.makedirs(time_series_out_dir)
max_end_times = []
max_power_values = []
for (c, cd) in config_list:
for (t, td) in cd:
trial_max_end_times = map(np.nanmax, filter(lambda x: len(x) > 0, [te for (p, ts, te, es, ee) in td]))
max_end_times.append(np.nanmax(trial_max_end_times))
for (p, ts, te, es, ee) in td:
# We only care about the energy profiler (others aren't reliable for instant power anyway)
if p == ENERGY_PROFILER_NAME and len(te) > 0:
max_power_values.append(np.nanmax(hb_energy_times_to_power(es, ee, ts, te)))
max_time = np.nanmax(max_end_times)
max_power = np.nanmax(np.array(max_power_values)) * 1.2 # leave a little space at the top
for (config, config_data) in config_list:
[plot_trial_time_series(config, trial, trial_data, max_time, max_power, time_series_out_dir)
for (trial, trial_data) in config_data]
def read_heartbeat_log(profiler_hb_log):
"""Read a heartbeat log file.
Return: (profiler name, [start times], [end times], [start energies], [end energies])
Keyword arguments:
profiler_hb_log -- the file to read
"""
with warnings.catch_warnings():
try:
warnings.simplefilter("ignore")
time_start, time_end, energy_start, energy_end = \
np.loadtxt(profiler_hb_log,
dtype=np.dtype('uint64'),
skiprows=1,
usecols=(HB_LOG_IDX_START_TIME,
HB_LOG_IDX_END_TIME,
HB_LOG_IDX_START_ENERGY,
HB_LOG_IDX_END_ENERGY),
unpack=True,
ndmin=1)
except ValueError:
time_start, time_end, energy_start, energy_end = [], [], [], []
name = path.split(profiler_hb_log)[1].split('-')[1].split('.')[0]
return (name,
np.atleast_1d(time_start),
np.atleast_1d(time_end),
np.atleast_1d(energy_start),
np.atleast_1d(energy_end))
def process_trial_dir(trial_dir):
"""Process trial directory.
Return: [(profiler name, [start times], [end times], [start energies], [end energies])]
Time and energy are normalized to 0 start values.
Keyword arguments:
trial_dir -- the directory for this trial
"""
log_data = map(lambda h: read_heartbeat_log(path.join(trial_dir, h)),
filter(lambda f: f.endswith(".log"), os.listdir(trial_dir)))
# Find the earliest timestamps and energy readings
min_t = np.nanmin(map(np.nanmin, filter(lambda x: len(x) > 0, [ts for (profiler, ts, te, es, ee) in log_data])))
min_e = np.nanmin(map(np.nanmin, filter(lambda x: len(x) > 0, [es for (profiler, ts, te, es, ee) in log_data])))
# Normalize timing/energy data to start values of 0
return [(profiler, ts - min_t, te - min_t, es - min_e, ee - min_e) for (profiler, ts, te, es, ee) in log_data]
def process_config_dir(config_dir):
"""Process a configuration directory.
Return: [(trial, [(profiler name, [start times], [end times], [start energies], [end energies])])]
Keyword arguments:
config_dir -- the directory for this configuration - contains subdirectories for each trial
"""
return [(trial_dir, process_trial_dir(path.join(config_dir, trial_dir))) for trial_dir in os.listdir(config_dir)]
def process_logs(log_dir):
"""Process log directory.
Return: [(config, [(trial, [(profiler name, [start times], [end times], [start energies], [end energies])])])]
Keyword arguments:
log_dir -- the log directory to process - contains subdirectories for each configuration
"""
return [((config_dir.split('_')[1], process_config_dir(path.join(log_dir, config_dir))))
for config_dir in os.listdir(log_dir)]
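# Illustrative on-disk layout consumed by process_logs(...). Directory and file
# names here are made up; only the '_'-delimited config name, the per-trial
# subdirectories, and the '<prefix>-<profiler>.log' file naming are assumed by
# the parsing above (and the summary file by find_best_executions below):
#   <log_dir>/
#     bench_<config>/
#       trial1/
#         heartbeat-<profiler>.log
#         <SUMMARY_OUTPUT>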
def find_best_executions(log_dir):
"""Get the best time, energy, and power from the characterization summaries.
Return: ((config, trial, min_time), (config, trial, min_energy), (config, trial, min_power))
Keyword arguments:
log_dir -- the log directory to process - contains subdirectories for each configuration
"""
DEFAULT = ('', '', 1000000000.0)
min_time = DEFAULT
min_energy = DEFAULT
min_power = DEFAULT
for config_dir in os.listdir(log_dir):
for trial_dir in os.listdir(path.join(log_dir, config_dir)):
with open(path.join(log_dir, config_dir, trial_dir, SUMMARY_OUTPUT), "r") as s:
lines = s.readlines()
time = float(lines[SUMMARY_TIME_IDX].split(':')[1])
energy = int(lines[SUMMARY_ENERGY_IDX].split(':')[1])
power = float(lines[SUMMARY_POWER_IDX].split(':')[1])
if time < min_time[2]:
min_time = (config_dir, trial_dir, time)
if energy < min_energy[2]:
min_energy = (config_dir, trial_dir, energy)
if power < min_power[2]:
min_power = (config_dir, trial_dir, power)
return (min_time, min_energy, min_power)
def main():
"""This script processes the log files from the "characterize.py" script and produces visualizations.
"""
# Default log directory
directory = 'heartbeat_logs'
# Default output directory
output_dir = 'plots'
# Default android
android = False
# Parsing the input of the script
parser = argparse.ArgumentParser(description="Process Heartbeat log files from characterization")
parser.add_argument("-d", "--directory",
default=directory,
help="Heartbeat log directory \"-d heartbeat_logs\"")
parser.add_argument("-o", "--output",
default=output_dir,
help="Specify the log output directory, for example \"-o plots\"")
parser.add_argument("--android",
action="store_true",
dest="android",
default=False,
help="Specify if processing results from Android")
args = parser.parse_args()
if args.directory:
directory = args.directory
if args.output:
output_dir = args.output
if args.android:
android = args.android
if not os.path.exists(directory):
print "Input directory does not exist: " + directory
sys.exit(1)
if os.path.exists(output_dir):
print "Output directory already exists: " + output_dir
sys.exit(1)
res = process_logs(directory)
if not android:
best = find_best_executions(directory)
print 'Best time:', best[0]
print 'Best energy:', best[1]
print 'Best power:', best[2]
os.makedirs(output_dir)
plot_all_raw_totals(res, output_dir)
plot_all_time_series(res, output_dir)
if __name__ == "__main__":
main()
| mpl-2.0 |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/tests/arrays/sparse/test_arithmetics.py | 2 | 20166 | import operator
import numpy as np
import pytest
import pandas as pd
from pandas.core import ops
from pandas.core.sparse.api import SparseDtype
import pandas.util.testing as tm
@pytest.fixture(params=["integer", "block"])
def kind(request):
"""kind kwarg to pass to SparseArray/SparseSeries"""
return request.param
@pytest.fixture(params=[True, False])
def mix(request):
# whether to operate op(sparse, dense) instead of op(sparse, sparse)
return request.param
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
@pytest.mark.filterwarnings("ignore:Series.to_sparse:FutureWarning")
class TestSparseArrayArithmetics:
_base = np.array
_klass = pd.SparseArray
def _assert(self, a, b):
tm.assert_numpy_array_equal(a, b)
def _check_numeric_ops(self, a, b, a_dense, b_dense, mix, op):
with np.errstate(invalid="ignore", divide="ignore"):
if op in [operator.floordiv, ops.rfloordiv]:
# FIXME: GH#13843
if self._base == pd.Series and a.dtype.subtype == np.dtype("int64"):
pytest.xfail("Not defined/working. See GH#13843")
if mix:
result = op(a, b_dense).to_dense()
else:
result = op(a, b).to_dense()
if op in [operator.truediv, ops.rtruediv]:
# pandas uses future division
expected = op(a_dense * 1.0, b_dense)
else:
expected = op(a_dense, b_dense)
if op in [operator.floordiv, ops.rfloordiv]:
# Series sets 1//0 to np.inf, which SparseArray does not do (yet)
mask = np.isinf(expected)
if mask.any():
expected[mask] = np.nan
self._assert(result, expected)
def _check_bool_result(self, res):
assert isinstance(res, self._klass)
assert isinstance(res.dtype, SparseDtype)
assert res.dtype.subtype == np.bool
assert isinstance(res.fill_value, bool)
def _check_comparison_ops(self, a, b, a_dense, b_dense):
with np.errstate(invalid="ignore"):
# Unfortunately, trying to wrap the computation of each expected
# value with np.errstate() is too tedious.
#
# sparse & sparse
self._check_bool_result(a == b)
self._assert((a == b).to_dense(), a_dense == b_dense)
self._check_bool_result(a != b)
self._assert((a != b).to_dense(), a_dense != b_dense)
self._check_bool_result(a >= b)
self._assert((a >= b).to_dense(), a_dense >= b_dense)
self._check_bool_result(a <= b)
self._assert((a <= b).to_dense(), a_dense <= b_dense)
self._check_bool_result(a > b)
self._assert((a > b).to_dense(), a_dense > b_dense)
self._check_bool_result(a < b)
self._assert((a < b).to_dense(), a_dense < b_dense)
# sparse & dense
self._check_bool_result(a == b_dense)
self._assert((a == b_dense).to_dense(), a_dense == b_dense)
self._check_bool_result(a != b_dense)
self._assert((a != b_dense).to_dense(), a_dense != b_dense)
self._check_bool_result(a >= b_dense)
self._assert((a >= b_dense).to_dense(), a_dense >= b_dense)
self._check_bool_result(a <= b_dense)
self._assert((a <= b_dense).to_dense(), a_dense <= b_dense)
self._check_bool_result(a > b_dense)
self._assert((a > b_dense).to_dense(), a_dense > b_dense)
self._check_bool_result(a < b_dense)
self._assert((a < b_dense).to_dense(), a_dense < b_dense)
def _check_logical_ops(self, a, b, a_dense, b_dense):
# sparse & sparse
self._check_bool_result(a & b)
self._assert((a & b).to_dense(), a_dense & b_dense)
self._check_bool_result(a | b)
self._assert((a | b).to_dense(), a_dense | b_dense)
# sparse & dense
self._check_bool_result(a & b_dense)
self._assert((a & b_dense).to_dense(), a_dense & b_dense)
self._check_bool_result(a | b_dense)
self._assert((a | b_dense).to_dense(), a_dense | b_dense)
@pytest.mark.parametrize("scalar", [0, 1, 3])
@pytest.mark.parametrize("fill_value", [None, 0, 2])
def test_float_scalar(
self, kind, mix, all_arithmetic_functions, fill_value, scalar
):
op = all_arithmetic_functions
values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
a = self._klass(values, kind=kind, fill_value=fill_value)
self._check_numeric_ops(a, scalar, values, scalar, mix, op)
def test_float_scalar_comparison(self, kind):
values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
a = self._klass(values, kind=kind)
self._check_comparison_ops(a, 1, values, 1)
self._check_comparison_ops(a, 0, values, 0)
self._check_comparison_ops(a, 3, values, 3)
a = self._klass(values, kind=kind, fill_value=0)
self._check_comparison_ops(a, 1, values, 1)
self._check_comparison_ops(a, 0, values, 0)
self._check_comparison_ops(a, 3, values, 3)
a = self._klass(values, kind=kind, fill_value=2)
self._check_comparison_ops(a, 1, values, 1)
self._check_comparison_ops(a, 0, values, 0)
self._check_comparison_ops(a, 3, values, 3)
def test_float_same_index(self, kind, mix, all_arithmetic_functions):
# when sp_index are the same
op = all_arithmetic_functions
values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
rvalues = self._base([np.nan, 2, 3, 4, np.nan, 0, 1, 3, 2, np.nan])
a = self._klass(values, kind=kind)
b = self._klass(rvalues, kind=kind)
self._check_numeric_ops(a, b, values, rvalues, mix, op)
values = self._base([0.0, 1.0, 2.0, 6.0, 0.0, 0.0, 1.0, 2.0, 1.0, 0.0])
rvalues = self._base([0.0, 2.0, 3.0, 4.0, 0.0, 0.0, 1.0, 3.0, 2.0, 0.0])
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind, fill_value=0)
self._check_numeric_ops(a, b, values, rvalues, mix, op)
def test_float_same_index_comparison(self, kind):
# when sp_index are the same
values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
rvalues = self._base([np.nan, 2, 3, 4, np.nan, 0, 1, 3, 2, np.nan])
a = self._klass(values, kind=kind)
b = self._klass(rvalues, kind=kind)
self._check_comparison_ops(a, b, values, rvalues)
values = self._base([0.0, 1.0, 2.0, 6.0, 0.0, 0.0, 1.0, 2.0, 1.0, 0.0])
rvalues = self._base([0.0, 2.0, 3.0, 4.0, 0.0, 0.0, 1.0, 3.0, 2.0, 0.0])
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind, fill_value=0)
self._check_comparison_ops(a, b, values, rvalues)
def test_float_array(self, kind, mix, all_arithmetic_functions):
op = all_arithmetic_functions
values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
rvalues = self._base([2, np.nan, 2, 3, np.nan, 0, 1, 5, 2, np.nan])
a = self._klass(values, kind=kind)
b = self._klass(rvalues, kind=kind)
self._check_numeric_ops(a, b, values, rvalues, mix, op)
self._check_numeric_ops(a, b * 0, values, rvalues * 0, mix, op)
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind)
self._check_numeric_ops(a, b, values, rvalues, mix, op)
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind, fill_value=0)
self._check_numeric_ops(a, b, values, rvalues, mix, op)
a = self._klass(values, kind=kind, fill_value=1)
b = self._klass(rvalues, kind=kind, fill_value=2)
self._check_numeric_ops(a, b, values, rvalues, mix, op)
def test_float_array_different_kind(self, mix, all_arithmetic_functions):
op = all_arithmetic_functions
values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
rvalues = self._base([2, np.nan, 2, 3, np.nan, 0, 1, 5, 2, np.nan])
a = self._klass(values, kind="integer")
b = self._klass(rvalues, kind="block")
self._check_numeric_ops(a, b, values, rvalues, mix, op)
self._check_numeric_ops(a, b * 0, values, rvalues * 0, mix, op)
a = self._klass(values, kind="integer", fill_value=0)
b = self._klass(rvalues, kind="block")
self._check_numeric_ops(a, b, values, rvalues, mix, op)
a = self._klass(values, kind="integer", fill_value=0)
b = self._klass(rvalues, kind="block", fill_value=0)
self._check_numeric_ops(a, b, values, rvalues, mix, op)
a = self._klass(values, kind="integer", fill_value=1)
b = self._klass(rvalues, kind="block", fill_value=2)
self._check_numeric_ops(a, b, values, rvalues, mix, op)
def test_float_array_comparison(self, kind):
values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
rvalues = self._base([2, np.nan, 2, 3, np.nan, 0, 1, 5, 2, np.nan])
a = self._klass(values, kind=kind)
b = self._klass(rvalues, kind=kind)
self._check_comparison_ops(a, b, values, rvalues)
self._check_comparison_ops(a, b * 0, values, rvalues * 0)
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind)
self._check_comparison_ops(a, b, values, rvalues)
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind, fill_value=0)
self._check_comparison_ops(a, b, values, rvalues)
a = self._klass(values, kind=kind, fill_value=1)
b = self._klass(rvalues, kind=kind, fill_value=2)
self._check_comparison_ops(a, b, values, rvalues)
def test_int_array(self, kind, mix, all_arithmetic_functions):
op = all_arithmetic_functions
# have to specify dtype explicitly until fixing GH 667
dtype = np.int64
values = self._base([0, 1, 2, 0, 0, 0, 1, 2, 1, 0], dtype=dtype)
rvalues = self._base([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=dtype)
a = self._klass(values, dtype=dtype, kind=kind)
assert a.dtype == SparseDtype(dtype)
b = self._klass(rvalues, dtype=dtype, kind=kind)
assert b.dtype == SparseDtype(dtype)
self._check_numeric_ops(a, b, values, rvalues, mix, op)
self._check_numeric_ops(a, b * 0, values, rvalues * 0, mix, op)
a = self._klass(values, fill_value=0, dtype=dtype, kind=kind)
assert a.dtype == SparseDtype(dtype)
b = self._klass(rvalues, dtype=dtype, kind=kind)
assert b.dtype == SparseDtype(dtype)
self._check_numeric_ops(a, b, values, rvalues, mix, op)
a = self._klass(values, fill_value=0, dtype=dtype, kind=kind)
assert a.dtype == SparseDtype(dtype)
b = self._klass(rvalues, fill_value=0, dtype=dtype, kind=kind)
assert b.dtype == SparseDtype(dtype)
self._check_numeric_ops(a, b, values, rvalues, mix, op)
a = self._klass(values, fill_value=1, dtype=dtype, kind=kind)
assert a.dtype == SparseDtype(dtype, fill_value=1)
b = self._klass(rvalues, fill_value=2, dtype=dtype, kind=kind)
assert b.dtype == SparseDtype(dtype, fill_value=2)
self._check_numeric_ops(a, b, values, rvalues, mix, op)
def test_int_array_comparison(self, kind):
dtype = "int64"
# int32 is not implemented at the moment
values = self._base([0, 1, 2, 0, 0, 0, 1, 2, 1, 0], dtype=dtype)
rvalues = self._base([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=dtype)
a = self._klass(values, dtype=dtype, kind=kind)
b = self._klass(rvalues, dtype=dtype, kind=kind)
self._check_comparison_ops(a, b, values, rvalues)
self._check_comparison_ops(a, b * 0, values, rvalues * 0)
a = self._klass(values, dtype=dtype, kind=kind, fill_value=0)
b = self._klass(rvalues, dtype=dtype, kind=kind)
self._check_comparison_ops(a, b, values, rvalues)
a = self._klass(values, dtype=dtype, kind=kind, fill_value=0)
b = self._klass(rvalues, dtype=dtype, kind=kind, fill_value=0)
self._check_comparison_ops(a, b, values, rvalues)
a = self._klass(values, dtype=dtype, kind=kind, fill_value=1)
b = self._klass(rvalues, dtype=dtype, kind=kind, fill_value=2)
self._check_comparison_ops(a, b, values, rvalues)
@pytest.mark.parametrize("fill_value", [True, False, np.nan])
def test_bool_same_index(self, kind, fill_value):
# GH 14000
# when sp_index are the same
values = self._base([True, False, True, True], dtype=np.bool)
rvalues = self._base([True, False, True, True], dtype=np.bool)
a = self._klass(values, kind=kind, dtype=np.bool, fill_value=fill_value)
b = self._klass(rvalues, kind=kind, dtype=np.bool, fill_value=fill_value)
self._check_logical_ops(a, b, values, rvalues)
@pytest.mark.parametrize("fill_value", [True, False, np.nan])
def test_bool_array_logical(self, kind, fill_value):
# GH 14000
# when sp_index are the same
values = self._base([True, False, True, False, True, True], dtype=np.bool)
rvalues = self._base([True, False, False, True, False, True], dtype=np.bool)
a = self._klass(values, kind=kind, dtype=np.bool, fill_value=fill_value)
b = self._klass(rvalues, kind=kind, dtype=np.bool, fill_value=fill_value)
self._check_logical_ops(a, b, values, rvalues)
def test_mixed_array_float_int(self, kind, mix, all_arithmetic_functions):
op = all_arithmetic_functions
rdtype = "int64"
values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
rvalues = self._base([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=rdtype)
a = self._klass(values, kind=kind)
b = self._klass(rvalues, kind=kind)
assert b.dtype == SparseDtype(rdtype)
self._check_numeric_ops(a, b, values, rvalues, mix, op)
self._check_numeric_ops(a, b * 0, values, rvalues * 0, mix, op)
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind)
assert b.dtype == SparseDtype(rdtype)
self._check_numeric_ops(a, b, values, rvalues, mix, op)
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind, fill_value=0)
assert b.dtype == SparseDtype(rdtype)
self._check_numeric_ops(a, b, values, rvalues, mix, op)
a = self._klass(values, kind=kind, fill_value=1)
b = self._klass(rvalues, kind=kind, fill_value=2)
assert b.dtype == SparseDtype(rdtype, fill_value=2)
self._check_numeric_ops(a, b, values, rvalues, mix, op)
def test_mixed_array_comparison(self, kind):
rdtype = "int64"
# int32 is not implemented at the moment
values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
rvalues = self._base([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=rdtype)
a = self._klass(values, kind=kind)
b = self._klass(rvalues, kind=kind)
assert b.dtype == SparseDtype(rdtype)
self._check_comparison_ops(a, b, values, rvalues)
self._check_comparison_ops(a, b * 0, values, rvalues * 0)
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind)
assert b.dtype == SparseDtype(rdtype)
self._check_comparison_ops(a, b, values, rvalues)
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind, fill_value=0)
assert b.dtype == SparseDtype(rdtype)
self._check_comparison_ops(a, b, values, rvalues)
a = self._klass(values, kind=kind, fill_value=1)
b = self._klass(rvalues, kind=kind, fill_value=2)
assert b.dtype == SparseDtype(rdtype, fill_value=2)
self._check_comparison_ops(a, b, values, rvalues)
class TestSparseSeriesArithmetic(TestSparseArrayArithmetics):
_base = pd.Series
_klass = pd.SparseSeries
def _assert(self, a, b):
tm.assert_series_equal(a, b)
def test_alignment(self, mix, all_arithmetic_functions):
op = all_arithmetic_functions
da = pd.Series(np.arange(4))
db = pd.Series(np.arange(4), index=[1, 2, 3, 4])
sa = pd.SparseSeries(np.arange(4), dtype=np.int64, fill_value=0)
sb = pd.SparseSeries(
np.arange(4), index=[1, 2, 3, 4], dtype=np.int64, fill_value=0
)
self._check_numeric_ops(sa, sb, da, db, mix, op)
sa = pd.SparseSeries(np.arange(4), dtype=np.int64, fill_value=np.nan)
sb = pd.SparseSeries(
np.arange(4), index=[1, 2, 3, 4], dtype=np.int64, fill_value=np.nan
)
self._check_numeric_ops(sa, sb, da, db, mix, op)
da = pd.Series(np.arange(4))
db = pd.Series(np.arange(4), index=[10, 11, 12, 13])
sa = pd.SparseSeries(np.arange(4), dtype=np.int64, fill_value=0)
sb = pd.SparseSeries(
np.arange(4), index=[10, 11, 12, 13], dtype=np.int64, fill_value=0
)
self._check_numeric_ops(sa, sb, da, db, mix, op)
sa = pd.SparseSeries(np.arange(4), dtype=np.int64, fill_value=np.nan)
sb = pd.SparseSeries(
np.arange(4), index=[10, 11, 12, 13], dtype=np.int64, fill_value=np.nan
)
self._check_numeric_ops(sa, sb, da, db, mix, op)
@pytest.mark.parametrize("op", [operator.eq, operator.add])
def test_with_list(op):
arr = pd.SparseArray([0, 1], fill_value=0)
result = op(arr, [0, 1])
expected = op(arr, pd.SparseArray([0, 1]))
tm.assert_sp_array_equal(result, expected)
@pytest.mark.parametrize("ufunc", [np.abs, np.exp])
@pytest.mark.parametrize(
"arr", [pd.SparseArray([0, 0, -1, 1]), pd.SparseArray([None, None, -1, 1])]
)
def test_ufuncs(ufunc, arr):
result = ufunc(arr)
fill_value = ufunc(arr.fill_value)
expected = pd.SparseArray(ufunc(np.asarray(arr)), fill_value=fill_value)
tm.assert_sp_array_equal(result, expected)
@pytest.mark.parametrize(
"a, b",
[
(pd.SparseArray([0, 0, 0]), np.array([0, 1, 2])),
(pd.SparseArray([0, 0, 0], fill_value=1), np.array([0, 1, 2])),
(pd.SparseArray([0, 0, 0], fill_value=1), np.array([0, 1, 2])),
(pd.SparseArray([0, 0, 0], fill_value=1), np.array([0, 1, 2])),
(pd.SparseArray([0, 0, 0], fill_value=1), np.array([0, 1, 2])),
],
)
@pytest.mark.parametrize("ufunc", [np.add, np.greater])
def test_binary_ufuncs(ufunc, a, b):
# can't say anything about fill value here.
result = ufunc(a, b)
expected = ufunc(np.asarray(a), np.asarray(b))
assert isinstance(result, pd.SparseArray)
tm.assert_numpy_array_equal(np.asarray(result), expected)
def test_ndarray_inplace():
sparray = pd.SparseArray([0, 2, 0, 0])
ndarray = np.array([0, 1, 2, 3])
ndarray += sparray
expected = np.array([0, 3, 2, 3])
tm.assert_numpy_array_equal(ndarray, expected)
def test_sparray_inplace():
sparray = pd.SparseArray([0, 2, 0, 0])
ndarray = np.array([0, 1, 2, 3])
sparray += ndarray
expected = pd.SparseArray([0, 3, 2, 3], fill_value=0)
tm.assert_sp_array_equal(sparray, expected)
@pytest.mark.parametrize("fill_value", [True, False])
def test_invert(fill_value):
arr = np.array([True, False, False, True])
sparray = pd.SparseArray(arr, fill_value=fill_value)
result = ~sparray
expected = pd.SparseArray(~arr, fill_value=not fill_value)
tm.assert_sp_array_equal(result, expected)
@pytest.mark.parametrize("fill_value", [0, np.nan])
@pytest.mark.parametrize("op", [operator.pos, operator.neg])
def test_unary_op(op, fill_value):
arr = np.array([0, 1, np.nan, 2])
sparray = pd.SparseArray(arr, fill_value=fill_value)
result = op(sparray)
expected = pd.SparseArray(op(arr), fill_value=op(fill_value))
tm.assert_sp_array_equal(result, expected)
| apache-2.0 |
fmv1992/data_utilities | setup.py | 1 | 4168 | """data_utilities setup file.
Based on https://github.com/pypa/sampleproject/blob/master/setup.py and also
based on the guide:
https://packaging.python.org/distributing/#requirements-for-packaging-and-distributing # noqa
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'readme.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='data_utilities',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='1.2.8',
description='A data analysis and visualization helper module.',
long_description=long_description,
# The project's main homepage.
url='https://github.com/fmv1992/data_utilities',
# Author details
author='Felipe M. Vieira',
author_email='[email protected]',
# Choose your license
license='GPLv2 or any later',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: End Users/Desktop',
'Topic :: Scientific/Engineering :: Information Analysis',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.6',
],
# What does your project relate to?
keywords='data sciences',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=[
'numpy',
'scipy',
'pandas',
'matplotlib',
'seaborn',
'scikit-learn',
],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
# extras_require={
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
# },
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
# package_data={
# 'sample': ['package_data.dat'],
# },
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
# entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
# },
)
| gpl-3.0 |
Intel-Corporation/tensorflow | tensorflow/tools/compatibility/renames_v2.py | 1 | 55971 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=line-too-long
"""List of renames to apply when converting from TF 1.0 to TF 2.0.
THIS FILE IS AUTOGENERATED: To update, please run:
bazel build tensorflow/tools/compatibility/update:generate_v2_renames_map
bazel-bin/tensorflow/tools/compatibility/update/generate_v2_renames_map
This file should be updated whenever endpoints are deprecated.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
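# Each entry maps a deprecated TF 1.x endpoint to its replacement and is consumed
# by the TF 1.x -> 2.x upgrade tooling in this directory. Minimal illustrative
# lookups (both entries appear in the table below):
#   renames['tf.log']        # -> 'tf.math.log'
#   renames['tf.FIFOQueue']  # -> 'tf.queue.FIFOQueue'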
renames = {
'tf.AUTO_REUSE':
'tf.compat.v1.AUTO_REUSE',
'tf.AttrValue':
'tf.compat.v1.AttrValue',
'tf.COMPILER_VERSION':
'tf.version.COMPILER_VERSION',
'tf.CXX11_ABI_FLAG':
'tf.sysconfig.CXX11_ABI_FLAG',
'tf.ConditionalAccumulator':
'tf.compat.v1.ConditionalAccumulator',
'tf.ConditionalAccumulatorBase':
'tf.compat.v1.ConditionalAccumulatorBase',
'tf.ConfigProto':
'tf.compat.v1.ConfigProto',
'tf.Dimension':
'tf.compat.v1.Dimension',
'tf.Event':
'tf.compat.v1.Event',
'tf.FIFOQueue':
'tf.queue.FIFOQueue',
'tf.FixedLenFeature':
'tf.io.FixedLenFeature',
'tf.FixedLenSequenceFeature':
'tf.io.FixedLenSequenceFeature',
'tf.FixedLengthRecordReader':
'tf.compat.v1.FixedLengthRecordReader',
'tf.GIT_VERSION':
'tf.version.GIT_VERSION',
'tf.GPUOptions':
'tf.compat.v1.GPUOptions',
'tf.GRAPH_DEF_VERSION':
'tf.version.GRAPH_DEF_VERSION',
'tf.GRAPH_DEF_VERSION_MIN_CONSUMER':
'tf.version.GRAPH_DEF_VERSION_MIN_CONSUMER',
'tf.GRAPH_DEF_VERSION_MIN_PRODUCER':
'tf.version.GRAPH_DEF_VERSION_MIN_PRODUCER',
'tf.GraphDef':
'tf.compat.v1.GraphDef',
'tf.GraphKeys':
'tf.compat.v1.GraphKeys',
'tf.GraphOptions':
'tf.compat.v1.GraphOptions',
'tf.HistogramProto':
'tf.compat.v1.HistogramProto',
'tf.IdentityReader':
'tf.compat.v1.IdentityReader',
'tf.InteractiveSession':
'tf.compat.v1.InteractiveSession',
'tf.LMDBReader':
'tf.compat.v1.LMDBReader',
'tf.LogMessage':
'tf.compat.v1.LogMessage',
'tf.MONOLITHIC_BUILD':
'tf.sysconfig.MONOLITHIC_BUILD',
'tf.MetaGraphDef':
'tf.compat.v1.MetaGraphDef',
'tf.NameAttrList':
'tf.compat.v1.NameAttrList',
'tf.NoGradient':
'tf.no_gradient',
'tf.NodeDef':
'tf.compat.v1.NodeDef',
'tf.NotDifferentiable':
'tf.no_gradient',
'tf.OpError':
'tf.errors.OpError',
'tf.OptimizerOptions':
'tf.compat.v1.OptimizerOptions',
'tf.PaddingFIFOQueue':
'tf.queue.PaddingFIFOQueue',
'tf.Print':
'tf.compat.v1.Print',
'tf.PriorityQueue':
'tf.queue.PriorityQueue',
'tf.QUANTIZED_DTYPES':
'tf.dtypes.QUANTIZED_DTYPES',
'tf.QueueBase':
'tf.queue.QueueBase',
'tf.RandomShuffleQueue':
'tf.queue.RandomShuffleQueue',
'tf.ReaderBase':
'tf.compat.v1.ReaderBase',
'tf.RunMetadata':
'tf.compat.v1.RunMetadata',
'tf.RunOptions':
'tf.compat.v1.RunOptions',
'tf.Session':
'tf.compat.v1.Session',
'tf.SessionLog':
'tf.compat.v1.SessionLog',
'tf.SparseConditionalAccumulator':
'tf.sparse.SparseConditionalAccumulator',
'tf.SparseFeature':
'tf.io.SparseFeature',
'tf.SparseTensorValue':
'tf.compat.v1.SparseTensorValue',
'tf.Summary':
'tf.compat.v1.Summary',
'tf.SummaryMetadata':
'tf.compat.v1.SummaryMetadata',
'tf.TFRecordReader':
'tf.compat.v1.TFRecordReader',
'tf.TensorInfo':
'tf.compat.v1.TensorInfo',
'tf.TextLineReader':
'tf.compat.v1.TextLineReader',
'tf.VERSION':
'tf.version.VERSION',
'tf.VarLenFeature':
'tf.io.VarLenFeature',
'tf.VariableScope':
'tf.compat.v1.VariableScope',
'tf.WholeFileReader':
'tf.compat.v1.WholeFileReader',
'tf.accumulate_n':
'tf.math.accumulate_n',
'tf.add_check_numerics_ops':
'tf.compat.v1.add_check_numerics_ops',
'tf.add_to_collection':
'tf.compat.v1.add_to_collection',
'tf.add_to_collections':
'tf.compat.v1.add_to_collections',
'tf.all_variables':
'tf.compat.v1.all_variables',
'tf.angle':
'tf.math.angle',
'tf.app.run':
'tf.compat.v1.app.run',
'tf.assert_greater_equal':
'tf.compat.v1.assert_greater_equal',
'tf.assert_integer':
'tf.compat.v1.assert_integer',
'tf.assert_less_equal':
'tf.compat.v1.assert_less_equal',
'tf.assert_near':
'tf.compat.v1.assert_near',
'tf.assert_negative':
'tf.compat.v1.assert_negative',
'tf.assert_non_negative':
'tf.compat.v1.assert_non_negative',
'tf.assert_non_positive':
'tf.compat.v1.assert_non_positive',
'tf.assert_none_equal':
'tf.compat.v1.assert_none_equal',
'tf.assert_positive':
'tf.compat.v1.assert_positive',
'tf.assert_proper_iterable':
'tf.debugging.assert_proper_iterable',
'tf.assert_rank_at_least':
'tf.compat.v1.assert_rank_at_least',
'tf.assert_rank_in':
'tf.compat.v1.assert_rank_in',
'tf.assert_same_float_dtype':
'tf.debugging.assert_same_float_dtype',
'tf.assert_scalar':
'tf.compat.v1.assert_scalar',
'tf.assert_type':
'tf.compat.v1.assert_type',
'tf.assert_variables_initialized':
'tf.compat.v1.assert_variables_initialized',
'tf.assign':
'tf.compat.v1.assign',
'tf.assign_add':
'tf.compat.v1.assign_add',
'tf.assign_sub':
'tf.compat.v1.assign_sub',
'tf.batch_scatter_update':
'tf.compat.v1.batch_scatter_update',
'tf.betainc':
'tf.math.betainc',
'tf.ceil':
'tf.math.ceil',
'tf.check_numerics':
'tf.debugging.check_numerics',
'tf.cholesky':
'tf.linalg.cholesky',
'tf.cholesky_solve':
'tf.linalg.cholesky_solve',
'tf.clip_by_average_norm':
'tf.compat.v1.clip_by_average_norm',
'tf.colocate_with':
'tf.compat.v1.colocate_with',
'tf.conj':
'tf.math.conj',
'tf.container':
'tf.compat.v1.container',
'tf.convert_to_tensor_or_indexed_slices':
'tf.compat.v1.convert_to_tensor_or_indexed_slices',
'tf.convert_to_tensor_or_sparse_tensor':
'tf.compat.v1.convert_to_tensor_or_sparse_tensor',
'tf.count_up_to':
'tf.compat.v1.count_up_to',
'tf.create_partitioned_variables':
'tf.compat.v1.create_partitioned_variables',
'tf.cross':
'tf.linalg.cross',
'tf.cumprod':
'tf.math.cumprod',
'tf.data.get_output_classes':
'tf.compat.v1.data.get_output_classes',
'tf.data.get_output_shapes':
'tf.compat.v1.data.get_output_shapes',
'tf.data.get_output_types':
'tf.compat.v1.data.get_output_types',
'tf.data.make_initializable_iterator':
'tf.compat.v1.data.make_initializable_iterator',
'tf.data.make_one_shot_iterator':
'tf.compat.v1.data.make_one_shot_iterator',
'tf.debugging.is_finite':
'tf.math.is_finite',
'tf.debugging.is_inf':
'tf.math.is_inf',
'tf.debugging.is_nan':
'tf.math.is_nan',
'tf.debugging.is_non_decreasing':
'tf.math.is_non_decreasing',
'tf.debugging.is_strictly_increasing':
'tf.math.is_strictly_increasing',
'tf.decode_base64':
'tf.io.decode_base64',
'tf.decode_compressed':
'tf.io.decode_compressed',
'tf.decode_json_example':
'tf.io.decode_json_example',
'tf.decode_raw':
'tf.io.decode_raw',
'tf.delete_session_tensor':
'tf.compat.v1.delete_session_tensor',
'tf.depth_to_space':
'tf.compat.v1.depth_to_space',
'tf.dequantize':
'tf.quantization.dequantize',
'tf.deserialize_many_sparse':
'tf.io.deserialize_many_sparse',
'tf.diag':
'tf.linalg.tensor_diag',
'tf.diag_part':
'tf.linalg.tensor_diag_part',
'tf.digamma':
'tf.math.digamma',
'tf.dimension_at_index':
'tf.compat.dimension_at_index',
'tf.dimension_value':
'tf.compat.dimension_value',
'tf.disable_eager_execution':
'tf.compat.v1.disable_eager_execution',
'tf.disable_resource_variables':
'tf.compat.v1.disable_resource_variables',
'tf.disable_v2_behavior':
'tf.compat.v1.disable_v2_behavior',
'tf.disable_v2_tensorshape':
'tf.compat.v1.disable_v2_tensorshape',
'tf.distribute.get_loss_reduction':
'tf.compat.v1.distribute.get_loss_reduction',
'tf.distributions.Bernoulli':
'tf.compat.v1.distributions.Bernoulli',
'tf.distributions.Beta':
'tf.compat.v1.distributions.Beta',
'tf.distributions.Categorical':
'tf.compat.v1.distributions.Categorical',
'tf.distributions.Dirichlet':
'tf.compat.v1.distributions.Dirichlet',
'tf.distributions.DirichletMultinomial':
'tf.compat.v1.distributions.DirichletMultinomial',
'tf.distributions.Distribution':
'tf.compat.v1.distributions.Distribution',
'tf.distributions.Exponential':
'tf.compat.v1.distributions.Exponential',
'tf.distributions.FULLY_REPARAMETERIZED':
'tf.compat.v1.distributions.FULLY_REPARAMETERIZED',
'tf.distributions.Gamma':
'tf.compat.v1.distributions.Gamma',
'tf.distributions.Laplace':
'tf.compat.v1.distributions.Laplace',
'tf.distributions.Multinomial':
'tf.compat.v1.distributions.Multinomial',
'tf.distributions.NOT_REPARAMETERIZED':
'tf.compat.v1.distributions.NOT_REPARAMETERIZED',
'tf.distributions.Normal':
'tf.compat.v1.distributions.Normal',
'tf.distributions.RegisterKL':
'tf.compat.v1.distributions.RegisterKL',
'tf.distributions.ReparameterizationType':
'tf.compat.v1.distributions.ReparameterizationType',
'tf.distributions.StudentT':
'tf.compat.v1.distributions.StudentT',
'tf.distributions.Uniform':
'tf.compat.v1.distributions.Uniform',
'tf.distributions.kl_divergence':
'tf.compat.v1.distributions.kl_divergence',
'tf.div':
'tf.compat.v1.div',
'tf.div_no_nan':
'tf.math.divide_no_nan',
'tf.dtypes.as_string':
'tf.strings.as_string',
'tf.enable_eager_execution':
'tf.compat.v1.enable_eager_execution',
'tf.enable_resource_variables':
'tf.compat.v1.enable_resource_variables',
'tf.enable_v2_behavior':
'tf.compat.v1.enable_v2_behavior',
'tf.enable_v2_tensorshape':
'tf.compat.v1.enable_v2_tensorshape',
'tf.encode_base64':
'tf.io.encode_base64',
'tf.erf':
'tf.math.erf',
'tf.erfc':
'tf.math.erfc',
'tf.estimator.experimental.KMeans':
'tf.compat.v1.estimator.experimental.KMeans',
'tf.estimator.experimental.dnn_logit_fn_builder':
'tf.compat.v1.estimator.experimental.dnn_logit_fn_builder',
'tf.estimator.experimental.linear_logit_fn_builder':
'tf.compat.v1.estimator.experimental.linear_logit_fn_builder',
'tf.estimator.inputs.numpy_input_fn':
'tf.compat.v1.estimator.inputs.numpy_input_fn',
'tf.estimator.inputs.pandas_input_fn':
'tf.compat.v1.estimator.inputs.pandas_input_fn',
'tf.expm1':
'tf.math.expm1',
'tf.fake_quant_with_min_max_args':
'tf.quantization.fake_quant_with_min_max_args',
'tf.fake_quant_with_min_max_args_gradient':
'tf.quantization.fake_quant_with_min_max_args_gradient',
'tf.fake_quant_with_min_max_vars':
'tf.quantization.fake_quant_with_min_max_vars',
'tf.fake_quant_with_min_max_vars_gradient':
'tf.quantization.fake_quant_with_min_max_vars_gradient',
'tf.fake_quant_with_min_max_vars_per_channel':
'tf.quantization.fake_quant_with_min_max_vars_per_channel',
'tf.fake_quant_with_min_max_vars_per_channel_gradient':
'tf.quantization.fake_quant_with_min_max_vars_per_channel_gradient',
'tf.feature_column.input_layer':
'tf.compat.v1.feature_column.input_layer',
'tf.feature_column.linear_model':
'tf.compat.v1.feature_column.linear_model',
'tf.feature_column.shared_embedding_columns':
'tf.compat.v1.feature_column.shared_embedding_columns',
'tf.fft':
'tf.signal.fft',
'tf.fft2d':
'tf.signal.fft2d',
'tf.fft3d':
'tf.signal.fft3d',
'tf.fixed_size_partitioner':
'tf.compat.v1.fixed_size_partitioner',
'tf.floordiv':
'tf.math.floordiv',
'tf.get_collection':
'tf.compat.v1.get_collection',
'tf.get_collection_ref':
'tf.compat.v1.get_collection_ref',
'tf.get_default_graph':
'tf.compat.v1.get_default_graph',
'tf.get_default_session':
'tf.compat.v1.get_default_session',
'tf.get_local_variable':
'tf.compat.v1.get_local_variable',
'tf.get_seed':
'tf.compat.v1.get_seed',
'tf.get_session_handle':
'tf.compat.v1.get_session_handle',
'tf.get_session_tensor':
'tf.compat.v1.get_session_tensor',
'tf.get_variable':
'tf.compat.v1.get_variable',
'tf.get_variable_scope':
'tf.compat.v1.get_variable_scope',
'tf.gfile.FastGFile':
'tf.compat.v1.gfile.FastGFile',
'tf.global_norm':
'tf.linalg.global_norm',
'tf.global_variables':
'tf.compat.v1.global_variables',
'tf.global_variables_initializer':
'tf.compat.v1.global_variables_initializer',
'tf.graph_util.convert_variables_to_constants':
'tf.compat.v1.graph_util.convert_variables_to_constants',
'tf.graph_util.extract_sub_graph':
'tf.compat.v1.graph_util.extract_sub_graph',
'tf.graph_util.must_run_on_cpu':
'tf.compat.v1.graph_util.must_run_on_cpu',
'tf.graph_util.remove_training_nodes':
'tf.compat.v1.graph_util.remove_training_nodes',
'tf.graph_util.tensor_shape_from_node_def_name':
'tf.compat.v1.graph_util.tensor_shape_from_node_def_name',
'tf.ifft':
'tf.signal.ifft',
'tf.ifft2d':
'tf.signal.ifft2d',
'tf.ifft3d':
'tf.signal.ifft3d',
'tf.igamma':
'tf.math.igamma',
'tf.igammac':
'tf.math.igammac',
'tf.imag':
'tf.math.imag',
'tf.image.resize_area':
'tf.compat.v1.image.resize_area',
'tf.image.resize_bicubic':
'tf.compat.v1.image.resize_bicubic',
'tf.image.resize_bilinear':
'tf.compat.v1.image.resize_bilinear',
'tf.image.resize_image_with_pad':
'tf.compat.v1.image.resize_image_with_pad',
'tf.image.resize_nearest_neighbor':
'tf.compat.v1.image.resize_nearest_neighbor',
'tf.image.transpose_image':
'tf.compat.v1.image.transpose_image',
'tf.initialize_all_tables':
'tf.compat.v1.initialize_all_tables',
'tf.initialize_all_variables':
'tf.compat.v1.initialize_all_variables',
'tf.initialize_local_variables':
'tf.compat.v1.initialize_local_variables',
'tf.initialize_variables':
'tf.compat.v1.initialize_variables',
'tf.initializers.global_variables':
'tf.compat.v1.initializers.global_variables',
'tf.initializers.local_variables':
'tf.compat.v1.initializers.local_variables',
'tf.initializers.tables_initializer':
'tf.compat.v1.initializers.tables_initializer',
'tf.initializers.uniform_unit_scaling':
'tf.compat.v1.initializers.uniform_unit_scaling',
'tf.initializers.variables':
'tf.compat.v1.initializers.variables',
'tf.invert_permutation':
'tf.math.invert_permutation',
'tf.io.PaddingFIFOQueue':
'tf.queue.PaddingFIFOQueue',
'tf.io.PriorityQueue':
'tf.queue.PriorityQueue',
'tf.io.QueueBase':
'tf.queue.QueueBase',
'tf.io.RandomShuffleQueue':
'tf.queue.RandomShuffleQueue',
'tf.io.TFRecordCompressionType':
'tf.compat.v1.io.TFRecordCompressionType',
'tf.io.tf_record_iterator':
'tf.compat.v1.io.tf_record_iterator',
'tf.is_finite':
'tf.math.is_finite',
'tf.is_inf':
'tf.math.is_inf',
'tf.is_nan':
'tf.math.is_nan',
'tf.is_non_decreasing':
'tf.math.is_non_decreasing',
'tf.is_numeric_tensor':
'tf.debugging.is_numeric_tensor',
'tf.is_strictly_increasing':
'tf.math.is_strictly_increasing',
'tf.is_variable_initialized':
'tf.compat.v1.is_variable_initialized',
'tf.keras.backend.get_session':
'tf.compat.v1.keras.backend.get_session',
'tf.keras.layers.CuDNNGRU':
'tf.compat.v1.keras.layers.CuDNNGRU',
'tf.keras.layers.CuDNNLSTM':
'tf.compat.v1.keras.layers.CuDNNLSTM',
'tf.keras.losses.cosine':
'tf.keras.losses.cosine_similarity',
'tf.keras.losses.cosine_proximity':
'tf.keras.losses.cosine_similarity',
'tf.keras.metrics.cosine':
'tf.keras.losses.cosine_similarity',
'tf.keras.metrics.cosine_proximity':
'tf.keras.losses.cosine_similarity',
'tf.layers.AveragePooling1D':
'tf.compat.v1.layers.AveragePooling1D',
'tf.layers.AveragePooling2D':
'tf.compat.v1.layers.AveragePooling2D',
'tf.layers.AveragePooling3D':
'tf.compat.v1.layers.AveragePooling3D',
'tf.layers.BatchNormalization':
'tf.compat.v1.layers.BatchNormalization',
'tf.layers.Conv1D':
'tf.compat.v1.layers.Conv1D',
'tf.layers.Conv2D':
'tf.compat.v1.layers.Conv2D',
'tf.layers.Conv2DTranspose':
'tf.compat.v1.layers.Conv2DTranspose',
'tf.layers.Conv3D':
'tf.compat.v1.layers.Conv3D',
'tf.layers.Conv3DTranspose':
'tf.compat.v1.layers.Conv3DTranspose',
'tf.layers.Dense':
'tf.compat.v1.layers.Dense',
'tf.layers.Dropout':
'tf.compat.v1.layers.Dropout',
'tf.layers.Flatten':
'tf.compat.v1.layers.Flatten',
'tf.layers.InputSpec':
'tf.keras.layers.InputSpec',
'tf.layers.Layer':
'tf.compat.v1.layers.Layer',
'tf.layers.MaxPooling1D':
'tf.compat.v1.layers.MaxPooling1D',
'tf.layers.MaxPooling2D':
'tf.compat.v1.layers.MaxPooling2D',
'tf.layers.MaxPooling3D':
'tf.compat.v1.layers.MaxPooling3D',
'tf.layers.SeparableConv1D':
'tf.compat.v1.layers.SeparableConv1D',
'tf.layers.SeparableConv2D':
'tf.compat.v1.layers.SeparableConv2D',
'tf.layers.average_pooling1d':
'tf.compat.v1.layers.average_pooling1d',
'tf.layers.average_pooling2d':
'tf.compat.v1.layers.average_pooling2d',
'tf.layers.average_pooling3d':
'tf.compat.v1.layers.average_pooling3d',
'tf.layers.batch_normalization':
'tf.compat.v1.layers.batch_normalization',
'tf.layers.conv1d':
'tf.compat.v1.layers.conv1d',
'tf.layers.conv2d':
'tf.compat.v1.layers.conv2d',
'tf.layers.conv2d_transpose':
'tf.compat.v1.layers.conv2d_transpose',
'tf.layers.conv3d':
'tf.compat.v1.layers.conv3d',
'tf.layers.conv3d_transpose':
'tf.compat.v1.layers.conv3d_transpose',
'tf.layers.dense':
'tf.compat.v1.layers.dense',
'tf.layers.dropout':
'tf.compat.v1.layers.dropout',
'tf.layers.experimental.keras_style_scope':
'tf.compat.v1.layers.experimental.keras_style_scope',
'tf.layers.experimental.set_keras_style':
'tf.compat.v1.layers.experimental.set_keras_style',
'tf.layers.flatten':
'tf.compat.v1.layers.flatten',
'tf.layers.max_pooling1d':
'tf.compat.v1.layers.max_pooling1d',
'tf.layers.max_pooling2d':
'tf.compat.v1.layers.max_pooling2d',
'tf.layers.max_pooling3d':
'tf.compat.v1.layers.max_pooling3d',
'tf.layers.separable_conv1d':
'tf.compat.v1.layers.separable_conv1d',
'tf.layers.separable_conv2d':
'tf.compat.v1.layers.separable_conv2d',
'tf.lbeta':
'tf.math.lbeta',
'tf.lgamma':
'tf.math.lgamma',
'tf.lin_space':
'tf.linspace',
'tf.linalg.transpose':
'tf.linalg.matrix_transpose',
'tf.lite.OpHint':
'tf.compat.v1.lite.OpHint',
'tf.lite.TocoConverter':
'tf.compat.v1.lite.TocoConverter',
'tf.lite.constants.GRAPHVIZ_DOT':
'tf.compat.v1.lite.constants.GRAPHVIZ_DOT',
'tf.lite.constants.INT8':
'tf.compat.v1.lite.constants.INT8',
'tf.lite.constants.TFLITE':
'tf.compat.v1.lite.constants.TFLITE',
'tf.lite.experimental.convert_op_hints_to_stubs':
'tf.compat.v1.lite.experimental.convert_op_hints_to_stubs',
'tf.lite.experimental.nn.TFLiteLSTMCell':
'tf.compat.v1.lite.experimental.nn.TFLiteLSTMCell',
'tf.lite.experimental.nn.TfLiteRNNCell':
'tf.compat.v1.lite.experimental.nn.TfLiteRNNCell',
'tf.lite.experimental.nn.dynamic_rnn':
'tf.compat.v1.lite.experimental.nn.dynamic_rnn',
'tf.lite.toco_convert':
'tf.compat.v1.lite.toco_convert',
'tf.local_variables':
'tf.compat.v1.local_variables',
'tf.local_variables_initializer':
'tf.compat.v1.local_variables_initializer',
'tf.log':
'tf.math.log',
'tf.log1p':
'tf.math.log1p',
'tf.log_sigmoid':
'tf.math.log_sigmoid',
'tf.logging.DEBUG':
'tf.compat.v1.logging.DEBUG',
'tf.logging.ERROR':
'tf.compat.v1.logging.ERROR',
'tf.logging.FATAL':
'tf.compat.v1.logging.FATAL',
'tf.logging.INFO':
'tf.compat.v1.logging.INFO',
'tf.logging.TaskLevelStatusMessage':
'tf.compat.v1.logging.TaskLevelStatusMessage',
'tf.logging.WARN':
'tf.compat.v1.logging.WARN',
'tf.logging.debug':
'tf.compat.v1.logging.debug',
'tf.logging.error':
'tf.compat.v1.logging.error',
'tf.logging.fatal':
'tf.compat.v1.logging.fatal',
'tf.logging.flush':
'tf.compat.v1.logging.flush',
'tf.logging.get_verbosity':
'tf.compat.v1.logging.get_verbosity',
'tf.logging.info':
'tf.compat.v1.logging.info',
'tf.logging.log':
'tf.compat.v1.logging.log',
'tf.logging.log_every_n':
'tf.compat.v1.logging.log_every_n',
'tf.logging.log_first_n':
'tf.compat.v1.logging.log_first_n',
'tf.logging.log_if':
'tf.compat.v1.logging.log_if',
'tf.logging.set_verbosity':
'tf.compat.v1.logging.set_verbosity',
'tf.logging.vlog':
'tf.compat.v1.logging.vlog',
'tf.logging.warn':
'tf.compat.v1.logging.warn',
'tf.logging.warning':
'tf.compat.v1.logging.warning',
'tf.logical_xor':
'tf.math.logical_xor',
'tf.losses.Reduction':
'tf.compat.v1.losses.Reduction',
'tf.losses.absolute_difference':
'tf.compat.v1.losses.absolute_difference',
'tf.losses.add_loss':
'tf.compat.v1.losses.add_loss',
'tf.losses.compute_weighted_loss':
'tf.compat.v1.losses.compute_weighted_loss',
'tf.losses.cosine_distance':
'tf.compat.v1.losses.cosine_distance',
'tf.losses.get_losses':
'tf.compat.v1.losses.get_losses',
'tf.losses.get_regularization_loss':
'tf.compat.v1.losses.get_regularization_loss',
'tf.losses.get_regularization_losses':
'tf.compat.v1.losses.get_regularization_losses',
'tf.losses.get_total_loss':
'tf.compat.v1.losses.get_total_loss',
'tf.losses.hinge_loss':
'tf.compat.v1.losses.hinge_loss',
'tf.losses.huber_loss':
'tf.compat.v1.losses.huber_loss',
'tf.losses.log_loss':
'tf.compat.v1.losses.log_loss',
'tf.losses.mean_pairwise_squared_error':
'tf.compat.v1.losses.mean_pairwise_squared_error',
'tf.losses.mean_squared_error':
'tf.compat.v1.losses.mean_squared_error',
'tf.losses.sigmoid_cross_entropy':
'tf.compat.v1.losses.sigmoid_cross_entropy',
'tf.losses.softmax_cross_entropy':
'tf.compat.v1.losses.softmax_cross_entropy',
'tf.losses.sparse_softmax_cross_entropy':
'tf.compat.v1.losses.sparse_softmax_cross_entropy',
'tf.make_template':
'tf.compat.v1.make_template',
'tf.make_tensor_proto':
'tf.compat.v1.make_tensor_proto',
'tf.manip.gather_nd':
'tf.compat.v1.manip.gather_nd',
'tf.manip.reshape':
'tf.reshape',
'tf.manip.reverse':
'tf.reverse',
'tf.manip.roll':
'tf.roll',
'tf.manip.scatter_nd':
'tf.scatter_nd',
'tf.manip.space_to_batch_nd':
'tf.space_to_batch_nd',
'tf.manip.tile':
'tf.tile',
'tf.matching_files':
'tf.io.matching_files',
'tf.matrix_band_part':
'tf.linalg.band_part',
'tf.matrix_determinant':
'tf.linalg.det',
'tf.matrix_diag':
'tf.linalg.diag',
'tf.matrix_diag_part':
'tf.linalg.diag_part',
'tf.matrix_inverse':
'tf.linalg.inv',
'tf.matrix_set_diag':
'tf.linalg.set_diag',
'tf.matrix_solve':
'tf.linalg.solve',
'tf.matrix_solve_ls':
'tf.linalg.lstsq',
'tf.matrix_transpose':
'tf.linalg.matrix_transpose',
'tf.matrix_triangular_solve':
'tf.linalg.triangular_solve',
'tf.metrics.accuracy':
'tf.compat.v1.metrics.accuracy',
'tf.metrics.auc':
'tf.compat.v1.metrics.auc',
'tf.metrics.average_precision_at_k':
'tf.compat.v1.metrics.average_precision_at_k',
'tf.metrics.false_negatives':
'tf.compat.v1.metrics.false_negatives',
'tf.metrics.false_negatives_at_thresholds':
'tf.compat.v1.metrics.false_negatives_at_thresholds',
'tf.metrics.false_positives':
'tf.compat.v1.metrics.false_positives',
'tf.metrics.false_positives_at_thresholds':
'tf.compat.v1.metrics.false_positives_at_thresholds',
'tf.metrics.mean':
'tf.compat.v1.metrics.mean',
'tf.metrics.mean_absolute_error':
'tf.compat.v1.metrics.mean_absolute_error',
'tf.metrics.mean_cosine_distance':
'tf.compat.v1.metrics.mean_cosine_distance',
'tf.metrics.mean_iou':
'tf.compat.v1.metrics.mean_iou',
'tf.metrics.mean_per_class_accuracy':
'tf.compat.v1.metrics.mean_per_class_accuracy',
'tf.metrics.mean_relative_error':
'tf.compat.v1.metrics.mean_relative_error',
'tf.metrics.mean_squared_error':
'tf.compat.v1.metrics.mean_squared_error',
'tf.metrics.mean_tensor':
'tf.compat.v1.metrics.mean_tensor',
'tf.metrics.percentage_below':
'tf.compat.v1.metrics.percentage_below',
'tf.metrics.precision':
'tf.compat.v1.metrics.precision',
'tf.metrics.precision_at_k':
'tf.compat.v1.metrics.precision_at_k',
'tf.metrics.precision_at_thresholds':
'tf.compat.v1.metrics.precision_at_thresholds',
'tf.metrics.precision_at_top_k':
'tf.compat.v1.metrics.precision_at_top_k',
'tf.metrics.recall':
'tf.compat.v1.metrics.recall',
'tf.metrics.recall_at_k':
'tf.compat.v1.metrics.recall_at_k',
'tf.metrics.recall_at_thresholds':
'tf.compat.v1.metrics.recall_at_thresholds',
'tf.metrics.recall_at_top_k':
'tf.compat.v1.metrics.recall_at_top_k',
'tf.metrics.root_mean_squared_error':
'tf.compat.v1.metrics.root_mean_squared_error',
'tf.metrics.sensitivity_at_specificity':
'tf.compat.v1.metrics.sensitivity_at_specificity',
'tf.metrics.sparse_average_precision_at_k':
'tf.compat.v1.metrics.sparse_average_precision_at_k',
'tf.metrics.sparse_precision_at_k':
'tf.compat.v1.metrics.sparse_precision_at_k',
'tf.metrics.specificity_at_sensitivity':
'tf.compat.v1.metrics.specificity_at_sensitivity',
'tf.metrics.true_negatives':
'tf.compat.v1.metrics.true_negatives',
'tf.metrics.true_negatives_at_thresholds':
'tf.compat.v1.metrics.true_negatives_at_thresholds',
'tf.metrics.true_positives':
'tf.compat.v1.metrics.true_positives',
'tf.metrics.true_positives_at_thresholds':
'tf.compat.v1.metrics.true_positives_at_thresholds',
'tf.min_max_variable_partitioner':
'tf.compat.v1.min_max_variable_partitioner',
'tf.model_variables':
'tf.compat.v1.model_variables',
'tf.moving_average_variables':
'tf.compat.v1.moving_average_variables',
'tf.nn.avg_pool_v2':
'tf.nn.avg_pool',
'tf.nn.bidirectional_dynamic_rnn':
'tf.compat.v1.nn.bidirectional_dynamic_rnn',
'tf.nn.conv2d_backprop_filter':
'tf.compat.v1.nn.conv2d_backprop_filter',
'tf.nn.conv3d_backprop_filter':
'tf.compat.v1.nn.conv3d_backprop_filter',
'tf.nn.conv3d_backprop_filter_v2':
'tf.compat.v1.nn.conv3d_backprop_filter_v2',
'tf.nn.ctc_beam_search_decoder_v2':
'tf.nn.ctc_beam_search_decoder',
'tf.nn.ctc_loss_v2':
'tf.nn.ctc_loss',
'tf.nn.depthwise_conv2d_native':
'tf.compat.v1.nn.depthwise_conv2d_native',
'tf.nn.depthwise_conv2d_native_backprop_filter':
'tf.nn.depthwise_conv2d_backprop_filter',
'tf.nn.depthwise_conv2d_native_backprop_input':
'tf.nn.depthwise_conv2d_backprop_input',
'tf.nn.dynamic_rnn':
'tf.compat.v1.nn.dynamic_rnn',
'tf.nn.log_uniform_candidate_sampler':
'tf.random.log_uniform_candidate_sampler',
'tf.nn.max_pool_v2':
'tf.nn.max_pool',
'tf.nn.quantized_avg_pool':
'tf.compat.v1.nn.quantized_avg_pool',
'tf.nn.quantized_conv2d':
'tf.compat.v1.nn.quantized_conv2d',
'tf.nn.quantized_max_pool':
'tf.compat.v1.nn.quantized_max_pool',
'tf.nn.quantized_relu_x':
'tf.compat.v1.nn.quantized_relu_x',
'tf.nn.raw_rnn':
'tf.compat.v1.nn.raw_rnn',
'tf.nn.relu_layer':
'tf.compat.v1.nn.relu_layer',
'tf.nn.rnn_cell.BasicLSTMCell':
'tf.compat.v1.nn.rnn_cell.BasicLSTMCell',
'tf.nn.rnn_cell.BasicRNNCell':
'tf.compat.v1.nn.rnn_cell.BasicRNNCell',
'tf.nn.rnn_cell.DeviceWrapper':
'tf.compat.v1.nn.rnn_cell.DeviceWrapper',
'tf.nn.rnn_cell.DropoutWrapper':
'tf.compat.v1.nn.rnn_cell.DropoutWrapper',
'tf.nn.rnn_cell.GRUCell':
'tf.compat.v1.nn.rnn_cell.GRUCell',
'tf.nn.rnn_cell.LSTMCell':
'tf.compat.v1.nn.rnn_cell.LSTMCell',
'tf.nn.rnn_cell.LSTMStateTuple':
'tf.compat.v1.nn.rnn_cell.LSTMStateTuple',
'tf.nn.rnn_cell.MultiRNNCell':
'tf.compat.v1.nn.rnn_cell.MultiRNNCell',
'tf.nn.rnn_cell.RNNCell':
'tf.compat.v1.nn.rnn_cell.RNNCell',
'tf.nn.rnn_cell.ResidualWrapper':
'tf.compat.v1.nn.rnn_cell.ResidualWrapper',
'tf.nn.static_bidirectional_rnn':
'tf.compat.v1.nn.static_bidirectional_rnn',
'tf.nn.static_rnn':
'tf.compat.v1.nn.static_rnn',
'tf.nn.static_state_saving_rnn':
'tf.compat.v1.nn.static_state_saving_rnn',
'tf.nn.uniform_candidate_sampler':
'tf.random.uniform_candidate_sampler',
'tf.nn.xw_plus_b':
'tf.compat.v1.nn.xw_plus_b',
'tf.op_scope':
'tf.compat.v1.op_scope',
'tf.parse_single_sequence_example':
'tf.io.parse_single_sequence_example',
'tf.parse_tensor':
'tf.io.parse_tensor',
'tf.placeholder':
'tf.compat.v1.placeholder',
'tf.placeholder_with_default':
'tf.compat.v1.placeholder_with_default',
'tf.polygamma':
'tf.math.polygamma',
'tf.profiler.AdviceProto':
'tf.compat.v1.profiler.AdviceProto',
'tf.profiler.GraphNodeProto':
'tf.compat.v1.profiler.GraphNodeProto',
'tf.profiler.MultiGraphNodeProto':
'tf.compat.v1.profiler.MultiGraphNodeProto',
'tf.profiler.OpLogProto':
'tf.compat.v1.profiler.OpLogProto',
'tf.profiler.ProfileOptionBuilder':
'tf.compat.v1.profiler.ProfileOptionBuilder',
'tf.profiler.Profiler':
'tf.compat.v1.profiler.Profiler',
'tf.profiler.advise':
'tf.compat.v1.profiler.advise',
'tf.profiler.profile':
'tf.compat.v1.profiler.profile',
'tf.profiler.write_op_log':
'tf.compat.v1.profiler.write_op_log',
'tf.py_func':
'tf.compat.v1.py_func',
'tf.python_io.TFRecordCompressionType':
'tf.compat.v1.python_io.TFRecordCompressionType',
'tf.python_io.TFRecordOptions':
'tf.io.TFRecordOptions',
'tf.python_io.TFRecordWriter':
'tf.io.TFRecordWriter',
'tf.python_io.tf_record_iterator':
'tf.compat.v1.python_io.tf_record_iterator',
'tf.qr':
'tf.linalg.qr',
'tf.quantize':
'tf.quantization.quantize',
'tf.quantized_concat':
'tf.quantization.quantized_concat',
'tf.ragged.RaggedTensorValue':
'tf.compat.v1.ragged.RaggedTensorValue',
'tf.ragged.constant_value':
'tf.compat.v1.ragged.constant_value',
'tf.random.get_seed':
'tf.compat.v1.random.get_seed',
'tf.random.set_random_seed':
'tf.compat.v1.random.set_random_seed',
'tf.random_crop':
'tf.image.random_crop',
'tf.random_gamma':
'tf.random.gamma',
'tf.random_normal':
'tf.random.normal',
'tf.random_shuffle':
'tf.random.shuffle',
'tf.random_uniform':
'tf.random.uniform',
'tf.read_file':
'tf.io.read_file',
'tf.real':
'tf.math.real',
'tf.reciprocal':
'tf.math.reciprocal',
'tf.regex_replace':
'tf.strings.regex_replace',
'tf.report_uninitialized_variables':
'tf.compat.v1.report_uninitialized_variables',
'tf.reset_default_graph':
'tf.compat.v1.reset_default_graph',
'tf.resource_loader.get_data_files_path':
'tf.compat.v1.resource_loader.get_data_files_path',
'tf.resource_loader.get_path_to_datafile':
'tf.compat.v1.resource_loader.get_path_to_datafile',
'tf.resource_loader.get_root_dir_with_all_resources':
'tf.compat.v1.resource_loader.get_root_dir_with_all_resources',
'tf.resource_loader.load_resource':
'tf.compat.v1.resource_loader.load_resource',
'tf.resource_loader.readahead_file_path':
'tf.compat.v1.resource_loader.readahead_file_path',
'tf.resource_variables_enabled':
'tf.compat.v1.resource_variables_enabled',
'tf.reverse_v2':
'tf.reverse',
'tf.rint':
'tf.math.rint',
'tf.rsqrt':
'tf.math.rsqrt',
'tf.saved_model.Builder':
'tf.compat.v1.saved_model.Builder',
'tf.saved_model.LEGACY_INIT_OP_KEY':
'tf.compat.v1.saved_model.LEGACY_INIT_OP_KEY',
'tf.saved_model.MAIN_OP_KEY':
'tf.compat.v1.saved_model.MAIN_OP_KEY',
'tf.saved_model.build_signature_def':
'tf.compat.v1.saved_model.build_signature_def',
'tf.saved_model.build_tensor_info':
'tf.compat.v1.saved_model.build_tensor_info',
'tf.saved_model.builder.SavedModelBuilder':
'tf.compat.v1.saved_model.builder.SavedModelBuilder',
'tf.saved_model.classification_signature_def':
'tf.compat.v1.saved_model.classification_signature_def',
'tf.saved_model.constants.ASSETS_DIRECTORY':
'tf.saved_model.ASSETS_DIRECTORY',
'tf.saved_model.constants.ASSETS_KEY':
'tf.saved_model.ASSETS_KEY',
'tf.saved_model.constants.LEGACY_INIT_OP_KEY':
'tf.compat.v1.saved_model.constants.LEGACY_INIT_OP_KEY',
'tf.saved_model.constants.MAIN_OP_KEY':
'tf.compat.v1.saved_model.constants.MAIN_OP_KEY',
'tf.saved_model.constants.SAVED_MODEL_FILENAME_PB':
'tf.saved_model.SAVED_MODEL_FILENAME_PB',
'tf.saved_model.constants.SAVED_MODEL_FILENAME_PBTXT':
'tf.saved_model.SAVED_MODEL_FILENAME_PBTXT',
'tf.saved_model.constants.SAVED_MODEL_SCHEMA_VERSION':
'tf.saved_model.SAVED_MODEL_SCHEMA_VERSION',
'tf.saved_model.constants.VARIABLES_DIRECTORY':
'tf.saved_model.VARIABLES_DIRECTORY',
'tf.saved_model.constants.VARIABLES_FILENAME':
'tf.saved_model.VARIABLES_FILENAME',
'tf.saved_model.experimental.save':
'tf.saved_model.save',
'tf.saved_model.get_tensor_from_tensor_info':
'tf.compat.v1.saved_model.get_tensor_from_tensor_info',
'tf.saved_model.is_valid_signature':
'tf.compat.v1.saved_model.is_valid_signature',
'tf.saved_model.loader.load':
'tf.compat.v1.saved_model.loader.load',
'tf.saved_model.loader.maybe_saved_model_directory':
'tf.compat.v1.saved_model.loader.maybe_saved_model_directory',
'tf.saved_model.main_op.main_op':
'tf.compat.v1.saved_model.main_op.main_op',
'tf.saved_model.main_op.main_op_with_restore':
'tf.compat.v1.saved_model.main_op.main_op_with_restore',
'tf.saved_model.main_op_with_restore':
'tf.compat.v1.saved_model.main_op_with_restore',
'tf.saved_model.maybe_saved_model_directory':
'tf.compat.v1.saved_model.maybe_saved_model_directory',
'tf.saved_model.predict_signature_def':
'tf.compat.v1.saved_model.predict_signature_def',
'tf.saved_model.regression_signature_def':
'tf.compat.v1.saved_model.regression_signature_def',
'tf.saved_model.signature_constants.CLASSIFY_INPUTS':
'tf.saved_model.CLASSIFY_INPUTS',
'tf.saved_model.signature_constants.CLASSIFY_METHOD_NAME':
'tf.saved_model.CLASSIFY_METHOD_NAME',
'tf.saved_model.signature_constants.CLASSIFY_OUTPUT_CLASSES':
'tf.saved_model.CLASSIFY_OUTPUT_CLASSES',
'tf.saved_model.signature_constants.CLASSIFY_OUTPUT_SCORES':
'tf.saved_model.CLASSIFY_OUTPUT_SCORES',
'tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY':
'tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY',
'tf.saved_model.signature_constants.PREDICT_INPUTS':
'tf.saved_model.PREDICT_INPUTS',
'tf.saved_model.signature_constants.PREDICT_METHOD_NAME':
'tf.saved_model.PREDICT_METHOD_NAME',
'tf.saved_model.signature_constants.PREDICT_OUTPUTS':
'tf.saved_model.PREDICT_OUTPUTS',
'tf.saved_model.signature_constants.REGRESS_INPUTS':
'tf.saved_model.REGRESS_INPUTS',
'tf.saved_model.signature_constants.REGRESS_METHOD_NAME':
'tf.saved_model.REGRESS_METHOD_NAME',
'tf.saved_model.signature_constants.REGRESS_OUTPUTS':
'tf.saved_model.REGRESS_OUTPUTS',
'tf.saved_model.signature_def_utils.build_signature_def':
'tf.compat.v1.saved_model.signature_def_utils.build_signature_def',
'tf.saved_model.signature_def_utils.classification_signature_def':
'tf.compat.v1.saved_model.signature_def_utils.classification_signature_def',
'tf.saved_model.signature_def_utils.is_valid_signature':
'tf.compat.v1.saved_model.signature_def_utils.is_valid_signature',
'tf.saved_model.signature_def_utils.predict_signature_def':
'tf.compat.v1.saved_model.signature_def_utils.predict_signature_def',
'tf.saved_model.signature_def_utils.regression_signature_def':
'tf.compat.v1.saved_model.signature_def_utils.regression_signature_def',
'tf.saved_model.simple_save':
'tf.compat.v1.saved_model.simple_save',
'tf.saved_model.tag_constants.GPU':
'tf.saved_model.GPU',
'tf.saved_model.tag_constants.SERVING':
'tf.saved_model.SERVING',
'tf.saved_model.tag_constants.TPU':
'tf.saved_model.TPU',
'tf.saved_model.tag_constants.TRAINING':
'tf.saved_model.TRAINING',
'tf.saved_model.utils.build_tensor_info':
'tf.compat.v1.saved_model.utils.build_tensor_info',
'tf.saved_model.utils.get_tensor_from_tensor_info':
'tf.compat.v1.saved_model.utils.get_tensor_from_tensor_info',
'tf.scatter_add':
'tf.compat.v1.scatter_add',
'tf.scatter_div':
'tf.compat.v1.scatter_div',
'tf.scatter_max':
'tf.compat.v1.scatter_max',
'tf.scatter_min':
'tf.compat.v1.scatter_min',
'tf.scatter_mul':
'tf.compat.v1.scatter_mul',
'tf.scatter_nd_add':
'tf.compat.v1.scatter_nd_add',
'tf.scatter_nd_sub':
'tf.compat.v1.scatter_nd_sub',
'tf.scatter_nd_update':
'tf.compat.v1.scatter_nd_update',
'tf.scatter_sub':
'tf.compat.v1.scatter_sub',
'tf.scatter_update':
'tf.compat.v1.scatter_update',
'tf.segment_max':
'tf.math.segment_max',
'tf.segment_mean':
'tf.math.segment_mean',
'tf.segment_min':
'tf.math.segment_min',
'tf.segment_prod':
'tf.math.segment_prod',
'tf.segment_sum':
'tf.math.segment_sum',
'tf.self_adjoint_eig':
'tf.linalg.eigh',
'tf.self_adjoint_eigvals':
'tf.linalg.eigvalsh',
'tf.serialize_many_sparse':
'tf.compat.v1.serialize_many_sparse',
'tf.serialize_sparse':
'tf.compat.v1.serialize_sparse',
'tf.serialize_tensor':
'tf.io.serialize_tensor',
'tf.set_random_seed':
'tf.compat.v1.set_random_seed',
'tf.setdiff1d':
'tf.compat.v1.setdiff1d',
'tf.sets.set_difference':
'tf.sets.difference',
'tf.sets.set_intersection':
'tf.sets.intersection',
'tf.sets.set_size':
'tf.sets.size',
'tf.sets.set_union':
'tf.sets.union',
'tf.space_to_depth':
'tf.compat.v1.space_to_depth',
'tf.sparse.matmul':
'tf.sparse.sparse_dense_matmul',
'tf.sparse.merge':
'tf.compat.v1.sparse.merge',
'tf.sparse.placeholder':
'tf.compat.v1.sparse.placeholder',
'tf.sparse.reduce_max_sparse':
'tf.compat.v1.sparse.reduce_max_sparse',
'tf.sparse.reduce_sum_sparse':
'tf.compat.v1.sparse.reduce_sum_sparse',
'tf.sparse_fill_empty_rows':
'tf.sparse.fill_empty_rows',
'tf.sparse_mask':
'tf.sparse.mask',
'tf.sparse_maximum':
'tf.sparse.maximum',
'tf.sparse_merge':
'tf.compat.v1.sparse_merge',
'tf.sparse_minimum':
'tf.sparse.minimum',
'tf.sparse_placeholder':
'tf.compat.v1.sparse_placeholder',
'tf.sparse_reduce_max_sparse':
'tf.compat.v1.sparse_reduce_max_sparse',
'tf.sparse_reduce_sum_sparse':
'tf.compat.v1.sparse_reduce_sum_sparse',
'tf.sparse_reorder':
'tf.sparse.reorder',
'tf.sparse_reset_shape':
'tf.sparse.reset_shape',
'tf.sparse_reshape':
'tf.sparse.reshape',
'tf.sparse_retain':
'tf.sparse.retain',
'tf.sparse_segment_mean':
'tf.compat.v1.sparse_segment_mean',
'tf.sparse_segment_sqrt_n':
'tf.compat.v1.sparse_segment_sqrt_n',
'tf.sparse_segment_sum':
'tf.compat.v1.sparse_segment_sum',
'tf.sparse_slice':
'tf.sparse.slice',
'tf.sparse_softmax':
'tf.sparse.softmax',
'tf.sparse_tensor_dense_matmul':
'tf.sparse.sparse_dense_matmul',
'tf.sparse_tensor_to_dense':
'tf.sparse.to_dense',
'tf.sparse_to_dense':
'tf.compat.v1.sparse_to_dense',
'tf.sparse_to_indicator':
'tf.sparse.to_indicator',
'tf.sparse_transpose':
'tf.sparse.transpose',
'tf.spectral.dct':
'tf.signal.dct',
'tf.spectral.fft':
'tf.signal.fft',
'tf.spectral.fft2d':
'tf.signal.fft2d',
'tf.spectral.fft3d':
'tf.signal.fft3d',
'tf.spectral.idct':
'tf.signal.idct',
'tf.spectral.ifft':
'tf.signal.ifft',
'tf.spectral.ifft2d':
'tf.signal.ifft2d',
'tf.spectral.ifft3d':
'tf.signal.ifft3d',
'tf.spectral.irfft':
'tf.signal.irfft',
'tf.spectral.irfft2d':
'tf.signal.irfft2d',
'tf.spectral.irfft3d':
'tf.signal.irfft3d',
'tf.spectral.rfft':
'tf.signal.rfft',
'tf.spectral.rfft2d':
'tf.signal.rfft2d',
'tf.spectral.rfft3d':
'tf.signal.rfft3d',
'tf.squared_difference':
'tf.math.squared_difference',
'tf.string_join':
'tf.strings.join',
'tf.string_strip':
'tf.strings.strip',
'tf.string_to_hash_bucket_fast':
'tf.strings.to_hash_bucket_fast',
'tf.string_to_hash_bucket_strong':
'tf.strings.to_hash_bucket_strong',
'tf.summary.Event':
'tf.compat.v1.summary.Event',
'tf.summary.FileWriter':
'tf.compat.v1.summary.FileWriter',
'tf.summary.FileWriterCache':
'tf.compat.v1.summary.FileWriterCache',
'tf.summary.SessionLog':
'tf.compat.v1.summary.SessionLog',
'tf.summary.Summary':
'tf.compat.v1.summary.Summary',
'tf.summary.SummaryDescription':
'tf.compat.v1.summary.SummaryDescription',
'tf.summary.TaggedRunMetadata':
'tf.compat.v1.summary.TaggedRunMetadata',
'tf.summary.audio':
'tf.compat.v1.summary.audio',
'tf.summary.get_summary_description':
'tf.compat.v1.summary.get_summary_description',
'tf.summary.histogram':
'tf.compat.v1.summary.histogram',
'tf.summary.image':
'tf.compat.v1.summary.image',
'tf.summary.initialize':
'tf.compat.v1.summary.initialize',
'tf.summary.merge':
'tf.compat.v1.summary.merge',
'tf.summary.merge_all':
'tf.compat.v1.summary.merge_all',
'tf.summary.scalar':
'tf.compat.v1.summary.scalar',
'tf.summary.tensor_summary':
'tf.compat.v1.summary.tensor_summary',
'tf.summary.text':
'tf.compat.v1.summary.text',
'tf.svd':
'tf.linalg.svd',
'tf.tables_initializer':
'tf.compat.v1.tables_initializer',
'tf.tensor_scatter_add':
'tf.tensor_scatter_nd_add',
'tf.tensor_scatter_sub':
'tf.tensor_scatter_nd_sub',
'tf.tensor_scatter_update':
'tf.tensor_scatter_nd_update',
'tf.test.StubOutForTesting':
'tf.compat.v1.test.StubOutForTesting',
'tf.test.compute_gradient_error':
'tf.compat.v1.test.compute_gradient_error',
'tf.test.get_temp_dir':
'tf.compat.v1.test.get_temp_dir',
'tf.test.mock':
'tf.compat.v1.test.mock',
'tf.test.test_src_dir_path':
'tf.compat.v1.test.test_src_dir_path',
'tf.to_bfloat16':
'tf.compat.v1.to_bfloat16',
'tf.to_complex128':
'tf.compat.v1.to_complex128',
'tf.to_complex64':
'tf.compat.v1.to_complex64',
'tf.to_double':
'tf.compat.v1.to_double',
'tf.to_float':
'tf.compat.v1.to_float',
'tf.to_int32':
'tf.compat.v1.to_int32',
'tf.to_int64':
'tf.compat.v1.to_int64',
'tf.trace':
'tf.linalg.trace',
'tf.train.AdadeltaOptimizer':
'tf.compat.v1.train.AdadeltaOptimizer',
'tf.train.AdagradDAOptimizer':
'tf.compat.v1.train.AdagradDAOptimizer',
'tf.train.AdagradOptimizer':
'tf.compat.v1.train.AdagradOptimizer',
'tf.train.AdamOptimizer':
'tf.compat.v1.train.AdamOptimizer',
'tf.train.CheckpointSaverHook':
'tf.estimator.CheckpointSaverHook',
'tf.train.CheckpointSaverListener':
'tf.estimator.CheckpointSaverListener',
'tf.train.ChiefSessionCreator':
'tf.compat.v1.train.ChiefSessionCreator',
'tf.train.FeedFnHook':
'tf.estimator.FeedFnHook',
'tf.train.FinalOpsHook':
'tf.estimator.FinalOpsHook',
'tf.train.FtrlOptimizer':
'tf.compat.v1.train.FtrlOptimizer',
'tf.train.GlobalStepWaiterHook':
'tf.estimator.GlobalStepWaiterHook',
'tf.train.GradientDescentOptimizer':
'tf.compat.v1.train.GradientDescentOptimizer',
'tf.train.LoggingTensorHook':
'tf.estimator.LoggingTensorHook',
'tf.train.LooperThread':
'tf.compat.v1.train.LooperThread',
'tf.train.MomentumOptimizer':
'tf.compat.v1.train.MomentumOptimizer',
'tf.train.MonitoredSession':
'tf.compat.v1.train.MonitoredSession',
'tf.train.MonitoredTrainingSession':
'tf.compat.v1.train.MonitoredTrainingSession',
'tf.train.NanLossDuringTrainingError':
'tf.estimator.NanLossDuringTrainingError',
'tf.train.NanTensorHook':
'tf.estimator.NanTensorHook',
'tf.train.NewCheckpointReader':
'tf.compat.v1.train.NewCheckpointReader',
'tf.train.Optimizer':
'tf.compat.v1.train.Optimizer',
'tf.train.ProfilerHook':
'tf.estimator.ProfilerHook',
'tf.train.ProximalAdagradOptimizer':
'tf.compat.v1.train.ProximalAdagradOptimizer',
'tf.train.ProximalGradientDescentOptimizer':
'tf.compat.v1.train.ProximalGradientDescentOptimizer',
'tf.train.QueueRunner':
'tf.compat.v1.train.QueueRunner',
'tf.train.RMSPropOptimizer':
'tf.compat.v1.train.RMSPropOptimizer',
'tf.train.Saver':
'tf.compat.v1.train.Saver',
'tf.train.SaverDef':
'tf.compat.v1.train.SaverDef',
'tf.train.Scaffold':
'tf.compat.v1.train.Scaffold',
'tf.train.SecondOrStepTimer':
'tf.estimator.SecondOrStepTimer',
'tf.train.Server':
'tf.distribute.Server',
'tf.train.SessionCreator':
'tf.compat.v1.train.SessionCreator',
'tf.train.SessionManager':
'tf.compat.v1.train.SessionManager',
'tf.train.SessionRunArgs':
'tf.estimator.SessionRunArgs',
'tf.train.SessionRunContext':
'tf.estimator.SessionRunContext',
'tf.train.SessionRunHook':
'tf.estimator.SessionRunHook',
'tf.train.SessionRunValues':
'tf.estimator.SessionRunValues',
'tf.train.SingularMonitoredSession':
'tf.compat.v1.train.SingularMonitoredSession',
'tf.train.StepCounterHook':
'tf.estimator.StepCounterHook',
'tf.train.StopAtStepHook':
'tf.estimator.StopAtStepHook',
'tf.train.SummarySaverHook':
'tf.estimator.SummarySaverHook',
'tf.train.Supervisor':
'tf.compat.v1.train.Supervisor',
'tf.train.SyncReplicasOptimizer':
'tf.compat.v1.train.SyncReplicasOptimizer',
'tf.train.VocabInfo':
'tf.estimator.VocabInfo',
'tf.train.WorkerSessionCreator':
'tf.compat.v1.train.WorkerSessionCreator',
'tf.train.add_queue_runner':
'tf.compat.v1.train.add_queue_runner',
'tf.train.assert_global_step':
'tf.compat.v1.train.assert_global_step',
'tf.train.basic_train_loop':
'tf.compat.v1.train.basic_train_loop',
'tf.train.batch':
'tf.compat.v1.train.batch',
'tf.train.batch_join':
'tf.compat.v1.train.batch_join',
'tf.train.checkpoint_exists':
'tf.compat.v1.train.checkpoint_exists',
'tf.train.cosine_decay':
'tf.compat.v1.train.cosine_decay',
'tf.train.cosine_decay_restarts':
'tf.compat.v1.train.cosine_decay_restarts',
'tf.train.create_global_step':
'tf.compat.v1.train.create_global_step',
'tf.train.do_quantize_training_on_graphdef':
'tf.compat.v1.train.do_quantize_training_on_graphdef',
'tf.train.experimental.MixedPrecisionLossScaleOptimizer':
'tf.compat.v1.train.experimental.MixedPrecisionLossScaleOptimizer',
'tf.train.exponential_decay':
'tf.compat.v1.train.exponential_decay',
'tf.train.export_meta_graph':
'tf.compat.v1.train.export_meta_graph',
'tf.train.generate_checkpoint_state_proto':
'tf.compat.v1.train.generate_checkpoint_state_proto',
'tf.train.get_checkpoint_mtimes':
'tf.compat.v1.train.get_checkpoint_mtimes',
'tf.train.get_global_step':
'tf.compat.v1.train.get_global_step',
'tf.train.get_or_create_global_step':
'tf.compat.v1.train.get_or_create_global_step',
'tf.train.global_step':
'tf.compat.v1.train.global_step',
'tf.train.import_meta_graph':
'tf.compat.v1.train.import_meta_graph',
'tf.train.init_from_checkpoint':
'tf.compat.v1.train.init_from_checkpoint',
'tf.train.input_producer':
'tf.compat.v1.train.input_producer',
'tf.train.inverse_time_decay':
'tf.compat.v1.train.inverse_time_decay',
'tf.train.limit_epochs':
'tf.compat.v1.train.limit_epochs',
'tf.train.linear_cosine_decay':
'tf.compat.v1.train.linear_cosine_decay',
'tf.train.match_filenames_once':
'tf.io.match_filenames_once',
'tf.train.maybe_batch':
'tf.compat.v1.train.maybe_batch',
'tf.train.maybe_batch_join':
'tf.compat.v1.train.maybe_batch_join',
'tf.train.maybe_shuffle_batch':
'tf.compat.v1.train.maybe_shuffle_batch',
'tf.train.maybe_shuffle_batch_join':
'tf.compat.v1.train.maybe_shuffle_batch_join',
'tf.train.natural_exp_decay':
'tf.compat.v1.train.natural_exp_decay',
'tf.train.noisy_linear_cosine_decay':
'tf.compat.v1.train.noisy_linear_cosine_decay',
'tf.train.piecewise_constant':
'tf.compat.v1.train.piecewise_constant',
'tf.train.piecewise_constant_decay':
'tf.compat.v1.train.piecewise_constant_decay',
'tf.train.polynomial_decay':
'tf.compat.v1.train.polynomial_decay',
'tf.train.queue_runner.QueueRunner':
'tf.compat.v1.train.queue_runner.QueueRunner',
'tf.train.queue_runner.add_queue_runner':
'tf.compat.v1.train.queue_runner.add_queue_runner',
'tf.train.queue_runner.start_queue_runners':
'tf.compat.v1.train.queue_runner.start_queue_runners',
'tf.train.range_input_producer':
'tf.compat.v1.train.range_input_producer',
'tf.train.remove_checkpoint':
'tf.compat.v1.train.remove_checkpoint',
'tf.train.replica_device_setter':
'tf.compat.v1.train.replica_device_setter',
'tf.train.shuffle_batch':
'tf.compat.v1.train.shuffle_batch',
'tf.train.shuffle_batch_join':
'tf.compat.v1.train.shuffle_batch_join',
'tf.train.slice_input_producer':
'tf.compat.v1.train.slice_input_producer',
'tf.train.start_queue_runners':
'tf.compat.v1.train.start_queue_runners',
'tf.train.string_input_producer':
'tf.compat.v1.train.string_input_producer',
'tf.train.summary_iterator':
'tf.compat.v1.train.summary_iterator',
'tf.train.update_checkpoint_state':
'tf.compat.v1.train.update_checkpoint_state',
'tf.train.warm_start':
'tf.compat.v1.train.warm_start',
'tf.train.write_graph':
'tf.io.write_graph',
'tf.trainable_variables':
'tf.compat.v1.trainable_variables',
'tf.truncated_normal':
'tf.random.truncated_normal',
'tf.uniform_unit_scaling_initializer':
'tf.compat.v1.uniform_unit_scaling_initializer',
'tf.unsorted_segment_max':
'tf.math.unsorted_segment_max',
'tf.unsorted_segment_mean':
'tf.math.unsorted_segment_mean',
'tf.unsorted_segment_min':
'tf.math.unsorted_segment_min',
'tf.unsorted_segment_prod':
'tf.math.unsorted_segment_prod',
'tf.unsorted_segment_sqrt_n':
'tf.math.unsorted_segment_sqrt_n',
'tf.unsorted_segment_sum':
'tf.math.unsorted_segment_sum',
'tf.variable_axis_size_partitioner':
'tf.compat.v1.variable_axis_size_partitioner',
'tf.variable_op_scope':
'tf.compat.v1.variable_op_scope',
'tf.variable_scope':
'tf.compat.v1.variable_scope',
'tf.variables_initializer':
'tf.compat.v1.variables_initializer',
'tf.verify_tensor_all_finite':
'tf.compat.v1.verify_tensor_all_finite',
'tf.wrap_function':
'tf.compat.v1.wrap_function',
'tf.write_file':
'tf.io.write_file',
'tf.zeta':
'tf.math.zeta'
}
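# Illustrative sketch, not part of the generated mapping above: one way such a
# renames mapping could be applied to source text.  The mapping's variable name
# is not visible in this excerpt, so the ``renames`` parameter below is a
# hypothetical stand-in; real upgrade tooling rewrites the AST rather than
# doing plain text substitution.
def apply_renames(source, renames):
    """Rewrite deprecated tf.* symbol names in ``source`` using ``renames``."""
    import re
    # Longest names first, as a simple guard against prefix collisions; the
    # word-boundary anchors stop 'recall' from matching inside 'recall_at_k'.
    for old_name in sorted(renames, key=len, reverse=True):
        source = re.sub(r'\b' + re.escape(old_name) + r'\b',
                        renames[old_name], source)
    return source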
| apache-2.0 |
kelseyoo14/Wander | venv_2_7/lib/python2.7/site-packages/IPython/lib/tests/test_latextools.py | 8 | 3877 | # encoding: utf-8
"""Tests for IPython.utils.path.py"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
try:
from unittest.mock import patch
except ImportError:
from mock import patch
import nose.tools as nt
from IPython.lib import latextools
from IPython.testing.decorators import onlyif_cmds_exist, skipif_not_matplotlib
from IPython.utils.process import FindCmdError
def test_latex_to_png_dvipng_fails_when_no_cmd():
"""
    `latex_to_png_dvipng` should return None when a required command is missing
"""
for command in ['latex', 'dvipng']:
yield (check_latex_to_png_dvipng_fails_when_no_cmd, command)
def check_latex_to_png_dvipng_fails_when_no_cmd(command):
def mock_find_cmd(arg):
if arg == command:
raise FindCmdError
with patch.object(latextools, "find_cmd", mock_find_cmd):
nt.assert_equals(latextools.latex_to_png_dvipng("whatever", True),
None)
@onlyif_cmds_exist('latex', 'dvipng')
def test_latex_to_png_dvipng_runs():
"""
Test that latex_to_png_dvipng just runs without error.
"""
def mock_kpsewhich(filename):
nt.assert_equals(filename, "breqn.sty")
return None
for (s, wrap) in [(u"$$x^2$$", False), (u"x^2", True)]:
yield (latextools.latex_to_png_dvipng, s, wrap)
with patch.object(latextools, "kpsewhich", mock_kpsewhich):
yield (latextools.latex_to_png_dvipng, s, wrap)
@skipif_not_matplotlib
def test_latex_to_png_mpl_runs():
"""
Test that latex_to_png_mpl just runs without error.
"""
def mock_kpsewhich(filename):
nt.assert_equals(filename, "breqn.sty")
return None
for (s, wrap) in [("$x^2$", False), ("x^2", True)]:
yield (latextools.latex_to_png_mpl, s, wrap)
with patch.object(latextools, "kpsewhich", mock_kpsewhich):
yield (latextools.latex_to_png_mpl, s, wrap)
@skipif_not_matplotlib
def test_latex_to_html():
img = latextools.latex_to_html("$x^2$")
nt.assert_in("data:image/png;base64,iVBOR", img)
def test_genelatex_no_wrap():
"""
Test genelatex with wrap=False.
"""
def mock_kpsewhich(filename):
assert False, ("kpsewhich should not be called "
"(called with {0})".format(filename))
with patch.object(latextools, "kpsewhich", mock_kpsewhich):
nt.assert_equals(
'\n'.join(latextools.genelatex("body text", False)),
r'''\documentclass{article}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{amssymb}
\usepackage{bm}
\pagestyle{empty}
\begin{document}
body text
\end{document}''')
def test_genelatex_wrap_with_breqn():
"""
Test genelatex with wrap=True for the case breqn.sty is installed.
"""
def mock_kpsewhich(filename):
nt.assert_equals(filename, "breqn.sty")
return "path/to/breqn.sty"
with patch.object(latextools, "kpsewhich", mock_kpsewhich):
nt.assert_equals(
'\n'.join(latextools.genelatex("x^2", True)),
r'''\documentclass{article}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{amssymb}
\usepackage{bm}
\usepackage{breqn}
\pagestyle{empty}
\begin{document}
\begin{dmath*}
x^2
\end{dmath*}
\end{document}''')
def test_genelatex_wrap_without_breqn():
"""
Test genelatex with wrap=True for the case breqn.sty is not installed.
"""
def mock_kpsewhich(filename):
nt.assert_equals(filename, "breqn.sty")
return None
with patch.object(latextools, "kpsewhich", mock_kpsewhich):
nt.assert_equals(
'\n'.join(latextools.genelatex("x^2", True)),
r'''\documentclass{article}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{amssymb}
\usepackage{bm}
\pagestyle{empty}
\begin{document}
$$x^2$$
\end{document}''')
| artistic-2.0 |
drpngx/tensorflow | tensorflow/contrib/gan/python/estimator/python/gan_estimator_test.py | 9 | 12615 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TFGAN's estimator.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
import numpy as np
import six
from tensorflow.contrib import layers
from tensorflow.contrib.gan.python import namedtuples
from tensorflow.contrib.gan.python.estimator.python import gan_estimator_impl as estimator
from tensorflow.contrib.gan.python.losses.python import tuple_losses as losses
from tensorflow.contrib.learn.python.learn.learn_io import graph_io
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator.canned import head as head_lib
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import metrics as metrics_lib
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import learning_rate_decay
from tensorflow.python.training import monitored_session
from tensorflow.python.training import training
from tensorflow.python.training import training_util
def generator_fn(noise_dict, mode):
del mode
noise = noise_dict['x']
return layers.fully_connected(noise, noise.shape[1].value)
def discriminator_fn(data, unused_conditioning, mode):
del unused_conditioning, mode
return layers.fully_connected(data, 1)
def mock_head(testcase, expected_generator_inputs, expected_real_data,
generator_scope_name):
"""Returns a mock head that validates logits values and variable names."""
discriminator_scope_name = 'Discriminator' # comes from TFGAN defaults
generator_var_names = set([
'%s/fully_connected/weights:0' % generator_scope_name,
'%s/fully_connected/biases:0' % generator_scope_name])
discriminator_var_names = set([
'%s/fully_connected/weights:0' % discriminator_scope_name,
'%s/fully_connected/biases:0' % discriminator_scope_name])
def _create_estimator_spec(features, mode, logits, labels):
gan_model = logits # renaming for clarity
is_predict = mode == model_fn_lib.ModeKeys.PREDICT
testcase.assertIsNone(features)
testcase.assertIsNone(labels)
testcase.assertIsInstance(gan_model, namedtuples.GANModel)
trainable_vars = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
expected_var_names = (generator_var_names if is_predict else
generator_var_names | discriminator_var_names)
testcase.assertItemsEqual(expected_var_names,
[var.name for var in trainable_vars])
assertions = []
def _or_none(x):
return None if is_predict else x
testcase.assertEqual(expected_generator_inputs, gan_model.generator_inputs)
# TODO(joelshor): Add check on `generated_data`.
testcase.assertItemsEqual(
generator_var_names,
set([x.name for x in gan_model.generator_variables]))
testcase.assertEqual(generator_scope_name, gan_model.generator_scope.name)
testcase.assertEqual(_or_none(expected_real_data), gan_model.real_data)
# TODO(joelshor): Add check on `discriminator_real_outputs`.
# TODO(joelshor): Add check on `discriminator_gen_outputs`.
if is_predict:
testcase.assertIsNone(gan_model.discriminator_scope)
else:
testcase.assertEqual(discriminator_scope_name,
gan_model.discriminator_scope.name)
with ops.control_dependencies(assertions):
if mode == model_fn_lib.ModeKeys.TRAIN:
return model_fn_lib.EstimatorSpec(
mode=mode, loss=array_ops.zeros([]),
train_op=control_flow_ops.no_op(), training_hooks=[])
elif mode == model_fn_lib.ModeKeys.EVAL:
return model_fn_lib.EstimatorSpec(
mode=mode, predictions=gan_model.generated_data,
loss=array_ops.zeros([]))
elif mode == model_fn_lib.ModeKeys.PREDICT:
return model_fn_lib.EstimatorSpec(
mode=mode, predictions=gan_model.generated_data)
else:
testcase.fail('Invalid mode: {}'.format(mode))
head = test.mock.NonCallableMagicMock(spec=head_lib._Head)
head.create_estimator_spec = test.mock.MagicMock(
wraps=_create_estimator_spec)
return head
class GANModelFnTest(test.TestCase):
"""Tests that _gan_model_fn passes expected logits to mock head."""
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_logits_helper(self, mode):
"""Tests that the expected logits are passed to mock head."""
with ops.Graph().as_default():
training_util.get_or_create_global_step()
generator_inputs = {'x': array_ops.zeros([5, 4])}
real_data = (None if mode == model_fn_lib.ModeKeys.PREDICT else
array_ops.zeros([5, 4]))
generator_scope_name = 'generator'
head = mock_head(self,
expected_generator_inputs=generator_inputs,
expected_real_data=real_data,
generator_scope_name=generator_scope_name)
estimator_spec = estimator._gan_model_fn(
features=generator_inputs,
labels=real_data,
mode=mode,
generator_fn=generator_fn,
discriminator_fn=discriminator_fn,
generator_scope_name=generator_scope_name,
head=head)
with monitored_session.MonitoredTrainingSession(
checkpoint_dir=self._model_dir) as sess:
if mode == model_fn_lib.ModeKeys.TRAIN:
sess.run(estimator_spec.train_op)
elif mode == model_fn_lib.ModeKeys.EVAL:
sess.run(estimator_spec.loss)
elif mode == model_fn_lib.ModeKeys.PREDICT:
sess.run(estimator_spec.predictions)
else:
self.fail('Invalid mode: {}'.format(mode))
def test_logits_predict(self):
self._test_logits_helper(model_fn_lib.ModeKeys.PREDICT)
def test_logits_eval(self):
self._test_logits_helper(model_fn_lib.ModeKeys.EVAL)
def test_logits_train(self):
self._test_logits_helper(model_fn_lib.ModeKeys.TRAIN)
# TODO(joelshor): Add pandas test.
class GANEstimatorIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_complete_flow(
self, train_input_fn, eval_input_fn, predict_input_fn, prediction_size,
lr_decay=False):
def make_opt():
gstep = training_util.get_or_create_global_step()
lr = learning_rate_decay.exponential_decay(1.0, gstep, 10, 0.9)
return training.GradientDescentOptimizer(lr)
def get_metrics(gan_model):
return {
'mse_custom_metric': metrics_lib.mean_squared_error(
gan_model.real_data, gan_model.generated_data)
}
gopt = make_opt if lr_decay else training.GradientDescentOptimizer(1.0)
dopt = make_opt if lr_decay else training.GradientDescentOptimizer(1.0)
est = estimator.GANEstimator(
generator_fn=generator_fn,
discriminator_fn=discriminator_fn,
generator_loss_fn=losses.wasserstein_generator_loss,
discriminator_loss_fn=losses.wasserstein_discriminator_loss,
generator_optimizer=gopt,
discriminator_optimizer=dopt,
get_eval_metric_ops_fn=get_metrics,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
    # EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
self.assertEqual(scores['discriminator_loss'] + scores['generator_loss'],
scores['loss'])
self.assertIn('mse_custom_metric', six.iterkeys(scores))
# PREDICT
predictions = np.array([x for x in est.predict(predict_input_fn)])
self.assertAllEqual(prediction_size, predictions.shape)
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
input_dim = 4
batch_size = 5
data = np.zeros([batch_size, input_dim])
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
prediction_size=[batch_size, input_dim])
def test_numpy_input_fn_lrdecay(self):
"""Tests complete flow with numpy_input_fn."""
input_dim = 4
batch_size = 5
data = np.zeros([batch_size, input_dim])
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
prediction_size=[batch_size, input_dim],
lr_decay=True)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
input_dim = 4
batch_size = 6
data = np.zeros([batch_size, input_dim])
serialized_examples = []
for datum in data:
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x': feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
'y': feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([input_dim], dtypes.float32),
'y': parsing_ops.FixedLenFeature([input_dim], dtypes.float32),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(
serialized_examples, feature_spec)
_, features = graph_io.queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
_, features = graph_io.queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
_, features = graph_io.queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
prediction_size=[batch_size, input_dim])
if __name__ == '__main__':
test.main()
| apache-2.0 |
wxgeo/geophar | wxgeometrie/sympy/utilities/runtests.py | 1 | 85989 | """
This is our testing framework.
Goals:
* it should be compatible with py.test and operate very similarly
(or identically)
* doesn't require any external dependencies
* preferably all the functionality should be in this file only
* no magic, just import the test file and execute the test functions, that's it
* portable
"""
from __future__ import print_function, division
import os
import sys
import platform
import inspect
import traceback
import pdb
import re
import linecache
import time
from fnmatch import fnmatch
from timeit import default_timer as clock
import doctest as pdoctest # avoid clashing with our doctest() function
from doctest import DocTestFinder, DocTestRunner
import random
import subprocess
import signal
import stat
from sympy.core.cache import clear_cache
from sympy.core.compatibility import exec_, PY3, string_types, range, unwrap
from sympy.utilities.misc import find_executable
from sympy.external import import_module
from sympy.utilities.exceptions import SymPyDeprecationWarning
IS_WINDOWS = (os.name == 'nt')
ON_TRAVIS = os.getenv('TRAVIS_BUILD_NUMBER', None)
# empirically generated list of the proportion of time spent running
# an even split of tests. This should periodically be regenerated.
# A list of [.6, .1, .3] would mean that if the tests are evenly split
# into '1/3', '2/3', '3/3', the first split would take 60% of the time,
# the second 10% and the third 30%. These lists are normalized to sum
# to 1, so [60, 10, 30] has the same behavior as [6, 1, 3] or [.6, .1, .3].
#
# This list can be generated with the code:
# from time import time
# import sympy
#
# delays, num_splits = [], 30
# for i in range(1, num_splits + 1):
# tic = time()
# sympy.test(split='{}/{}'.format(i, num_splits), time_balance=False)
# delays.append(time() - tic)
# tot = sum(delays)
# print([round(x / tot, 4) for x in delays]))
SPLIT_DENSITY = [0.2464, 0.0507, 0.0328, 0.0113, 0.0418, 0.012, 0.0269, 0.0095, 0.091, 0.0215, 0.001, 0.0023, 0.0116, 0.0137, 0.0041, 0.0039, 0.0145, 0.0172, 0.059, 0.0017, 0.0112, 0.0128, 0.0012, 0.0293, 0.0705, 0.0284, 0.1495, 0.0073, 0.0052, 0.0115]
SPLIT_DENSITY_SLOW = [0.3616, 0.0003, 0.0004, 0.0004, 0.0255, 0.0005, 0.0674, 0.0337, 0.1057, 0.0329, 0.0002, 0.0002, 0.0184, 0.0028, 0.0046, 0.0148, 0.0046, 0.0083, 0.0004, 0.0002, 0.0069, 0.0004, 0.0004, 0.0046, 0.0205, 0.1378, 0.1451, 0.0003, 0.0006, 0.0006]
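# Illustrative sketch, not part of the original module: one way a density list
# like the ones above could be turned into time-balanced split boundaries.  The
# real balancing is done by ``split_list`` elsewhere in this file; the helper
# name ``_balanced_boundaries`` is hypothetical.
def _balanced_boundaries(density, num_splits):
    """Return cut indices giving each of ``num_splits`` chunks ~equal density."""
    total = float(sum(density))
    boundaries, running = [], 0.0
    for i, share in enumerate(density):
        running += share / total
        # close the current chunk once it has accumulated its fair share
        if (len(boundaries) < num_splits - 1 and
                running >= (len(boundaries) + 1.0) / num_splits):
            boundaries.append(i + 1)
    return boundaries
# e.g. _balanced_boundaries(SPLIT_DENSITY, 2) returns [i], where i is the index
# at which roughly half of the recorded run time has accumulated.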
class Skipped(Exception):
pass
class TimeOutError(Exception):
pass
# add more flags ??
future_flags = division.compiler_flag
def _indent(s, indent=4):
"""
Add the given number of space characters to the beginning of
every non-blank line in ``s``, and return the result.
If the string ``s`` is Unicode, it is encoded using the stdout
encoding and the ``backslashreplace`` error handler.
"""
# After a 2to3 run the below code is bogus, so wrap it with a version check
if not PY3:
if isinstance(s, unicode):
s = s.encode(pdoctest._encoding, 'backslashreplace')
# This regexp matches the start of non-blank lines:
return re.sub('(?m)^(?!$)', indent*' ', s)
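# Illustrative example (not in the original): _indent("a\n\nb", 2) returns
# "  a\n\n  b"; blank lines are left untouched.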
pdoctest._indent = _indent
# override reporter to maintain windows and python3
def _report_failure(self, out, test, example, got):
"""
Report that the given example failed.
"""
s = self._checker.output_difference(example, got, self.optionflags)
s = s.encode('raw_unicode_escape').decode('utf8', 'ignore')
out(self._failure_header(test, example) + s)
if PY3 and IS_WINDOWS:
DocTestRunner.report_failure = _report_failure
def convert_to_native_paths(lst):
"""
Converts a list of '/' separated paths into a list of
native (os.sep separated) paths and converts to lowercase
if the system is case insensitive.
"""
newlst = []
for i, rv in enumerate(lst):
rv = os.path.join(*rv.split("/"))
# on windows the slash after the colon is dropped
if sys.platform == "win32":
pos = rv.find(':')
if pos != -1:
if rv[pos + 1] != '\\':
rv = rv[:pos + 1] + '\\' + rv[pos + 1:]
newlst.append(os.path.normcase(rv))
return newlst
def get_sympy_dir():
"""
    Returns the root sympy directory and sets the global value
indicating whether the system is case sensitive or not.
"""
this_file = os.path.abspath(__file__)
sympy_dir = os.path.join(os.path.dirname(this_file), "..", "..")
sympy_dir = os.path.normpath(sympy_dir)
return os.path.normcase(sympy_dir)
def setup_pprint():
from sympy import pprint_use_unicode, init_printing
# force pprint to be in ascii mode in doctests
pprint_use_unicode(False)
# hook our nice, hash-stable strprinter
init_printing(pretty_print=False)
def run_in_subprocess_with_hash_randomization(
function, function_args=(),
function_kwargs=None, command=sys.executable,
module='sympy.utilities.runtests', force=False):
"""
Run a function in a Python subprocess with hash randomization enabled.
If hash randomization is not supported by the version of Python given, it
returns False. Otherwise, it returns the exit value of the command. The
    function is passed to sys.exit(), so the function's return value becomes the
    subprocess's exit code, which is what this function returns.
The environment variable PYTHONHASHSEED is used to seed Python's hash
randomization. If it is set, this function will return False, because
starting a new subprocess is unnecessary in that case. If it is not set,
one is set at random, and the tests are run. Note that if this
environment variable is set when Python starts, hash randomization is
automatically enabled. To force a subprocess to be created even if
PYTHONHASHSEED is set, pass ``force=True``. This flag will not force a
subprocess in Python versions that do not support hash randomization (see
below), because those versions of Python do not support the ``-R`` flag.
``function`` should be a string name of a function that is importable from
the module ``module``, like "_test". The default for ``module`` is
"sympy.utilities.runtests". ``function_args`` and ``function_kwargs``
should be a repr-able tuple and dict, respectively. The default Python
command is sys.executable, which is the currently running Python command.
This function is necessary because the seed for hash randomization must be
set by the environment variable before Python starts. Hence, in order to
use a predetermined seed for tests, we must start Python in a separate
subprocess.
Hash randomization was added in the minor Python versions 2.6.8, 2.7.3,
3.1.5, and 3.2.3, and is enabled by default in all Python versions after
and including 3.3.0.
Examples
========
>>> from sympy.utilities.runtests import (
... run_in_subprocess_with_hash_randomization)
>>> # run the core tests in verbose mode
>>> run_in_subprocess_with_hash_randomization("_test",
... function_args=("core",),
... function_kwargs={'verbose': True}) # doctest: +SKIP
# Will return 0 if sys.executable supports hash randomization and tests
# pass, 1 if they fail, and False if it does not support hash
# randomization.
"""
# Note, we must return False everywhere, not None, as subprocess.call will
# sometimes return None.
# First check if the Python version supports hash randomization
    # If it doesn't have this support, it won't recognize the -R flag
p = subprocess.Popen([command, "-RV"], stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
p.communicate()
if p.returncode != 0:
return False
hash_seed = os.getenv("PYTHONHASHSEED")
if not hash_seed:
os.environ["PYTHONHASHSEED"] = str(random.randrange(2**32))
else:
if not force:
return False
function_kwargs = function_kwargs or {}
# Now run the command
commandstring = ("import sys; from %s import %s;sys.exit(%s(*%s, **%s))" %
(module, function, function, repr(function_args),
repr(function_kwargs)))
try:
p = subprocess.Popen([command, "-R", "-c", commandstring])
p.communicate()
except KeyboardInterrupt:
p.wait()
finally:
# Put the environment variable back, so that it reads correctly for
# the current Python process.
if hash_seed is None:
del os.environ["PYTHONHASHSEED"]
else:
os.environ["PYTHONHASHSEED"] = hash_seed
return p.returncode
def run_all_tests(test_args=(), test_kwargs=None,
doctest_args=(), doctest_kwargs=None,
examples_args=(), examples_kwargs=None):
"""
Run all tests.
Right now, this runs the regular tests (bin/test), the doctests
(bin/doctest), the examples (examples/all.py), and the sage tests (see
sympy/external/tests/test_sage.py).
This is what ``setup.py test`` uses.
You can pass arguments and keyword arguments to the test functions that
support them (for now, test, doctest, and the examples). See the
docstrings of those functions for a description of the available options.
For example, to run the solvers tests with colors turned off:
>>> from sympy.utilities.runtests import run_all_tests
>>> run_all_tests(test_args=("solvers",),
... test_kwargs={"colors:False"}) # doctest: +SKIP
"""
tests_successful = True
test_kwargs = test_kwargs or {}
doctest_kwargs = doctest_kwargs or {}
examples_kwargs = examples_kwargs or {'quiet': True}
try:
# Regular tests
if not test(*test_args, **test_kwargs):
# some regular test fails, so set the tests_successful
# flag to false and continue running the doctests
tests_successful = False
# Doctests
print()
if not doctest(*doctest_args, **doctest_kwargs):
tests_successful = False
# Examples
print()
sys.path.append("examples")
from all import run_examples # examples/all.py
if not run_examples(*examples_args, **examples_kwargs):
tests_successful = False
# Sage tests
if sys.platform != "win32" and not PY3 and os.path.exists("bin/test"):
# run Sage tests; Sage currently doesn't support Windows or Python 3
# Only run Sage tests if 'bin/test' is present (it is missing from
# our release because everything in the 'bin' directory gets
# installed).
dev_null = open(os.devnull, 'w')
if subprocess.call("sage -v", shell=True, stdout=dev_null,
stderr=dev_null) == 0:
if subprocess.call("sage -python bin/test "
"sympy/external/tests/test_sage.py",
shell=True, cwd=os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) != 0:
tests_successful = False
if tests_successful:
return
else:
# Return nonzero exit code
sys.exit(1)
except KeyboardInterrupt:
print()
print("DO *NOT* COMMIT!")
sys.exit(1)
def test(*paths, **kwargs):
"""
Run tests in the specified test_*.py files.
Tests in a particular test_*.py file are run if any of the given strings
in ``paths`` matches a part of the test file's path. If ``paths=[]``,
tests in all test_*.py files are run.
Notes:
- If sort=False, tests are run in random order (not default).
- Paths can be entered in native system format or in unix,
forward-slash format.
- Files that are on the blacklist can be tested by providing
their path; they are only excluded if no paths are given.
**Explanation of test results**
====== ===============================================================
Output Meaning
====== ===============================================================
. passed
F failed
X XPassed (expected to fail but passed)
f XFAILed (expected to fail and indeed failed)
s skipped
w slow
T timeout (e.g., when ``--timeout`` is used)
K KeyboardInterrupt (when running the slow tests with ``--slow``,
you can interrupt one of them without killing the test runner)
====== ===============================================================
Colors have no additional meaning and are used just to facilitate
interpreting the output.
Examples
========
>>> import sympy
Run all tests:
>>> sympy.test() # doctest: +SKIP
Run one file:
>>> sympy.test("sympy/core/tests/test_basic.py") # doctest: +SKIP
>>> sympy.test("_basic") # doctest: +SKIP
Run all tests in sympy/functions/ and some particular file:
>>> sympy.test("sympy/core/tests/test_basic.py",
... "sympy/functions") # doctest: +SKIP
Run all tests in sympy/core and sympy/utilities:
>>> sympy.test("/core", "/util") # doctest: +SKIP
Run specific test from a file:
>>> sympy.test("sympy/core/tests/test_basic.py",
... kw="test_equality") # doctest: +SKIP
Run specific test from any file:
>>> sympy.test(kw="subs") # doctest: +SKIP
Run the tests with verbose mode on:
>>> sympy.test(verbose=True) # doctest: +SKIP
Don't sort the test output:
>>> sympy.test(sort=False) # doctest: +SKIP
Turn on post-mortem pdb:
>>> sympy.test(pdb=True) # doctest: +SKIP
Turn off colors:
>>> sympy.test(colors=False) # doctest: +SKIP
Force colors, even when the output is not to a terminal (this is useful,
e.g., if you are piping to ``less -r`` and you still want colors)
    >>> sympy.test(force_colors=True) # doctest: +SKIP
The traceback verboseness can be set to "short" or "no" (default is
"short")
>>> sympy.test(tb='no') # doctest: +SKIP
The ``split`` option can be passed to split the test run into parts. The
split currently only splits the test files, though this may change in the
future. ``split`` should be a string of the form 'a/b', which will run
part ``a`` of ``b``. For instance, to run the first half of the test suite:
>>> sympy.test(split='1/2') # doctest: +SKIP
The ``time_balance`` option can be passed in conjunction with ``split``.
If ``time_balance=True`` (the default for ``sympy.test``), sympy will attempt
to split the tests such that each split takes equal time. This heuristic
for balancing is based on pre-recorded test data.
>>> sympy.test(split='1/2', time_balance=True) # doctest: +SKIP
You can disable running the tests in a separate subprocess using
``subprocess=False``. This is done to support seeding hash randomization,
which is enabled by default in the Python versions where it is supported.
If subprocess=False, hash randomization is enabled/disabled according to
whether it has been enabled or not in the calling Python process.
However, even if it is enabled, the seed cannot be printed unless it is
called from a new Python process.
Hash randomization was added in the minor Python versions 2.6.8, 2.7.3,
3.1.5, and 3.2.3, and is enabled by default in all Python versions after
and including 3.3.0.
If hash randomization is not supported ``subprocess=False`` is used
automatically.
>>> sympy.test(subprocess=False) # doctest: +SKIP
To set the hash randomization seed, set the environment variable
``PYTHONHASHSEED`` before running the tests. This can be done from within
Python using
>>> import os
>>> os.environ['PYTHONHASHSEED'] = '42' # doctest: +SKIP
Or from the command line using
$ PYTHONHASHSEED=42 ./bin/test
If the seed is not set, a random seed will be chosen.
Note that to reproduce the same hash values, you must use both the same seed
as well as the same architecture (32-bit vs. 64-bit).
"""
subprocess = kwargs.pop("subprocess", True)
rerun = kwargs.pop("rerun", 0)
# count up from 0, do not print 0
print_counter = lambda i : (print("rerun %d" % (rerun-i))
if rerun-i else None)
if subprocess:
# loop backwards so last i is 0
for i in range(rerun, -1, -1):
print_counter(i)
ret = run_in_subprocess_with_hash_randomization("_test",
function_args=paths, function_kwargs=kwargs)
if ret is False:
break
val = not bool(ret)
# exit on the first failure or if done
if not val or i == 0:
return val
# rerun even if hash randomization is not supported
for i in range(rerun, -1, -1):
print_counter(i)
val = not bool(_test(*paths, **kwargs))
if not val or i == 0:
return val
def _test(*paths, **kwargs):
"""
Internal function that actually runs the tests.
All keyword arguments from ``test()`` are passed to this function except for
``subprocess``.
Returns 0 if tests passed and 1 if they failed. See the docstring of
``test()`` for more information.
"""
verbose = kwargs.get("verbose", False)
tb = kwargs.get("tb", "short")
kw = kwargs.get("kw", None) or ()
# ensure that kw is a tuple
if isinstance(kw, str):
kw = (kw, )
post_mortem = kwargs.get("pdb", False)
colors = kwargs.get("colors", True)
force_colors = kwargs.get("force_colors", False)
sort = kwargs.get("sort", True)
seed = kwargs.get("seed", None)
if seed is None:
seed = random.randrange(100000000)
timeout = kwargs.get("timeout", False)
fail_on_timeout = kwargs.get("fail_on_timeout", False)
if ON_TRAVIS and timeout is False:
# Travis times out if no activity is seen for 10 minutes.
timeout = 595
fail_on_timeout = True
slow = kwargs.get("slow", False)
enhance_asserts = kwargs.get("enhance_asserts", False)
split = kwargs.get('split', None)
time_balance = kwargs.get('time_balance', True)
blacklist = kwargs.get('blacklist', [])
blacklist = convert_to_native_paths(blacklist)
fast_threshold = kwargs.get('fast_threshold', None)
slow_threshold = kwargs.get('slow_threshold', None)
r = PyTestReporter(verbose=verbose, tb=tb, colors=colors,
force_colors=force_colors, split=split)
t = SymPyTests(r, kw, post_mortem, seed,
fast_threshold=fast_threshold,
slow_threshold=slow_threshold)
# Disable warnings for external modules
import sympy.external
sympy.external.importtools.WARN_OLD_VERSION = False
sympy.external.importtools.WARN_NOT_INSTALLED = False
# Show deprecation warnings
import warnings
warnings.simplefilter("error", SymPyDeprecationWarning)
warnings.filterwarnings('error', '.*', DeprecationWarning, module='sympy.*')
test_files = t.get_test_files('sympy')
not_blacklisted = [f for f in test_files
if not any(b in f for b in blacklist)]
if len(paths) == 0:
matched = not_blacklisted
else:
paths = convert_to_native_paths(paths)
matched = []
for f in not_blacklisted:
basename = os.path.basename(f)
for p in paths:
if p in f or fnmatch(basename, p):
matched.append(f)
break
density = None
if time_balance:
if slow:
density = SPLIT_DENSITY_SLOW
else:
density = SPLIT_DENSITY
if split:
matched = split_list(matched, split, density=density)
t._testfiles.extend(matched)
return int(not t.test(sort=sort, timeout=timeout, slow=slow,
enhance_asserts=enhance_asserts, fail_on_timeout=fail_on_timeout))
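# Illustrative sketch, not part of the original module: the 'a/b' split strings
# accepted above can be read as "part a of b near-equal slices".  The real
# implementation is ``split_list`` (defined elsewhere in this file, with
# optional density-based balancing); ``_example_split`` is a hypothetical name.
def _example_split(items, split):
    """Return part ``a`` of ``b`` near-equal slices of ``items`` for 'a/b'."""
    a, b = map(int, split.split('/'))
    start = (a - 1) * len(items) // b
    end = a * len(items) // b
    return items[start:end]
# e.g. _example_split(list(range(10)), '1/3') -> [0, 1, 2]
#      _example_split(list(range(10)), '3/3') -> [6, 7, 8, 9]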
def doctest(*paths, **kwargs):
r"""
Runs doctests in all \*.py files in the sympy directory which match
any of the given strings in ``paths`` or all tests if paths=[].
Notes:
- Paths can be entered in native system format or in unix,
forward-slash format.
- Files that are on the blacklist can be tested by providing
their path; they are only excluded if no paths are given.
Examples
========
>>> import sympy
Run all tests:
>>> sympy.doctest() # doctest: +SKIP
Run one file:
>>> sympy.doctest("sympy/core/basic.py") # doctest: +SKIP
>>> sympy.doctest("polynomial.rst") # doctest: +SKIP
Run all tests in sympy/functions/ and some particular file:
>>> sympy.doctest("/functions", "basic.py") # doctest: +SKIP
Run any file having polynomial in its name, doc/src/modules/polynomial.rst,
sympy/functions/special/polynomials.py, and sympy/polys/polynomial.py:
>>> sympy.doctest("polynomial") # doctest: +SKIP
The ``split`` option can be passed to split the test run into parts. The
split currently only splits the test files, though this may change in the
future. ``split`` should be a string of the form 'a/b', which will run
part ``a`` of ``b``. Note that the regular doctests and the Sphinx
doctests are split independently. For instance, to run the first half of
the test suite:
>>> sympy.doctest(split='1/2') # doctest: +SKIP
The ``subprocess`` and ``verbose`` options are the same as with the function
``test()``. See the docstring of that function for more information.
"""
subprocess = kwargs.pop("subprocess", True)
rerun = kwargs.pop("rerun", 0)
# count up from 0, do not print 0
print_counter = lambda i : (print("rerun %d" % (rerun-i))
if rerun-i else None)
if subprocess:
# loop backwards so last i is 0
for i in range(rerun, -1, -1):
print_counter(i)
ret = run_in_subprocess_with_hash_randomization("_doctest",
function_args=paths, function_kwargs=kwargs)
if ret is False:
break
val = not bool(ret)
# exit on the first failure or if done
if not val or i == 0:
return val
# rerun even if hash randomization is not supported
for i in range(rerun, -1, -1):
print_counter(i)
val = not bool(_doctest(*paths, **kwargs))
if not val or i == 0:
return val
def _doctest(*paths, **kwargs):
"""
Internal function that actually runs the doctests.
All keyword arguments from ``doctest()`` are passed to this function
except for ``subprocess``.
Returns 0 if tests passed and 1 if they failed. See the docstrings of
``doctest()`` and ``test()`` for more information.
"""
normal = kwargs.get("normal", False)
verbose = kwargs.get("verbose", False)
colors = kwargs.get("colors", True)
force_colors = kwargs.get("force_colors", False)
blacklist = kwargs.get("blacklist", [])
split = kwargs.get('split', None)
blacklist.extend([
"doc/src/modules/plotting.rst", # generates live plots
"sympy/physics/gaussopt.py", # raises deprecation warning
"sympy/galgebra.py", # raises ImportError
"sympy/this.py", # Prints text to the terminal
"sympy/matrices/densearith.py", # raises deprecation warning
"sympy/matrices/densesolve.py", # raises deprecation warning
"sympy/matrices/densetools.py", # raises deprecation warning
"sympy/physics/unitsystems.py", # raises deprecation warning
"sympy/parsing/latex/_antlr/latexlexer.py", # generated code
"sympy/parsing/latex/_antlr/latexparser.py", # generated code
])
if import_module('numpy') is None:
blacklist.extend([
"sympy/plotting/experimental_lambdify.py",
"sympy/plotting/plot_implicit.py",
"examples/advanced/autowrap_integrators.py",
"examples/advanced/autowrap_ufuncify.py",
"examples/intermediate/sample.py",
"examples/intermediate/mplot2d.py",
"examples/intermediate/mplot3d.py",
"doc/src/modules/numeric-computation.rst"
])
else:
if import_module('matplotlib') is None:
blacklist.extend([
"examples/intermediate/mplot2d.py",
"examples/intermediate/mplot3d.py"
])
else:
# Use a non-windowed backend, so that the tests work on Travis
import matplotlib
matplotlib.use('Agg')
# don't display matplotlib windows
from sympy.plotting.plot import unset_show
unset_show()
if import_module('pyglet') is None:
blacklist.extend(["sympy/plotting/pygletplot"])
if import_module('theano') is None:
blacklist.extend(["doc/src/modules/numeric-computation.rst"])
# disabled because of doctest failures in asmeurer's bot
blacklist.extend([
"sympy/utilities/autowrap.py",
"examples/advanced/autowrap_integrators.py",
"examples/advanced/autowrap_ufuncify.py"
])
# blacklist these modules until issue 4840 is resolved
blacklist.extend([
"sympy/conftest.py",
"sympy/utilities/benchmarking.py"
])
blacklist = convert_to_native_paths(blacklist)
# Disable warnings for external modules
import sympy.external
sympy.external.importtools.WARN_OLD_VERSION = False
sympy.external.importtools.WARN_NOT_INSTALLED = False
# Show deprecation warnings
import warnings
warnings.simplefilter("error", SymPyDeprecationWarning)
warnings.filterwarnings('error', '.*', DeprecationWarning, module='sympy.*')
    r = PyTestReporter(verbose, split=split, colors=colors,
                       force_colors=force_colors)
t = SymPyDocTests(r, normal)
test_files = t.get_test_files('sympy')
test_files.extend(t.get_test_files('examples', init_only=False))
not_blacklisted = [f for f in test_files
if not any(b in f for b in blacklist)]
if len(paths) == 0:
matched = not_blacklisted
else:
# take only what was requested...but not blacklisted items
# and allow for partial match anywhere or fnmatch of name
paths = convert_to_native_paths(paths)
matched = []
for f in not_blacklisted:
basename = os.path.basename(f)
for p in paths:
if p in f or fnmatch(basename, p):
matched.append(f)
break
if split:
matched = split_list(matched, split)
t._testfiles.extend(matched)
# run the tests and record the result for this *py portion of the tests
if t._testfiles:
failed = not t.test()
else:
failed = False
# N.B.
# --------------------------------------------------------------------
# Here we test *.rst files at or below doc/src. Code from these must
# be self supporting in terms of imports since there is no importing
# of necessary modules by doctest.testfile. If you try to pass *.py
# files through this they might fail because they will lack the needed
# imports and smarter parsing that can be done with source code.
#
test_files = t.get_test_files('doc/src', '*.rst', init_only=False)
test_files.sort()
not_blacklisted = [f for f in test_files
if not any(b in f for b in blacklist)]
if len(paths) == 0:
matched = not_blacklisted
else:
# Take only what was requested as long as it's not on the blacklist.
# Paths were already made native in *py tests so don't repeat here.
# There's no chance of having a *py file slip through since we
# only have *rst files in test_files.
matched = []
for f in not_blacklisted:
basename = os.path.basename(f)
for p in paths:
if p in f or fnmatch(basename, p):
matched.append(f)
break
if split:
matched = split_list(matched, split)
setup_pprint()
first_report = True
for rst_file in matched:
if not os.path.isfile(rst_file):
continue
old_displayhook = sys.displayhook
try:
out = sympytestfile(
rst_file, module_relative=False, encoding='utf-8',
optionflags=pdoctest.ELLIPSIS | pdoctest.NORMALIZE_WHITESPACE |
pdoctest.IGNORE_EXCEPTION_DETAIL)
finally:
# make sure we return to the original displayhook in case some
# doctest has changed that
sys.displayhook = old_displayhook
rstfailed, tested = out
if tested:
failed = rstfailed or failed
if first_report:
first_report = False
msg = 'rst doctests start'
if not t._testfiles:
r.start(msg=msg)
else:
r.write_center(msg)
print()
# use as the id, everything past the first 'sympy'
file_id = rst_file[rst_file.find('sympy') + len('sympy') + 1:]
print(file_id, end=" ")
            # get at least the name out so it is known who is being tested
wid = r.terminal_width - len(file_id) - 1 # update width
test_file = '[%s]' % (tested)
report = '[%s]' % (rstfailed or 'OK')
print(''.join(
[test_file, ' '*(wid - len(test_file) - len(report)), report])
)
# the doctests for *py will have printed this message already if there was
# a failure, so now only print it if there was intervening reporting by
# testing the *rst as evidenced by first_report no longer being True.
if not first_report and failed:
print()
print("DO *NOT* COMMIT!")
return int(failed)
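# ``sp`` matches split specifications of the form accepted by ``split_list``
# below, e.g. '2/5' is captured as the groups ('2', '5').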
sp = re.compile(r'([0-9]+)/([1-9][0-9]*)')
def split_list(l, split, density=None):
"""
    Splits a list into part ``a`` of ``b``.
    ``split`` should be a string of the form 'a/b'. For instance, '1/3' would
    return the first of three parts.
If the length of the list is not divisible by the number of splits, the
last split will have more items.
`density` may be specified as a list. If specified,
tests will be balanced so that each split has as equal-as-possible
amount of mass according to `density`.
>>> from sympy.utilities.runtests import split_list
>>> a = list(range(10))
>>> split_list(a, '1/3')
[0, 1, 2]
>>> split_list(a, '2/3')
[3, 4, 5]
>>> split_list(a, '3/3')
[6, 7, 8, 9]
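    A uniform ``density`` reduces to the plain split (illustrative example,
    skipped as a doctest):
    >>> split_list(a, '1/3', density=[1]*10) # doctest: +SKIP
    [0, 1, 2]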
"""
m = sp.match(split)
if not m:
raise ValueError("split must be a string of the form a/b where a and b are ints")
i, t = map(int, m.groups())
if not density:
return l[(i - 1)*len(l)//t : i*len(l)//t]
# normalize density
tot = sum(density)
density = [x / tot for x in density]
def density_inv(x):
"""Interpolate the inverse to the cumulative
distribution function given by density"""
if x <= 0:
return 0
if x >= sum(density):
return 1
# find the first time the cumulative sum surpasses x
# and linearly interpolate
cumm = 0
for i, d in enumerate(density):
cumm += d
if cumm >= x:
break
frac = (d - (cumm - x)) / d
return (i + frac) / len(density)
lower_frac = density_inv((i - 1) / t)
higher_frac = density_inv(i / t)
return l[int(lower_frac*len(l)) : int(higher_frac*len(l))]
from collections import namedtuple
SymPyTestResults = namedtuple('TestResults', 'failed attempted')
def sympytestfile(filename, module_relative=True, name=None, package=None,
globs=None, verbose=None, report=True, optionflags=0,
extraglobs=None, raise_on_error=False,
parser=pdoctest.DocTestParser(), encoding=None):
"""
Test examples in the given file. Return (#failures, #tests).
Optional keyword arg ``module_relative`` specifies how filenames
should be interpreted:
- If ``module_relative`` is True (the default), then ``filename``
specifies a module-relative path. By default, this path is
relative to the calling module's directory; but if the
``package`` argument is specified, then it is relative to that
package. To ensure os-independence, ``filename`` should use
"/" characters to separate path segments, and should not
be an absolute path (i.e., it may not begin with "/").
- If ``module_relative`` is False, then ``filename`` specifies an
os-specific path. The path may be absolute or relative (to
the current working directory).
Optional keyword arg ``name`` gives the name of the test; by default
use the file's basename.
Optional keyword argument ``package`` is a Python package or the
name of a Python package whose directory should be used as the
base directory for a module relative filename. If no package is
specified, then the calling module's directory is used as the base
directory for module relative filenames. It is an error to
specify ``package`` if ``module_relative`` is False.
Optional keyword arg ``globs`` gives a dict to be used as the globals
when executing examples; by default, use {}. A copy of this dict
is actually used for each docstring, so that each docstring's
examples start with a clean slate.
Optional keyword arg ``extraglobs`` gives a dictionary that should be
merged into the globals that are used to execute examples. By
default, no extra globals are used.
Optional keyword arg ``verbose`` prints lots of stuff if true, prints
only failures if false; by default, it's true iff "-v" is in sys.argv.
Optional keyword arg ``report`` prints a summary at the end when true,
else prints nothing at the end. In verbose mode, the summary is
detailed, else very brief (in fact, empty if all tests passed).
Optional keyword arg ``optionflags`` or's together module constants,
and defaults to 0. Possible values (see the docs for details):
- DONT_ACCEPT_TRUE_FOR_1
- DONT_ACCEPT_BLANKLINE
- NORMALIZE_WHITESPACE
- ELLIPSIS
- SKIP
- IGNORE_EXCEPTION_DETAIL
- REPORT_UDIFF
- REPORT_CDIFF
- REPORT_NDIFF
- REPORT_ONLY_FIRST_FAILURE
Optional keyword arg ``raise_on_error`` raises an exception on the
first unexpected exception or failure. This allows failures to be
post-mortem debugged.
Optional keyword arg ``parser`` specifies a DocTestParser (or
subclass) that should be used to extract tests from the files.
Optional keyword arg ``encoding`` specifies an encoding that should
be used to convert the file to unicode.
Advanced tomfoolery: testmod runs methods of a local instance of
class doctest.Tester, then merges the results into (or creates)
global Tester instance doctest.master. Methods of doctest.master
can be called directly too, if you want to do something unusual.
Passing report=0 to testmod is especially useful then, to delay
displaying a summary. Invoke doctest.master.summarize(verbose)
when you're done fiddling.
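    A minimal illustrative call (the path is only an example):
    >>> from sympy.utilities.runtests import sympytestfile # doctest: +SKIP
    >>> sympytestfile('doc/src/tutorial/tutorial.rst',
    ...               module_relative=False) # doctest: +SKIP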
"""
if package and not module_relative:
raise ValueError("Package may only be specified for module-"
"relative paths.")
# Relativize the path
if not PY3:
text, filename = pdoctest._load_testfile(
filename, package, module_relative)
if encoding is not None:
text = text.decode(encoding)
else:
text, filename = pdoctest._load_testfile(
filename, package, module_relative, encoding)
# If no name was given, then use the file's name.
if name is None:
name = os.path.basename(filename)
# Assemble the globals.
if globs is None:
globs = {}
else:
globs = globs.copy()
if extraglobs is not None:
globs.update(extraglobs)
if '__name__' not in globs:
globs['__name__'] = '__main__'
if raise_on_error:
runner = pdoctest.DebugRunner(verbose=verbose, optionflags=optionflags)
else:
runner = SymPyDocTestRunner(verbose=verbose, optionflags=optionflags)
runner._checker = SymPyOutputChecker()
# Read the file, convert it to a test, and run it.
test = parser.get_doctest(text, globs, name, filename, 0)
runner.run(test, compileflags=future_flags)
if report:
runner.summarize()
if pdoctest.master is None:
pdoctest.master = runner
else:
pdoctest.master.merge(runner)
return SymPyTestResults(runner.failures, runner.tries)
class SymPyTests(object):
def __init__(self, reporter, kw="", post_mortem=False,
seed=None, fast_threshold=None, slow_threshold=None):
self._post_mortem = post_mortem
self._kw = kw
self._count = 0
self._root_dir = sympy_dir
self._reporter = reporter
self._reporter.root_dir(self._root_dir)
self._testfiles = []
self._seed = seed if seed is not None else random.random()
# Defaults in seconds, from human / UX design limits
# http://www.nngroup.com/articles/response-times-3-important-limits/
#
# These defaults are *NOT* set in stone as we are measuring different
# things, so others feel free to come up with a better yardstick :)
if fast_threshold:
self._fast_threshold = float(fast_threshold)
else:
self._fast_threshold = 0.1
if slow_threshold:
self._slow_threshold = float(slow_threshold)
else:
self._slow_threshold = 10
def test(self, sort=False, timeout=False, slow=False,
enhance_asserts=False, fail_on_timeout=False):
"""
Runs the tests returning True if all tests pass, otherwise False.
If sort=False run tests in random order.
"""
if sort:
self._testfiles.sort()
elif slow:
pass
else:
random.seed(self._seed)
random.shuffle(self._testfiles)
self._reporter.start(self._seed)
for f in self._testfiles:
try:
self.test_file(f, sort, timeout, slow,
enhance_asserts, fail_on_timeout)
except KeyboardInterrupt:
print(" interrupted by user")
self._reporter.finish()
raise
return self._reporter.finish()
def _enhance_asserts(self, source):
from ast import (NodeTransformer, Compare, Name, Store, Load, Tuple,
Assign, BinOp, Str, Mod, Assert, parse, fix_missing_locations)
ops = {"Eq": '==', "NotEq": '!=', "Lt": '<', "LtE": '<=',
"Gt": '>', "GtE": '>=', "Is": 'is', "IsNot": 'is not',
"In": 'in', "NotIn": 'not in'}
class Transform(NodeTransformer):
def visit_Assert(self, stmt):
if isinstance(stmt.test, Compare):
compare = stmt.test
values = [compare.left] + compare.comparators
names = [ "_%s" % i for i, _ in enumerate(values) ]
names_store = [ Name(n, Store()) for n in names ]
names_load = [ Name(n, Load()) for n in names ]
target = Tuple(names_store, Store())
value = Tuple(values, Load())
assign = Assign([target], value)
new_compare = Compare(names_load[0], compare.ops, names_load[1:])
msg_format = "\n%s " + "\n%s ".join([ ops[op.__class__.__name__] for op in compare.ops ]) + "\n%s"
msg = BinOp(Str(msg_format), Mod(), Tuple(names_load, Load()))
test = Assert(new_compare, msg, lineno=stmt.lineno, col_offset=stmt.col_offset)
return [assign, test]
else:
return stmt
tree = parse(source)
new_tree = Transform().visit(tree)
return fix_missing_locations(new_tree)
def test_file(self, filename, sort=True, timeout=False, slow=False,
enhance_asserts=False, fail_on_timeout=False):
reporter = self._reporter
funcs = []
try:
gl = {'__file__': filename}
try:
if PY3:
open_file = lambda: open(filename, encoding="utf8")
else:
open_file = lambda: open(filename)
with open_file() as f:
source = f.read()
if self._kw:
for l in source.splitlines():
if l.lstrip().startswith('def '):
if any(l.find(k) != -1 for k in self._kw):
break
else:
return
if enhance_asserts:
try:
source = self._enhance_asserts(source)
except ImportError:
pass
code = compile(source, filename, "exec")
exec_(code, gl)
except (SystemExit, KeyboardInterrupt):
raise
except ImportError:
reporter.import_error(filename, sys.exc_info())
return
except Exception:
reporter.test_exception(sys.exc_info())
clear_cache()
self._count += 1
random.seed(self._seed)
disabled = gl.get("disabled", False)
if not disabled:
# we need to filter only those functions that begin with 'test_'
# We have to be careful about decorated functions. As long as
# the decorator uses functools.wraps, we can detect it.
funcs = []
for f in gl:
if (f.startswith("test_") and (inspect.isfunction(gl[f])
or inspect.ismethod(gl[f]))):
func = gl[f]
# Handle multiple decorators
while hasattr(func, '__wrapped__'):
func = func.__wrapped__
if inspect.getsourcefile(func) == filename:
funcs.append(gl[f])
if slow:
funcs = [f for f in funcs if getattr(f, '_slow', False)]
# Sorting of XFAILed functions isn't fixed yet :-(
funcs.sort(key=lambda x: inspect.getsourcelines(x)[1])
i = 0
while i < len(funcs):
if inspect.isgeneratorfunction(funcs[i]):
# some tests can be generators, that return the actual
# test functions. We unpack it below:
f = funcs.pop(i)
for fg in f():
func = fg[0]
args = fg[1:]
                            # bind the current func/args now; a plain
                            # ``lambda: func(*args)`` would close over the loop
                            # variables and always call the last generated test
                            fgw = lambda func=func, args=args: func(*args)
funcs.insert(i, fgw)
i += 1
else:
i += 1
# drop functions that are not selected with the keyword expression:
funcs = [x for x in funcs if self.matches(x)]
if not funcs:
return
except Exception:
reporter.entering_filename(filename, len(funcs))
raise
reporter.entering_filename(filename, len(funcs))
if not sort:
random.shuffle(funcs)
for f in funcs:
start = time.time()
reporter.entering_test(f)
try:
if getattr(f, '_slow', False) and not slow:
raise Skipped("Slow")
if timeout:
self._timeout(f, timeout, fail_on_timeout)
else:
random.seed(self._seed)
f()
except KeyboardInterrupt:
if getattr(f, '_slow', False):
reporter.test_skip("KeyboardInterrupt")
else:
raise
except Exception:
if timeout:
signal.alarm(0) # Disable the alarm. It could not be handled before.
t, v, tr = sys.exc_info()
if t is AssertionError:
reporter.test_fail((t, v, tr))
if self._post_mortem:
pdb.post_mortem(tr)
elif t.__name__ == "Skipped":
reporter.test_skip(v)
elif t.__name__ == "XFail":
reporter.test_xfail()
elif t.__name__ == "XPass":
reporter.test_xpass(v)
else:
reporter.test_exception((t, v, tr))
if self._post_mortem:
pdb.post_mortem(tr)
else:
reporter.test_pass()
taken = time.time() - start
if taken > self._slow_threshold:
reporter.slow_test_functions.append((f.__name__, taken))
if getattr(f, '_slow', False) and slow:
if taken < self._fast_threshold:
reporter.fast_test_functions.append((f.__name__, taken))
reporter.leaving_filename()
def _timeout(self, function, timeout, fail_on_timeout):
def callback(x, y):
signal.alarm(0)
if fail_on_timeout:
raise TimeOutError("Timed out after %d seconds" % timeout)
else:
raise Skipped("Timeout")
signal.signal(signal.SIGALRM, callback)
signal.alarm(timeout) # Set an alarm with a given timeout
function()
signal.alarm(0) # Disable the alarm
def matches(self, x):
"""
Does the keyword expression self._kw match "x"? Returns True/False.
Always returns True if self._kw is "".
"""
if not self._kw:
return True
for kw in self._kw:
if x.__name__.find(kw) != -1:
return True
return False
def get_test_files(self, dir, pat='test_*.py'):
"""
Returns the list of test_*.py (default) files at or below directory
``dir`` relative to the sympy home directory.
"""
dir = os.path.join(self._root_dir, convert_to_native_paths([dir])[0])
g = []
for path, folders, files in os.walk(dir):
g.extend([os.path.join(path, f) for f in files if fnmatch(f, pat)])
return sorted([os.path.normcase(gi) for gi in g])
class SymPyDocTests(object):
def __init__(self, reporter, normal):
self._count = 0
self._root_dir = sympy_dir
self._reporter = reporter
self._reporter.root_dir(self._root_dir)
self._normal = normal
self._testfiles = []
def test(self):
"""
Runs the tests and returns True if all tests pass, otherwise False.
"""
self._reporter.start()
for f in self._testfiles:
try:
self.test_file(f)
except KeyboardInterrupt:
print(" interrupted by user")
self._reporter.finish()
raise
return self._reporter.finish()
def test_file(self, filename):
clear_cache()
from sympy.core.compatibility import StringIO
rel_name = filename[len(self._root_dir) + 1:]
dirname, file = os.path.split(filename)
module = rel_name.replace(os.sep, '.')[:-3]
if rel_name.startswith("examples"):
# Examples files do not have __init__.py files,
# So we have to temporarily extend sys.path to import them
sys.path.insert(0, dirname)
module = file[:-3] # remove ".py"
setup_pprint()
try:
module = pdoctest._normalize_module(module)
tests = SymPyDocTestFinder().find(module)
except (SystemExit, KeyboardInterrupt):
raise
except ImportError:
self._reporter.import_error(filename, sys.exc_info())
return
finally:
if rel_name.startswith("examples"):
del sys.path[0]
tests = [test for test in tests if len(test.examples) > 0]
# By default tests are sorted by alphabetical order by function name.
# We sort by line number so one can edit the file sequentially from
# bottom to top. However, if there are decorated functions, their line
# numbers will be too large and for now one must just search for these
# by text and function name.
tests.sort(key=lambda x: -x.lineno)
if not tests:
return
self._reporter.entering_filename(filename, len(tests))
for test in tests:
assert len(test.examples) != 0
# check if there are external dependencies which need to be met
if '_doctest_depends_on' in test.globs:
has_dependencies = self._process_dependencies(test.globs['_doctest_depends_on'])
if has_dependencies is not True:
# has_dependencies is either True or a message
self._reporter.test_skip(v="\n" + has_dependencies)
continue
if self._reporter._verbose:
self._reporter.write("\n{} ".format(test.name))
runner = SymPyDocTestRunner(optionflags=pdoctest.ELLIPSIS |
pdoctest.NORMALIZE_WHITESPACE |
pdoctest.IGNORE_EXCEPTION_DETAIL)
runner._checker = SymPyOutputChecker()
old = sys.stdout
new = StringIO()
sys.stdout = new
# If the testing is normal, the doctests get importing magic to
            # provide the global namespace. If not normal (the default) then
            # they must run on their own; all imports must be explicit within
# a function's docstring. Once imported that import will be
# available to the rest of the tests in a given function's
# docstring (unless clear_globs=True below).
if not self._normal:
test.globs = {}
# if this is uncommented then all the test would get is what
# comes by default with a "from sympy import *"
#exec('from sympy import *') in test.globs
test.globs['print_function'] = print_function
try:
f, t = runner.run(test, compileflags=future_flags,
out=new.write, clear_globs=False)
except KeyboardInterrupt:
raise
finally:
sys.stdout = old
if f > 0:
self._reporter.doctest_fail(test.name, new.getvalue())
else:
self._reporter.test_pass()
self._reporter.leaving_filename()
def get_test_files(self, dir, pat='*.py', init_only=True):
r"""
Returns the list of \*.py files (default) from which docstrings
will be tested which are at or below directory ``dir``. By default,
only those that have an __init__.py in their parent directory
and do not start with ``test_`` will be included.
"""
def importable(x):
"""
Checks if given pathname x is an importable module by checking for
__init__.py file.
Returns True/False.
Currently we only test if the __init__.py file exists in the
directory with the file "x" (in theory we should also test all the
parent dirs).
"""
init_py = os.path.join(os.path.dirname(x), "__init__.py")
return os.path.exists(init_py)
dir = os.path.join(self._root_dir, convert_to_native_paths([dir])[0])
g = []
for path, folders, files in os.walk(dir):
g.extend([os.path.join(path, f) for f in files
if not f.startswith('test_') and fnmatch(f, pat)])
if init_only:
# skip files that are not importable (i.e. missing __init__.py)
g = [x for x in g if importable(x)]
return [os.path.normcase(gi) for gi in g]
def _process_dependencies(self, deps):
"""
        Checks whether the dependencies in ``deps`` are met. Returns ``True``
        if they are, otherwise returns a message describing what is missing
        (in which case the test should be skipped).
"""
executables = deps.get('exe', None)
moduledeps = deps.get('modules', None)
viewers = deps.get('disable_viewers', None)
pyglet = deps.get('pyglet', None)
# print deps
if executables is not None:
for ex in executables:
found = find_executable(ex)
if found is None:
return "Could not find %s" % ex
if moduledeps is not None:
for extmod in moduledeps:
if extmod == 'matplotlib':
matplotlib = import_module(
'matplotlib',
__import__kwargs={'fromlist':
['pyplot', 'cm', 'collections']},
min_module_version='1.0.0', catch=(RuntimeError,))
if matplotlib is not None:
pass
else:
return "Could not import matplotlib"
else:
# TODO min version support
mod = import_module(extmod)
if mod is not None:
version = "unknown"
if hasattr(mod, '__version__'):
version = mod.__version__
else:
return "Could not import %s" % mod
if viewers is not None:
import tempfile
tempdir = tempfile.mkdtemp()
os.environ['PATH'] = '%s:%s' % (tempdir, os.environ['PATH'])
if PY3:
vw = '#!/usr/bin/env python3\n' \
'import sys\n' \
'if len(sys.argv) <= 1:\n' \
' exit("wrong number of args")\n'
else:
vw = '#!/usr/bin/env python\n' \
'import sys\n' \
'if len(sys.argv) <= 1:\n' \
' exit("wrong number of args")\n'
for viewer in viewers:
with open(os.path.join(tempdir, viewer), 'w') as fh:
fh.write(vw)
# make the file executable
os.chmod(os.path.join(tempdir, viewer),
stat.S_IREAD | stat.S_IWRITE | stat.S_IXUSR)
if pyglet:
# monkey-patch pyglet s.t. it does not open a window during
# doctesting
import pyglet
class DummyWindow(object):
def __init__(self, *args, **kwargs):
self.has_exit=True
self.width = 600
self.height = 400
def set_vsync(self, x):
pass
def switch_to(self):
pass
def push_handlers(self, x):
pass
def close(self):
pass
pyglet.window.Window = DummyWindow
return True
class SymPyDocTestFinder(DocTestFinder):
"""
A class used to extract the DocTests that are relevant to a given
object, from its docstring and the docstrings of its contained
objects. Doctests can currently be extracted from the following
object types: modules, functions, classes, methods, staticmethods,
classmethods, and properties.
    Modified from doctest's version to look harder for code that
    appears to come from a different module. For example, the @vectorize
decorator makes it look like functions come from multidimensional.py
even though their code exists elsewhere.
"""
def _find(self, tests, obj, name, module, source_lines, globs, seen):
"""
Find tests for the given object and any contained objects, and
add them to ``tests``.
"""
if self._verbose:
print('Finding tests in %s' % name)
# If we've already processed this object, then ignore it.
if id(obj) in seen:
return
seen[id(obj)] = 1
# Make sure we don't run doctests for classes outside of sympy, such
# as in numpy or scipy.
if inspect.isclass(obj):
if obj.__module__.split('.')[0] != 'sympy':
return
# Find a test for this object, and add it to the list of tests.
test = self._get_test(obj, name, module, globs, source_lines)
if test is not None:
tests.append(test)
if not self._recurse:
return
# Look for tests in a module's contained objects.
if inspect.ismodule(obj):
for rawname, val in obj.__dict__.items():
# Recurse to functions & classes.
if inspect.isfunction(val) or inspect.isclass(val):
# Make sure we don't run doctests functions or classes
# from different modules
if val.__module__ != module.__name__:
continue
assert self._from_module(module, val), \
"%s is not in module %s (rawname %s)" % (val, module, rawname)
try:
valname = '%s.%s' % (name, rawname)
self._find(tests, val, valname, module,
source_lines, globs, seen)
except KeyboardInterrupt:
raise
# Look for tests in a module's __test__ dictionary.
for valname, val in getattr(obj, '__test__', {}).items():
if not isinstance(valname, string_types):
raise ValueError("SymPyDocTestFinder.find: __test__ keys "
"must be strings: %r" %
(type(valname),))
if not (inspect.isfunction(val) or inspect.isclass(val) or
inspect.ismethod(val) or inspect.ismodule(val) or
isinstance(val, string_types)):
raise ValueError("SymPyDocTestFinder.find: __test__ values "
"must be strings, functions, methods, "
"classes, or modules: %r" %
(type(val),))
valname = '%s.__test__.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
# Look for tests in a class's contained objects.
if inspect.isclass(obj):
for valname, val in obj.__dict__.items():
# Special handling for staticmethod/classmethod.
if isinstance(val, staticmethod):
val = getattr(obj, valname)
if isinstance(val, classmethod):
val = getattr(obj, valname).__func__
# Recurse to methods, properties, and nested classes.
if ((inspect.isfunction(unwrap(val)) or
inspect.isclass(val) or
isinstance(val, property)) and
self._from_module(module, val)):
# Make sure we don't run doctests functions or classes
# from different modules
if isinstance(val, property):
if hasattr(val.fget, '__module__'):
if val.fget.__module__ != module.__name__:
continue
else:
if val.__module__ != module.__name__:
continue
assert self._from_module(module, val), \
"%s is not in module %s (valname %s)" % (
val, module, valname)
valname = '%s.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
def _get_test(self, obj, name, module, globs, source_lines):
"""
Return a DocTest for the given object, if it defines a docstring;
otherwise, return None.
"""
lineno = None
# Extract the object's docstring. If it doesn't have one,
# then return None (no test for this object).
if isinstance(obj, string_types):
# obj is a string in the case for objects in the polys package.
# Note that source_lines is a binary string (compiled polys
# modules), which can't be handled by _find_lineno so determine
# the line number here.
docstring = obj
matches = re.findall(r"line \d+", name)
assert len(matches) == 1, \
"string '%s' does not contain lineno " % name
            # NOTE: this is not the exact line number but it's better than no
            # lineno ;)
lineno = int(matches[0][5:])
else:
try:
if obj.__doc__ is None:
docstring = ''
else:
docstring = obj.__doc__
if not isinstance(docstring, string_types):
docstring = str(docstring)
except (TypeError, AttributeError):
docstring = ''
# Don't bother if the docstring is empty.
if self._exclude_empty and not docstring:
return None
# check that properties have a docstring because _find_lineno
# assumes it
if isinstance(obj, property):
if obj.fget.__doc__ is None:
return None
# Find the docstring's location in the file.
if lineno is None:
obj = unwrap(obj)
# handling of properties is not implemented in _find_lineno so do
# it here
if hasattr(obj, 'func_closure') and obj.func_closure is not None:
tobj = obj.func_closure[0].cell_contents
elif isinstance(obj, property):
tobj = obj.fget
else:
tobj = obj
lineno = self._find_lineno(tobj, source_lines)
if lineno is None:
return None
# Return a DocTest for this object.
if module is None:
filename = None
else:
filename = getattr(module, '__file__', module.__name__)
if filename[-4:] in (".pyc", ".pyo"):
filename = filename[:-1]
if hasattr(obj, '_doctest_depends_on'):
globs['_doctest_depends_on'] = obj._doctest_depends_on
else:
globs['_doctest_depends_on'] = {}
return self._parser.get_doctest(docstring, globs, name,
filename, lineno)
class SymPyDocTestRunner(DocTestRunner):
"""
A class used to run DocTest test cases, and accumulate statistics.
The ``run`` method is used to process a single DocTest case. It
returns a tuple ``(f, t)``, where ``t`` is the number of test cases
tried, and ``f`` is the number of test cases that failed.
Modified from the doctest version to not reset the sys.displayhook (see
issue 5140).
See the docstring of the original DocTestRunner for more information.
"""
def run(self, test, compileflags=None, out=None, clear_globs=True):
"""
Run the examples in ``test``, and display the results using the
writer function ``out``.
The examples are run in the namespace ``test.globs``. If
``clear_globs`` is true (the default), then this namespace will
be cleared after the test runs, to help with garbage
collection. If you would like to examine the namespace after
the test completes, then use ``clear_globs=False``.
``compileflags`` gives the set of flags that should be used by
the Python compiler when running the examples. If not
specified, then it will default to the set of future-import
flags that apply to ``globs``.
The output of each example is checked using
``SymPyDocTestRunner.check_output``, and the results are
formatted by the ``SymPyDocTestRunner.report_*`` methods.
"""
self.test = test
if compileflags is None:
compileflags = pdoctest._extract_future_flags(test.globs)
save_stdout = sys.stdout
if out is None:
out = save_stdout.write
sys.stdout = self._fakeout
# Patch pdb.set_trace to restore sys.stdout during interactive
# debugging (so it's not still redirected to self._fakeout).
# Note that the interactive output will go to *our*
# save_stdout, even if that's not the real sys.stdout; this
# allows us to write test cases for the set_trace behavior.
save_set_trace = pdb.set_trace
self.debugger = pdoctest._OutputRedirectingPdb(save_stdout)
self.debugger.reset()
pdb.set_trace = self.debugger.set_trace
# Patch linecache.getlines, so we can see the example's source
# when we're inside the debugger.
self.save_linecache_getlines = pdoctest.linecache.getlines
linecache.getlines = self.__patched_linecache_getlines
try:
test.globs['print_function'] = print_function
return self.__run(test, compileflags, out)
finally:
sys.stdout = save_stdout
pdb.set_trace = save_set_trace
linecache.getlines = self.save_linecache_getlines
if clear_globs:
test.globs.clear()
# We have to override the name mangled methods.
SymPyDocTestRunner._SymPyDocTestRunner__patched_linecache_getlines = \
DocTestRunner._DocTestRunner__patched_linecache_getlines
SymPyDocTestRunner._SymPyDocTestRunner__run = DocTestRunner._DocTestRunner__run
SymPyDocTestRunner._SymPyDocTestRunner__record_outcome = \
DocTestRunner._DocTestRunner__record_outcome
class SymPyOutputChecker(pdoctest.OutputChecker):
"""
Compared to the OutputChecker from the stdlib our OutputChecker class
supports numerical comparison of floats occurring in the output of the
doctest examples
"""
def __init__(self):
# NOTE OutputChecker is an old-style class with no __init__ method,
# so we can't call the base class version of __init__ here
got_floats = r'(\d+\.\d*|\.\d+)'
# floats in the 'want' string may contain ellipses
want_floats = got_floats + r'(\.{3})?'
front_sep = r'\s|\+|\-|\*|,'
back_sep = front_sep + r'|j|e'
fbeg = r'^%s(?=%s|$)' % (got_floats, back_sep)
fmidend = r'(?<=%s)%s(?=%s|$)' % (front_sep, got_floats, back_sep)
self.num_got_rgx = re.compile(r'(%s|%s)' %(fbeg, fmidend))
fbeg = r'^%s(?=%s|$)' % (want_floats, back_sep)
fmidend = r'(?<=%s)%s(?=%s|$)' % (front_sep, want_floats, back_sep)
self.num_want_rgx = re.compile(r'(%s|%s)' %(fbeg, fmidend))
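        # For example, in a want string such as "x = 1.5 + 0.333...j" the
        # regexes above are meant to pick out 1.5 and 0.333... for numerical
        # comparison (the trailing ellipsis is only allowed in expected output).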
def check_output(self, want, got, optionflags):
"""
Return True iff the actual output from an example (`got`)
matches the expected output (`want`). These strings are
always considered to match if they are identical; but
depending on what option flags the test runner is using,
several non-exact match types are also possible. See the
documentation for `TestRunner` for more information about
option flags.
"""
# Handle the common case first, for efficiency:
# if they're string-identical, always return true.
if got == want:
return True
# TODO parse integers as well ?
# Parse floats and compare them. If some of the parsed floats contain
# ellipses, skip the comparison.
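        # e.g. a want of "0.333..." accepts any float in that position, while
        # a got of "0.33330001" still matches a want of "0.3333" because the
        # difference is below the 1e-5 tolerance used below (illustrative values).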
matches = self.num_got_rgx.finditer(got)
numbers_got = [match.group(1) for match in matches] # list of strs
matches = self.num_want_rgx.finditer(want)
numbers_want = [match.group(1) for match in matches] # list of strs
if len(numbers_got) != len(numbers_want):
return False
if len(numbers_got) > 0:
nw_ = []
for ng, nw in zip(numbers_got, numbers_want):
if '...' in nw:
nw_.append(ng)
continue
else:
nw_.append(nw)
if abs(float(ng)-float(nw)) > 1e-5:
return False
got = self.num_got_rgx.sub(r'%s', got)
got = got % tuple(nw_)
# <BLANKLINE> can be used as a special sequence to signify a
# blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
if not (optionflags & pdoctest.DONT_ACCEPT_BLANKLINE):
# Replace <BLANKLINE> in want with a blank line.
want = re.sub(r'(?m)^%s\s*?$' % re.escape(pdoctest.BLANKLINE_MARKER),
'', want)
# If a line in got contains only spaces, then remove the
# spaces.
got = re.sub(r'(?m)^\s*?$', '', got)
if got == want:
return True
# This flag causes doctest to ignore any differences in the
# contents of whitespace strings. Note that this can be used
# in conjunction with the ELLIPSIS flag.
if optionflags & pdoctest.NORMALIZE_WHITESPACE:
got = ' '.join(got.split())
want = ' '.join(want.split())
if got == want:
return True
# The ELLIPSIS flag says to let the sequence "..." in `want`
# match any substring in `got`.
if optionflags & pdoctest.ELLIPSIS:
if pdoctest._ellipsis_match(want, got):
return True
# We didn't find any match; return false.
return False
class Reporter(object):
"""
Parent class for all reporters.
"""
pass
class PyTestReporter(Reporter):
"""
    Py.test-like reporter. Should produce output identical to py.test.
"""
def __init__(self, verbose=False, tb="short", colors=True,
force_colors=False, split=None):
self._verbose = verbose
self._tb_style = tb
self._colors = colors
self._force_colors = force_colors
self._xfailed = 0
self._xpassed = []
self._failed = []
self._failed_doctest = []
self._passed = 0
self._skipped = 0
self._exceptions = []
self._terminal_width = None
self._default_width = 80
self._split = split
self._active_file = ''
self._active_f = None
# TODO: Should these be protected?
self.slow_test_functions = []
self.fast_test_functions = []
# this tracks the x-position of the cursor (useful for positioning
# things on the screen), without the need for any readline library:
self._write_pos = 0
self._line_wrap = False
def root_dir(self, dir):
self._root_dir = dir
@property
def terminal_width(self):
if self._terminal_width is not None:
return self._terminal_width
def findout_terminal_width():
if sys.platform == "win32":
# Windows support is based on:
#
# http://code.activestate.com/recipes/
# 440694-determine-size-of-console-window-on-windows/
from ctypes import windll, create_string_buffer
h = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
if res:
import struct
(_, _, _, _, _, left, _, right, _, _, _) = \
struct.unpack("hhhhHhhhhhh", csbi.raw)
return right - left
else:
return self._default_width
if hasattr(sys.stdout, 'isatty') and not sys.stdout.isatty():
return self._default_width # leave PIPEs alone
try:
process = subprocess.Popen(['stty', '-a'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout = process.stdout.read()
if PY3:
stdout = stdout.decode("utf-8")
except (OSError, IOError):
pass
else:
# We support the following output formats from stty:
#
# 1) Linux -> columns 80
# 2) OS X -> 80 columns
# 3) Solaris -> columns = 80
re_linux = r"columns\s+(?P<columns>\d+);"
re_osx = r"(?P<columns>\d+)\s*columns;"
re_solaris = r"columns\s+=\s+(?P<columns>\d+);"
for regex in (re_linux, re_osx, re_solaris):
match = re.search(regex, stdout)
if match is not None:
columns = match.group('columns')
                        try:
                            width = int(columns)
                        except ValueError:
                            # the columns value could not be parsed; try the
                            # next regex instead of leaving ``width`` unbound
                            continue
                        if width != 0:
                            return width
return self._default_width
width = findout_terminal_width()
self._terminal_width = width
return width
def write(self, text, color="", align="left", width=None,
force_colors=False):
"""
        Prints text to the screen.
It uses sys.stdout.write(), so no readline library is necessary.
Parameters
==========
color : choose from the colors below, "" means default color
align : "left"/"right", "left" is a normal print, "right" is aligned on
the right-hand side of the screen, filled with spaces if
necessary
width : the screen width
"""
color_templates = (
("Black", "0;30"),
("Red", "0;31"),
("Green", "0;32"),
("Brown", "0;33"),
("Blue", "0;34"),
("Purple", "0;35"),
("Cyan", "0;36"),
("LightGray", "0;37"),
("DarkGray", "1;30"),
("LightRed", "1;31"),
("LightGreen", "1;32"),
("Yellow", "1;33"),
("LightBlue", "1;34"),
("LightPurple", "1;35"),
("LightCyan", "1;36"),
("White", "1;37"),
)
colors = {}
for name, value in color_templates:
colors[name] = value
c_normal = '\033[0m'
c_color = '\033[%sm'
if width is None:
width = self.terminal_width
if align == "right":
if self._write_pos + len(text) > width:
# we don't fit on the current line, create a new line
self.write("\n")
self.write(" "*(width - self._write_pos - len(text)))
if not self._force_colors and hasattr(sys.stdout, 'isatty') and not \
sys.stdout.isatty():
# the stdout is not a terminal, this for example happens if the
# output is piped to less, e.g. "bin/test | less". In this case,
# the terminal control sequences would be printed verbatim, so
# don't use any colors.
color = ""
elif sys.platform == "win32":
# Windows consoles don't support ANSI escape sequences
color = ""
elif not self._colors:
color = ""
if self._line_wrap:
if text[0] != "\n":
sys.stdout.write("\n")
# Avoid UnicodeEncodeError when printing out test failures
if PY3 and IS_WINDOWS:
text = text.encode('raw_unicode_escape').decode('utf8', 'ignore')
elif PY3 and not sys.stdout.encoding.lower().startswith('utf'):
text = text.encode(sys.stdout.encoding, 'backslashreplace'
).decode(sys.stdout.encoding)
if color == "":
sys.stdout.write(text)
else:
sys.stdout.write("%s%s%s" %
(c_color % colors[color], text, c_normal))
sys.stdout.flush()
l = text.rfind("\n")
if l == -1:
self._write_pos += len(text)
else:
self._write_pos = len(text) - l - 1
self._line_wrap = self._write_pos >= width
self._write_pos %= width
def write_center(self, text, delim="="):
width = self.terminal_width
if text != "":
text = " %s " % text
idx = (width - len(text)) // 2
t = delim*idx + text + delim*(width - idx - len(text))
self.write(t + "\n")
def write_exception(self, e, val, tb):
# remove the first item, as that is always runtests.py
tb = tb.tb_next
t = traceback.format_exception(e, val, tb)
self.write("".join(t))
def start(self, seed=None, msg="test process starts"):
self.write_center(msg)
executable = sys.executable
v = tuple(sys.version_info)
python_version = "%s.%s.%s-%s-%s" % v
implementation = platform.python_implementation()
if implementation == 'PyPy':
implementation += " %s.%s.%s-%s-%s" % sys.pypy_version_info
self.write("executable: %s (%s) [%s]\n" %
(executable, python_version, implementation))
from .misc import ARCH
self.write("architecture: %s\n" % ARCH)
from sympy.core.cache import USE_CACHE
self.write("cache: %s\n" % USE_CACHE)
from sympy.core.compatibility import GROUND_TYPES, HAS_GMPY
version = ''
if GROUND_TYPES =='gmpy':
if HAS_GMPY == 1:
import gmpy
elif HAS_GMPY == 2:
import gmpy2 as gmpy
version = gmpy.version()
self.write("ground types: %s %s\n" % (GROUND_TYPES, version))
numpy = import_module('numpy')
self.write("numpy: %s\n" % (None if not numpy else numpy.__version__))
if seed is not None:
self.write("random seed: %d\n" % seed)
from .misc import HASH_RANDOMIZATION
self.write("hash randomization: ")
hash_seed = os.getenv("PYTHONHASHSEED") or '0'
if HASH_RANDOMIZATION and (hash_seed == "random" or int(hash_seed)):
self.write("on (PYTHONHASHSEED=%s)\n" % hash_seed)
else:
self.write("off\n")
if self._split:
self.write("split: %s\n" % self._split)
self.write('\n')
self._t_start = clock()
def finish(self):
self._t_end = clock()
self.write("\n")
global text, linelen
text = "tests finished: %d passed, " % self._passed
linelen = len(text)
def add_text(mytext):
global text, linelen
"""Break new text if too long."""
if linelen + len(mytext) > self.terminal_width:
text += '\n'
linelen = 0
text += mytext
linelen += len(mytext)
if len(self._failed) > 0:
add_text("%d failed, " % len(self._failed))
if len(self._failed_doctest) > 0:
add_text("%d failed, " % len(self._failed_doctest))
if self._skipped > 0:
add_text("%d skipped, " % self._skipped)
if self._xfailed > 0:
add_text("%d expected to fail, " % self._xfailed)
if len(self._xpassed) > 0:
add_text("%d expected to fail but passed, " % len(self._xpassed))
if len(self._exceptions) > 0:
add_text("%d exceptions, " % len(self._exceptions))
add_text("in %.2f seconds" % (self._t_end - self._t_start))
if self.slow_test_functions:
self.write_center('slowest tests', '_')
sorted_slow = sorted(self.slow_test_functions, key=lambda r: r[1])
for slow_func_name, taken in sorted_slow:
print('%s - Took %.3f seconds' % (slow_func_name, taken))
if self.fast_test_functions:
self.write_center('unexpectedly fast tests', '_')
sorted_fast = sorted(self.fast_test_functions,
key=lambda r: r[1])
for fast_func_name, taken in sorted_fast:
print('%s - Took %.3f seconds' % (fast_func_name, taken))
if len(self._xpassed) > 0:
self.write_center("xpassed tests", "_")
for e in self._xpassed:
self.write("%s: %s\n" % (e[0], e[1]))
self.write("\n")
if self._tb_style != "no" and len(self._exceptions) > 0:
for e in self._exceptions:
filename, f, (t, val, tb) = e
self.write_center("", "_")
if f is None:
s = "%s" % filename
else:
s = "%s:%s" % (filename, f.__name__)
self.write_center(s, "_")
self.write_exception(t, val, tb)
self.write("\n")
if self._tb_style != "no" and len(self._failed) > 0:
for e in self._failed:
filename, f, (t, val, tb) = e
self.write_center("", "_")
self.write_center("%s:%s" % (filename, f.__name__), "_")
self.write_exception(t, val, tb)
self.write("\n")
if self._tb_style != "no" and len(self._failed_doctest) > 0:
for e in self._failed_doctest:
filename, msg = e
self.write_center("", "_")
self.write_center("%s" % filename, "_")
self.write(msg)
self.write("\n")
self.write_center(text)
ok = len(self._failed) == 0 and len(self._exceptions) == 0 and \
len(self._failed_doctest) == 0
if not ok:
self.write("DO *NOT* COMMIT!\n")
return ok
def entering_filename(self, filename, n):
rel_name = filename[len(self._root_dir) + 1:]
self._active_file = rel_name
self._active_file_error = False
self.write(rel_name)
self.write("[%d] " % n)
def leaving_filename(self):
self.write(" ")
if self._active_file_error:
self.write("[FAIL]", "Red", align="right")
else:
self.write("[OK]", "Green", align="right")
self.write("\n")
if self._verbose:
self.write("\n")
def entering_test(self, f):
self._active_f = f
if self._verbose:
self.write("\n" + f.__name__ + " ")
def test_xfail(self):
self._xfailed += 1
self.write("f", "Green")
def test_xpass(self, v):
message = str(v)
self._xpassed.append((self._active_file, message))
self.write("X", "Green")
def test_fail(self, exc_info):
self._failed.append((self._active_file, self._active_f, exc_info))
self.write("F", "Red")
self._active_file_error = True
def doctest_fail(self, name, error_msg):
# the first line contains "******", remove it:
error_msg = "\n".join(error_msg.split("\n")[1:])
self._failed_doctest.append((name, error_msg))
self.write("F", "Red")
self._active_file_error = True
def test_pass(self, char="."):
self._passed += 1
if self._verbose:
self.write("ok", "Green")
else:
self.write(char, "Green")
def test_skip(self, v=None):
char = "s"
self._skipped += 1
if v is not None:
message = str(v)
if message == "KeyboardInterrupt":
char = "K"
elif message == "Timeout":
char = "T"
elif message == "Slow":
char = "w"
if self._verbose:
if v is not None:
self.write(message + ' ', "Blue")
else:
self.write(" - ", "Blue")
self.write(char, "Blue")
def test_exception(self, exc_info):
self._exceptions.append((self._active_file, self._active_f, exc_info))
if exc_info[0] is TimeOutError:
self.write("T", "Red")
else:
self.write("E", "Red")
self._active_file_error = True
def import_error(self, filename, exc_info):
self._exceptions.append((filename, None, exc_info))
rel_name = filename[len(self._root_dir) + 1:]
self.write(rel_name)
self.write("[?] Failed to import", "Red")
self.write(" ")
self.write("[FAIL]", "Red", align="right")
self.write("\n")
sympy_dir = get_sympy_dir()
| gpl-2.0 |
Erotemic/hotspotter | _scripts/main.py | 2 | 5485 | #!/usr/bin/env python
# For py2exe
import PIL.TiffImagePlugin
import PIL.Image
import PIL.PngImagePlugin
import PIL.JpegImagePlugin
import PIL.GifImagePlugin
import PIL.PpmImagePlugin
import argparse
import inspect
import os, sys
from os.path import join, dirname
def emergency_msgbox(title, msg):
'Make a non modal critical QMessageBox.'
    from PyQt4.Qt import QMessageBox, Qt
    msgBox = QMessageBox(None)
    msgBox.setAttribute(Qt.WA_DeleteOnClose)
msgBox.setStandardButtons(QMessageBox.Ok)
msgBox.setWindowTitle(title)
msgBox.setText(msg)
msgBox.setModal(False)
msgBox.open(msgBox.close)
msgBox.show()
return msgBox
def ensure_tpl_libs():
print('Ensuring third party libraries')
try: # Ensure that TPL's lib files are in PATH
#from hotspotter.standalone import find_hotspotter_root_dir
print('Can import hotspotter?')
import hotspotter
print(' ... yes')
TPL_LIB_DIR = join(dirname(hotspotter.__file__), 'tpl/lib', sys.platform)
sys.path.insert(0, TPL_LIB_DIR)
ext = {'linux2':'.ln','darwin':'.mac','win32':'.exe'}[sys.platform]
# Ensure that hesaff is executable
hesaff_fname = TPL_LIB_DIR+'/hesaff'+ext
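        # The check below appears to read the owner execute bit out of the
        # octal file mode string (it assumes Python 2 style ``oct()`` output);
        # treat it as a heuristic rather than a portable permission check.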
is_executable = lambda fname: bin(int(oct(os.stat(fname).st_mode)[4]))[4]
if not is_executable(hesaff_fname):
os.system('chmod 775 '+hesaff_fname)
print('Can import cv2?')
import cv2
print(' ... yes')
print('Can import hotspotter.tpl.pyflann?')
import hotspotter.tpl.pyflann
print(' ... yes')
except Exception as ex:
print('\n\n!!! TPL ERROR !!!')
PYTHONPATH = os.getenv('PYTHONPATH')
PATH = os.getenv('PATH')
print('PYTHONPATH = '+repr(PYTHONPATH))
print('PATH = '+repr(PATH))
print('''You must download hotspotter\'s 3rd party libraries before you can run it.
git clone https://github.com/Erotemic:tpl-hotspotter.git tpl''')
raise
def parse_arguments():
print('Parsing arguments')
parser = argparse.ArgumentParser(description='HotSpotter - Instance Recognition', prefix_chars='+-')
def_on = {'action':'store_false', 'default':True}
def_off = {'action':'store_true', 'default':False}
parser.add_argument('-l', '--log-all',
dest='logall_bit', help='Writes all logs', **def_off)
parser.add_argument('--cmd', dest='cmd_bit',
help='Forces command line mode', **def_off)
parser.add_argument('-g', '--gui-off', dest='gui_bit',
help='Runs HotSpotter in command line mode', **def_on)
parser.add_argument('-a', '--autoload-off', dest='autoload_bit',
help='Starts HotSpotter without loading a database', **def_on)
parser.add_argument('-dp', '--delete-preferences', dest='delpref_bit',
help='Deletes preferences in ~/.hotspotter', **def_off)
args, unknown = parser.parse_known_args()
return args
def initQtApp():
# Attach to QtConsole's QApplication if able
from PyQt4.Qt import QCoreApplication, QApplication
app = QCoreApplication.instance()
isRootApp = app is None
if isRootApp: # if not in qtconsole
# configure matplotlib
import matplotlib
print('Configuring matplotlib for Qt4')
matplotlib.use('Qt4Agg')
# Run new root application
print('Starting new QApplication')
app = QApplication(sys.argv)
else:
print('Running using parent QApplication')
return app, isRootApp
def executeEventLoop(app):
print('Running the application event loop')
sys.stdout.flush()
sys.exit(app.exec_())
# MAIN ENTRY POINT
if __name__ == '__main__':
# 1) Multiprocess Initialization
from multiprocessing import freeze_support
freeze_support()
# 2) TPL Initialization
ensure_tpl_libs()
# 3) Qt Initialization
args = parse_arguments()
app, isRootApp = initQtApp()
# 4) HotSpotter Initialization
from hotspotter.other.logger import hsl
from hotspotter.standalone import delete_preference_dir
from hotspotter.Facade import Facade
if args.logall_bit:
hsl.enable_global_logs()
if args.delpref_bit:
delete_preference_dir()
# 5) HotSpotter Execution
fac = Facade(use_gui=args.gui_bit, autoload=args.autoload_bit)
# Register Facade functions into current namespace
# ### SNIPIT: Namespace Class Functions
for (name, value) in inspect.getmembers(Facade, predicate=inspect.ismethod):
if name.find('_') != 0:
exec('def '+name+'(*args, **kwargs): fac.'+name+'(*args, **kwargs)')
# ### ---
# Defined Aliases
stat, status = [lambda : fac.print_status()]*2
removec, = [lambda : fac.remove_cid()]
rename, = [lambda new_name : fac.rename_cid(new_name)]
# Get developer variables
# ### SNIPIT: Execute File
with open('dev.py', 'r') as devfile:
devpy = devfile.read()
exec(devpy)
# ### ----
run_exec = isRootApp
if args.cmd_bit:
# Start IPython command line mode
from hotspotter.helpers import in_IPython, have_IPython
run_exec = False
if not in_IPython() and have_IPython():
import IPython
IPython.embed()
# Run Event Loop, but do not block QTConsole or IPython
if run_exec:
executeEventLoop(app)
| apache-2.0 |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/pandas/tests/sparse/test_arithmetics.py | 18 | 19342 | import numpy as np
import pandas as pd
import pandas.util.testing as tm
class TestSparseArrayArithmetics(object):
_base = np.array
_klass = pd.SparseArray
def _assert(self, a, b):
tm.assert_numpy_array_equal(a, b)
def _check_numeric_ops(self, a, b, a_dense, b_dense):
with np.errstate(invalid='ignore', divide='ignore'):
            # Unfortunately, trying to wrap the computation of each expected
            # value with np.errstate() is too tedious.
# sparse & sparse
self._assert((a + b).to_dense(), a_dense + b_dense)
self._assert((b + a).to_dense(), b_dense + a_dense)
self._assert((a - b).to_dense(), a_dense - b_dense)
self._assert((b - a).to_dense(), b_dense - a_dense)
self._assert((a * b).to_dense(), a_dense * b_dense)
self._assert((b * a).to_dense(), b_dense * a_dense)
# pandas uses future division
self._assert((a / b).to_dense(), a_dense * 1.0 / b_dense)
self._assert((b / a).to_dense(), b_dense * 1.0 / a_dense)
# ToDo: FIXME in GH 13843
if not (self._base == pd.Series and a.dtype == 'int64'):
self._assert((a // b).to_dense(), a_dense // b_dense)
self._assert((b // a).to_dense(), b_dense // a_dense)
self._assert((a % b).to_dense(), a_dense % b_dense)
self._assert((b % a).to_dense(), b_dense % a_dense)
self._assert((a ** b).to_dense(), a_dense ** b_dense)
self._assert((b ** a).to_dense(), b_dense ** a_dense)
# sparse & dense
self._assert((a + b_dense).to_dense(), a_dense + b_dense)
self._assert((b_dense + a).to_dense(), b_dense + a_dense)
self._assert((a - b_dense).to_dense(), a_dense - b_dense)
self._assert((b_dense - a).to_dense(), b_dense - a_dense)
self._assert((a * b_dense).to_dense(), a_dense * b_dense)
self._assert((b_dense * a).to_dense(), b_dense * a_dense)
# pandas uses future division
self._assert((a / b_dense).to_dense(), a_dense * 1.0 / b_dense)
self._assert((b_dense / a).to_dense(), b_dense * 1.0 / a_dense)
# ToDo: FIXME in GH 13843
if not (self._base == pd.Series and a.dtype == 'int64'):
self._assert((a // b_dense).to_dense(), a_dense // b_dense)
self._assert((b_dense // a).to_dense(), b_dense // a_dense)
self._assert((a % b_dense).to_dense(), a_dense % b_dense)
self._assert((b_dense % a).to_dense(), b_dense % a_dense)
self._assert((a ** b_dense).to_dense(), a_dense ** b_dense)
self._assert((b_dense ** a).to_dense(), b_dense ** a_dense)
def _check_bool_result(self, res):
assert isinstance(res, self._klass)
assert res.dtype == np.bool
assert isinstance(res.fill_value, bool)
def _check_comparison_ops(self, a, b, a_dense, b_dense):
with np.errstate(invalid='ignore'):
            # Unfortunately, trying to wrap the computation of each expected
            # value with np.errstate() is too tedious.
#
# sparse & sparse
self._check_bool_result(a == b)
self._assert((a == b).to_dense(), a_dense == b_dense)
self._check_bool_result(a != b)
self._assert((a != b).to_dense(), a_dense != b_dense)
self._check_bool_result(a >= b)
self._assert((a >= b).to_dense(), a_dense >= b_dense)
self._check_bool_result(a <= b)
self._assert((a <= b).to_dense(), a_dense <= b_dense)
self._check_bool_result(a > b)
self._assert((a > b).to_dense(), a_dense > b_dense)
self._check_bool_result(a < b)
self._assert((a < b).to_dense(), a_dense < b_dense)
# sparse & dense
self._check_bool_result(a == b_dense)
self._assert((a == b_dense).to_dense(), a_dense == b_dense)
self._check_bool_result(a != b_dense)
self._assert((a != b_dense).to_dense(), a_dense != b_dense)
self._check_bool_result(a >= b_dense)
self._assert((a >= b_dense).to_dense(), a_dense >= b_dense)
self._check_bool_result(a <= b_dense)
self._assert((a <= b_dense).to_dense(), a_dense <= b_dense)
self._check_bool_result(a > b_dense)
self._assert((a > b_dense).to_dense(), a_dense > b_dense)
self._check_bool_result(a < b_dense)
self._assert((a < b_dense).to_dense(), a_dense < b_dense)
def _check_logical_ops(self, a, b, a_dense, b_dense):
# sparse & sparse
self._check_bool_result(a & b)
self._assert((a & b).to_dense(), a_dense & b_dense)
self._check_bool_result(a | b)
self._assert((a | b).to_dense(), a_dense | b_dense)
# sparse & dense
self._check_bool_result(a & b_dense)
self._assert((a & b_dense).to_dense(), a_dense & b_dense)
self._check_bool_result(a | b_dense)
self._assert((a | b_dense).to_dense(), a_dense | b_dense)
def test_float_scalar(self):
values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
for kind in ['integer', 'block']:
a = self._klass(values, kind=kind)
self._check_numeric_ops(a, 1, values, 1)
self._check_numeric_ops(a, 0, values, 0)
self._check_numeric_ops(a, 3, values, 3)
a = self._klass(values, kind=kind, fill_value=0)
self._check_numeric_ops(a, 1, values, 1)
self._check_numeric_ops(a, 0, values, 0)
self._check_numeric_ops(a, 3, values, 3)
a = self._klass(values, kind=kind, fill_value=2)
self._check_numeric_ops(a, 1, values, 1)
self._check_numeric_ops(a, 0, values, 0)
self._check_numeric_ops(a, 3, values, 3)
def test_float_scalar_comparison(self):
values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
for kind in ['integer', 'block']:
a = self._klass(values, kind=kind)
self._check_comparison_ops(a, 1, values, 1)
self._check_comparison_ops(a, 0, values, 0)
self._check_comparison_ops(a, 3, values, 3)
a = self._klass(values, kind=kind, fill_value=0)
self._check_comparison_ops(a, 1, values, 1)
self._check_comparison_ops(a, 0, values, 0)
self._check_comparison_ops(a, 3, values, 3)
a = self._klass(values, kind=kind, fill_value=2)
self._check_comparison_ops(a, 1, values, 1)
self._check_comparison_ops(a, 0, values, 0)
self._check_comparison_ops(a, 3, values, 3)
def test_float_same_index(self):
# when sp_index are the same
for kind in ['integer', 'block']:
values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
rvalues = self._base([np.nan, 2, 3, 4, np.nan, 0, 1, 3, 2, np.nan])
a = self._klass(values, kind=kind)
b = self._klass(rvalues, kind=kind)
self._check_numeric_ops(a, b, values, rvalues)
values = self._base([0., 1., 2., 6., 0., 0., 1., 2., 1., 0.])
rvalues = self._base([0., 2., 3., 4., 0., 0., 1., 3., 2., 0.])
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind, fill_value=0)
self._check_numeric_ops(a, b, values, rvalues)
def test_float_same_index_comparison(self):
# when sp_index are the same
for kind in ['integer', 'block']:
values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
rvalues = self._base([np.nan, 2, 3, 4, np.nan, 0, 1, 3, 2, np.nan])
a = self._klass(values, kind=kind)
b = self._klass(rvalues, kind=kind)
self._check_comparison_ops(a, b, values, rvalues)
values = self._base([0., 1., 2., 6., 0., 0., 1., 2., 1., 0.])
rvalues = self._base([0., 2., 3., 4., 0., 0., 1., 3., 2., 0.])
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind, fill_value=0)
self._check_comparison_ops(a, b, values, rvalues)
def test_float_array(self):
values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
rvalues = self._base([2, np.nan, 2, 3, np.nan, 0, 1, 5, 2, np.nan])
for kind in ['integer', 'block']:
a = self._klass(values, kind=kind)
b = self._klass(rvalues, kind=kind)
self._check_numeric_ops(a, b, values, rvalues)
self._check_numeric_ops(a, b * 0, values, rvalues * 0)
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind)
self._check_numeric_ops(a, b, values, rvalues)
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind, fill_value=0)
self._check_numeric_ops(a, b, values, rvalues)
a = self._klass(values, kind=kind, fill_value=1)
b = self._klass(rvalues, kind=kind, fill_value=2)
self._check_numeric_ops(a, b, values, rvalues)
def test_float_array_different_kind(self):
values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
rvalues = self._base([2, np.nan, 2, 3, np.nan, 0, 1, 5, 2, np.nan])
a = self._klass(values, kind='integer')
b = self._klass(rvalues, kind='block')
self._check_numeric_ops(a, b, values, rvalues)
self._check_numeric_ops(a, b * 0, values, rvalues * 0)
a = self._klass(values, kind='integer', fill_value=0)
b = self._klass(rvalues, kind='block')
self._check_numeric_ops(a, b, values, rvalues)
a = self._klass(values, kind='integer', fill_value=0)
b = self._klass(rvalues, kind='block', fill_value=0)
self._check_numeric_ops(a, b, values, rvalues)
a = self._klass(values, kind='integer', fill_value=1)
b = self._klass(rvalues, kind='block', fill_value=2)
self._check_numeric_ops(a, b, values, rvalues)
def test_float_array_comparison(self):
values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
rvalues = self._base([2, np.nan, 2, 3, np.nan, 0, 1, 5, 2, np.nan])
for kind in ['integer', 'block']:
a = self._klass(values, kind=kind)
b = self._klass(rvalues, kind=kind)
self._check_comparison_ops(a, b, values, rvalues)
self._check_comparison_ops(a, b * 0, values, rvalues * 0)
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind)
self._check_comparison_ops(a, b, values, rvalues)
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind, fill_value=0)
self._check_comparison_ops(a, b, values, rvalues)
a = self._klass(values, kind=kind, fill_value=1)
b = self._klass(rvalues, kind=kind, fill_value=2)
self._check_comparison_ops(a, b, values, rvalues)
def test_int_array(self):
# have to specify dtype explicitly until fixing GH 667
dtype = np.int64
values = self._base([0, 1, 2, 0, 0, 0, 1, 2, 1, 0], dtype=dtype)
rvalues = self._base([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=dtype)
for kind in ['integer', 'block']:
a = self._klass(values, dtype=dtype, kind=kind)
assert a.dtype == dtype
b = self._klass(rvalues, dtype=dtype, kind=kind)
assert b.dtype == dtype
self._check_numeric_ops(a, b, values, rvalues)
self._check_numeric_ops(a, b * 0, values, rvalues * 0)
a = self._klass(values, fill_value=0, dtype=dtype, kind=kind)
assert a.dtype == dtype
b = self._klass(rvalues, dtype=dtype, kind=kind)
assert b.dtype == dtype
self._check_numeric_ops(a, b, values, rvalues)
a = self._klass(values, fill_value=0, dtype=dtype, kind=kind)
assert a.dtype == dtype
b = self._klass(rvalues, fill_value=0, dtype=dtype, kind=kind)
assert b.dtype == dtype
self._check_numeric_ops(a, b, values, rvalues)
a = self._klass(values, fill_value=1, dtype=dtype, kind=kind)
assert a.dtype == dtype
b = self._klass(rvalues, fill_value=2, dtype=dtype, kind=kind)
assert b.dtype == dtype
self._check_numeric_ops(a, b, values, rvalues)
def test_int_array_comparison(self):
# int32 not implemented at the moment, so only int64 is tested here
for dtype in ['int64']:
values = self._base([0, 1, 2, 0, 0, 0, 1, 2, 1, 0], dtype=dtype)
rvalues = self._base([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=dtype)
for kind in ['integer', 'block']:
a = self._klass(values, dtype=dtype, kind=kind)
b = self._klass(rvalues, dtype=dtype, kind=kind)
self._check_comparison_ops(a, b, values, rvalues)
self._check_comparison_ops(a, b * 0, values, rvalues * 0)
a = self._klass(values, dtype=dtype, kind=kind, fill_value=0)
b = self._klass(rvalues, dtype=dtype, kind=kind)
self._check_comparison_ops(a, b, values, rvalues)
a = self._klass(values, dtype=dtype, kind=kind, fill_value=0)
b = self._klass(rvalues, dtype=dtype, kind=kind, fill_value=0)
self._check_comparison_ops(a, b, values, rvalues)
a = self._klass(values, dtype=dtype, kind=kind, fill_value=1)
b = self._klass(rvalues, dtype=dtype, kind=kind, fill_value=2)
self._check_comparison_ops(a, b, values, rvalues)
def test_bool_same_index(self):
# GH 14000
# when sp_index are the same
for kind in ['integer', 'block']:
values = self._base([True, False, True, True], dtype=np.bool)
rvalues = self._base([True, False, True, True], dtype=np.bool)
for fill_value in [True, False, np.nan]:
a = self._klass(values, kind=kind, dtype=np.bool,
fill_value=fill_value)
b = self._klass(rvalues, kind=kind, dtype=np.bool,
fill_value=fill_value)
self._check_logical_ops(a, b, values, rvalues)
def test_bool_array_logical(self):
# GH 14000
# when sp_index are the same
for kind in ['integer', 'block']:
values = self._base([True, False, True, False, True, True],
dtype=np.bool)
rvalues = self._base([True, False, False, True, False, True],
dtype=np.bool)
for fill_value in [True, False, np.nan]:
a = self._klass(values, kind=kind, dtype=np.bool,
fill_value=fill_value)
b = self._klass(rvalues, kind=kind, dtype=np.bool,
fill_value=fill_value)
self._check_logical_ops(a, b, values, rvalues)
def test_mixed_array_float_int(self):
for rdtype in ['int64']:
values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
rvalues = self._base([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=rdtype)
for kind in ['integer', 'block']:
a = self._klass(values, kind=kind)
b = self._klass(rvalues, kind=kind)
assert b.dtype == rdtype
self._check_numeric_ops(a, b, values, rvalues)
self._check_numeric_ops(a, b * 0, values, rvalues * 0)
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind)
assert b.dtype == rdtype
self._check_numeric_ops(a, b, values, rvalues)
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind, fill_value=0)
assert b.dtype == rdtype
self._check_numeric_ops(a, b, values, rvalues)
a = self._klass(values, kind=kind, fill_value=1)
b = self._klass(rvalues, kind=kind, fill_value=2)
assert b.dtype == rdtype
self._check_numeric_ops(a, b, values, rvalues)
def test_mixed_array_comparison(self):
# int32 not implemented at the moment, so only int64 is tested here
for rdtype in ['int64']:
values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
rvalues = self._base([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=rdtype)
for kind in ['integer', 'block']:
a = self._klass(values, kind=kind)
b = self._klass(rvalues, kind=kind)
assert b.dtype == rdtype
self._check_comparison_ops(a, b, values, rvalues)
self._check_comparison_ops(a, b * 0, values, rvalues * 0)
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind)
assert b.dtype == rdtype
self._check_comparison_ops(a, b, values, rvalues)
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind, fill_value=0)
assert b.dtype == rdtype
self._check_comparison_ops(a, b, values, rvalues)
a = self._klass(values, kind=kind, fill_value=1)
b = self._klass(rvalues, kind=kind, fill_value=2)
assert b.dtype == rdtype
self._check_comparison_ops(a, b, values, rvalues)
class TestSparseSeriesArithmetic(TestSparseArrayArithmetics):
_base = pd.Series
_klass = pd.SparseSeries
def _assert(self, a, b):
tm.assert_series_equal(a, b)
def test_alignment(self):
da = pd.Series(np.arange(4))
db = pd.Series(np.arange(4), index=[1, 2, 3, 4])
sa = pd.SparseSeries(np.arange(4), dtype=np.int64, fill_value=0)
sb = pd.SparseSeries(np.arange(4), index=[1, 2, 3, 4],
dtype=np.int64, fill_value=0)
self._check_numeric_ops(sa, sb, da, db)
sa = pd.SparseSeries(np.arange(4), dtype=np.int64, fill_value=np.nan)
sb = pd.SparseSeries(np.arange(4), index=[1, 2, 3, 4],
dtype=np.int64, fill_value=np.nan)
self._check_numeric_ops(sa, sb, da, db)
da = pd.Series(np.arange(4))
db = pd.Series(np.arange(4), index=[10, 11, 12, 13])
sa = pd.SparseSeries(np.arange(4), dtype=np.int64, fill_value=0)
sb = pd.SparseSeries(np.arange(4), index=[10, 11, 12, 13],
dtype=np.int64, fill_value=0)
self._check_numeric_ops(sa, sb, da, db)
sa = pd.SparseSeries(np.arange(4), dtype=np.int64, fill_value=np.nan)
sb = pd.SparseSeries(np.arange(4), index=[10, 11, 12, 13],
dtype=np.int64, fill_value=np.nan)
self._check_numeric_ops(sa, sb, da, db)
| mit |
IndraVikas/scikit-learn | sklearn/utils/tests/test_extmath.py | 130 | 16270 | # Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from scipy import stats
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.extmath import density
from sklearn.utils.extmath import logsumexp
from sklearn.utils.extmath import norm, squared_norm
from sklearn.utils.extmath import randomized_svd
from sklearn.utils.extmath import row_norms
from sklearn.utils.extmath import weighted_mode
from sklearn.utils.extmath import cartesian
from sklearn.utils.extmath import log_logistic
from sklearn.utils.extmath import fast_dot, _fast_dot
from sklearn.utils.extmath import svd_flip
from sklearn.utils.extmath import _batch_mean_variance_update
from sklearn.utils.extmath import _deterministic_vector_sign_flip
from sklearn.datasets.samples_generator import make_low_rank_matrix
def test_density():
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 5))
X[1, 2] = 0
X[5, 3] = 0
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_coo = sparse.coo_matrix(X)
X_lil = sparse.lil_matrix(X)
for X_ in (X_csr, X_csc, X_coo, X_lil):
assert_equal(density(X_), density(X))
def test_uniform_weights():
# with uniform weights, results should be identical to stats.mode
rng = np.random.RandomState(0)
x = rng.randint(10, size=(10, 5))
weights = np.ones(x.shape)
for axis in (None, 0, 1):
mode, score = stats.mode(x, axis)
mode2, score2 = weighted_mode(x, weights, axis)
assert_true(np.all(mode == mode2))
assert_true(np.all(score == score2))
def test_random_weights():
# set this up so that each row should have a weighted mode of 6,
# with a score that is easily reproduced
mode_result = 6
rng = np.random.RandomState(0)
x = rng.randint(mode_result, size=(100, 10))
w = rng.random_sample(x.shape)
x[:, :5] = mode_result
w[:, :5] += 1
mode, score = weighted_mode(x, w, axis=1)
assert_array_equal(mode, mode_result)
assert_array_almost_equal(score.ravel(), w[:, :5].sum(1))
def test_logsumexp():
# Try to add some smallish numbers in logspace
x = np.array([1e-40] * 1000000)
logx = np.log(x)
assert_almost_equal(np.exp(logsumexp(logx)), x.sum())
X = np.vstack([x, x])
logX = np.vstack([logx, logx])
assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0))
assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1))
def test_randomized_svd_low_rank():
# Check that extmath.randomized_svd is consistent with linalg.svd
n_samples = 100
n_features = 500
rank = 5
k = 10
# generate a matrix X of approximate effective rank `rank` and no noise
# component (very structured signal):
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
U, s, V = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(X, k)
assert_equal(Ua.shape, (n_samples, k))
assert_equal(sa.shape, (k,))
assert_equal(Va.shape, (k, n_features))
# ensure that the singular values of both methods are equal up to the real
# rank of the matrix
assert_almost_equal(s[:k], sa)
# check the singular vectors too (while not checking the sign)
assert_almost_equal(np.dot(U[:, :k], V[:k, :]), np.dot(Ua, Va))
# check the sparse matrix representation
X = sparse.csr_matrix(X)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(X, k)
assert_almost_equal(s[:rank], sa[:rank])
def test_norm_squared_norm():
X = np.random.RandomState(42).randn(50, 63)
X *= 100 # check stability
X += 200
assert_almost_equal(np.linalg.norm(X.ravel()), norm(X))
assert_almost_equal(norm(X) ** 2, squared_norm(X), decimal=6)
assert_almost_equal(np.linalg.norm(X), np.sqrt(squared_norm(X)), decimal=6)
def test_row_norms():
X = np.random.RandomState(42).randn(100, 100)
sq_norm = (X ** 2).sum(axis=1)
assert_array_almost_equal(sq_norm, row_norms(X, squared=True), 5)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(X))
Xcsr = sparse.csr_matrix(X, dtype=np.float32)
assert_array_almost_equal(sq_norm, row_norms(Xcsr, squared=True), 5)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(Xcsr))
def test_randomized_svd_low_rank_with_noise():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
# generate a matrix X with structured approximate rank `rank` and an
# important noisy component
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.05)
# compute the singular values of X using the fast approximate method with
# iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5)
# the iterated power method is helping getting rid of the noise:
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_infinite_rank():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
# let us try again without 'low_rank component': just regularly but slowly
# decreasing singular values: the rank of the data matrix is infinite
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=1.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.1)
# compute the singular values of X using the fast approximate method with
# iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5)
# the iterated power method is still managing to get most of the structure
# at the requested rank
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_transpose_consistency():
# Check that transposing the design matrix has limited impact
n_samples = 100
n_features = 500
rank = 4
k = 10
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
U1, s1, V1 = randomized_svd(X, k, n_iter=3, transpose=False,
random_state=0)
U2, s2, V2 = randomized_svd(X, k, n_iter=3, transpose=True,
random_state=0)
U3, s3, V3 = randomized_svd(X, k, n_iter=3, transpose='auto',
random_state=0)
U4, s4, V4 = linalg.svd(X, full_matrices=False)
assert_almost_equal(s1, s4[:k], decimal=3)
assert_almost_equal(s2, s4[:k], decimal=3)
assert_almost_equal(s3, s4[:k], decimal=3)
assert_almost_equal(np.dot(U1, V1), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
assert_almost_equal(np.dot(U2, V2), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
# in this case 'auto' is equivalent to transpose
assert_almost_equal(s2, s3)
def test_svd_flip():
# Check that svd_flip works in both situations, and reconstructs input.
rs = np.random.RandomState(1999)
n_samples = 20
n_features = 10
X = rs.randn(n_samples, n_features)
# Check matrix reconstruction
U, S, V = linalg.svd(X, full_matrices=False)
U1, V1 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U1 * S, V1), X, decimal=6)
# Check transposed matrix reconstruction
XT = X.T
U, S, V = linalg.svd(XT, full_matrices=False)
U2, V2 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U2 * S, V2), XT, decimal=6)
# Check that different flip methods are equivalent under reconstruction
U_flip1, V_flip1 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U_flip1 * S, V_flip1), XT, decimal=6)
U_flip2, V_flip2 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U_flip2 * S, V_flip2), XT, decimal=6)
def test_randomized_svd_sign_flip():
a = np.array([[2.0, 0.0], [0.0, 1.0]])
u1, s1, v1 = randomized_svd(a, 2, flip_sign=True, random_state=41)
for seed in range(10):
u2, s2, v2 = randomized_svd(a, 2, flip_sign=True, random_state=seed)
assert_almost_equal(u1, u2)
assert_almost_equal(v1, v2)
assert_almost_equal(np.dot(u2 * s2, v2), a)
assert_almost_equal(np.dot(u2.T, u2), np.eye(2))
assert_almost_equal(np.dot(v2.T, v2), np.eye(2))
def test_cartesian():
# Check if cartesian product delivers the right results
axes = (np.array([1, 2, 3]), np.array([4, 5]), np.array([6, 7]))
true_out = np.array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
out = cartesian(axes)
assert_array_equal(true_out, out)
# check single axis
x = np.arange(3)
assert_array_equal(x[:, np.newaxis], cartesian((x,)))
def test_logistic_sigmoid():
# Check correctness and robustness of logistic sigmoid implementation
naive_logistic = lambda x: 1 / (1 + np.exp(-x))
naive_log_logistic = lambda x: np.log(naive_logistic(x))
x = np.linspace(-2, 2, 50)
assert_array_almost_equal(log_logistic(x), naive_log_logistic(x))
extreme_x = np.array([-100., 100.])
assert_array_almost_equal(log_logistic(extreme_x), [-100, 0])
def test_fast_dot():
# Check fast dot blas wrapper function
if fast_dot is np.dot:
return
rng = np.random.RandomState(42)
A = rng.random_sample([2, 10])
B = rng.random_sample([2, 10])
try:
linalg.get_blas_funcs(['gemm'])[0]
has_blas = True
except (AttributeError, ValueError):
has_blas = False
if has_blas:
# Test _fast_dot for invalid input.
# Maltyped data.
for dt1, dt2 in [['f8', 'f4'], ['i4', 'i4']]:
assert_raises(ValueError, _fast_dot, A.astype(dt1),
B.astype(dt2).T)
# Malformed data.
## ndim == 0
E = np.empty(0)
assert_raises(ValueError, _fast_dot, E, E)
## ndim == 1
assert_raises(ValueError, _fast_dot, A, A[0])
## ndim > 2
assert_raises(ValueError, _fast_dot, A.T, np.array([A, A]))
## min(shape) == 1
assert_raises(ValueError, _fast_dot, A, A[0, :][None, :])
# test for matrix mismatch error
assert_raises(ValueError, _fast_dot, A, A)
# Test cov-like use case + dtypes.
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
# col < row
C = np.dot(A.T, A)
C_ = fast_dot(A.T, A)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A, B.T)
C_ = fast_dot(A, B.T)
assert_almost_equal(C, C_, decimal=5)
# Test square matrix * rectangular use case.
A = rng.random_sample([2, 2])
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
C = np.dot(A, B)
C_ = fast_dot(A, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
if has_blas:
for x in [np.array([[d] * 10] * 2) for d in [np.inf, np.nan]]:
assert_raises(ValueError, _fast_dot, x, x.T)
def test_incremental_variance_update_formulas():
# Test Youngs and Cramer incremental variance formulas.
# Doggie data from http://www.mathsisfun.com/data/standard-deviation.html
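# Sketch of the pooled update being verified (population variance, ddof=0):
# given old stats (m_A, v_A, n_A) and a new batch with stats (m_B, v_B, n_B),
#   n = n_A + n_B
#   m = (n_A * m_A + n_B * m_B) / n
#   v = (n_A * v_A + n_B * v_B) / n + n_A * n_B * ((m_A - m_B) / n) ** 2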
A = np.array([[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300]]).T
idx = 2
X1 = A[:idx, :]
X2 = A[idx:, :]
old_means = X1.mean(axis=0)
old_variances = X1.var(axis=0)
old_sample_count = X1.shape[0]
final_means, final_variances, final_count = _batch_mean_variance_update(
X2, old_means, old_variances, old_sample_count)
assert_almost_equal(final_means, A.mean(axis=0), 6)
assert_almost_equal(final_variances, A.var(axis=0), 6)
assert_almost_equal(final_count, A.shape[0])
def test_incremental_variance_ddof():
# Test that degrees of freedom parameter for calculations are correct.
rng = np.random.RandomState(1999)
X = rng.randn(50, 10)
n_samples, n_features = X.shape
for batch_size in [11, 20, 37]:
steps = np.arange(0, X.shape[0], batch_size)
if steps[-1] != X.shape[0]:
steps = np.hstack([steps, n_samples])
for i, j in zip(steps[:-1], steps[1:]):
batch = X[i:j, :]
if i == 0:
incremental_means = batch.mean(axis=0)
incremental_variances = batch.var(axis=0)
# Assign this twice so that the test logic is consistent
incremental_count = batch.shape[0]
sample_count = batch.shape[0]
else:
result = _batch_mean_variance_update(
batch, incremental_means, incremental_variances,
sample_count)
(incremental_means, incremental_variances,
incremental_count) = result
sample_count += batch.shape[0]
calculated_means = np.mean(X[:j], axis=0)
calculated_variances = np.var(X[:j], axis=0)
assert_almost_equal(incremental_means, calculated_means, 6)
assert_almost_equal(incremental_variances,
calculated_variances, 6)
assert_equal(incremental_count, sample_count)
def test_vector_sign_flip():
# Testing that sign flip is working & largest value has positive sign
data = np.random.RandomState(36).randn(5, 5)
max_abs_rows = np.argmax(np.abs(data), axis=1)
data_flipped = _deterministic_vector_sign_flip(data)
max_rows = np.argmax(data_flipped, axis=1)
assert_array_equal(max_abs_rows, max_rows)
signs = np.sign(data[range(data.shape[0]), max_abs_rows])
assert_array_equal(data, data_flipped * signs[:, np.newaxis])
| bsd-3-clause |
wkfwkf/statsmodels | statsmodels/base/tests/test_shrink_pickle.py | 6 | 7890 | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 09 16:00:27 2012
Author: Josef Perktold
"""
from __future__ import print_function
from statsmodels.compat.python import iterkeys, cPickle, BytesIO
import numpy as np
import statsmodels.api as sm
import pandas as pd
from numpy.testing import assert_
from nose import SkipTest
import platform
iswin = platform.system() == 'Windows'
npversionless15 = np.__version__ < '1.5'
winoldnp = iswin & npversionless15
def check_pickle(obj):
fh = BytesIO()
cPickle.dump(obj, fh, protocol=cPickle.HIGHEST_PROTOCOL)
plen = fh.tell()
fh.seek(0, 0)
res = cPickle.load(fh)
fh.close()
return res, plen
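# Usage sketch: round-trip any picklable object and get its pickled byte count,
# e.g.
#   obj2, nbytes = check_pickle(np.arange(3))
#   assert (obj2 == np.arange(3)).all() and nbytes > 0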
class RemoveDataPickle(object):
def __init__(self):
self.predict_kwds = {}
@classmethod
def setup_class(self):
nobs = 10000
np.random.seed(987689)
x = np.random.randn(nobs, 3)
x = sm.add_constant(x)
self.exog = x
self.xf = 0.25 * np.ones((2, 4))
self.l_max = 20000
def test_remove_data_pickle(self):
if winoldnp:
raise SkipTest
results = self.results
xf = self.xf
pred_kwds = self.predict_kwds
pred1 = results.predict(xf, **pred_kwds)
#create some cached attributes
results.summary()
res = results.summary2() # SMOKE test also summary2
# uncomment the following to check whether tests run (7 failures now)
#np.testing.assert_equal(res, 1)
#check pickle unpickle works on full results
#TODO: drop of load save is tested
res, l = check_pickle(results._results)
#remove data arrays, check predict still works
results.remove_data()
pred2 = results.predict(xf, **pred_kwds)
np.testing.assert_equal(pred2, pred1)
#pickle, unpickle reduced array
res, l = check_pickle(results._results)
#for testing attach res
self.res = res
#Note: l_max is just a guess for the limit on the length of the pickle
l_max = self.l_max
assert_(l < l_max, msg='pickle length not %d < %d' % (l, l_max))
pred3 = results.predict(xf, **pred_kwds)
np.testing.assert_equal(pred3, pred1)
def test_remove_data_docstring(self):
assert_(self.results.remove_data.__doc__ is not None)
def test_pickle_wrapper(self):
fh = BytesIO() # use cPickle with binary content
# test unwrapped results load save pickle
self.results._results.save(fh)
fh.seek(0, 0)
res_unpickled = self.results._results.__class__.load(fh)
assert_(type(res_unpickled) is type(self.results._results))
# test wrapped results load save
fh.seek(0, 0)
self.results.save(fh)
fh.seek(0, 0)
res_unpickled = self.results.__class__.load(fh)
fh.close()
# print type(res_unpickled)
assert_(type(res_unpickled) is type(self.results))
before = sorted(iterkeys(self.results.__dict__))
after = sorted(iterkeys(res_unpickled.__dict__))
assert_(before == after, msg='not equal %r and %r' % (before, after))
before = sorted(iterkeys(self.results._results.__dict__))
after = sorted(iterkeys(res_unpickled._results.__dict__))
assert_(before == after, msg='not equal %r and %r' % (before, after))
before = sorted(iterkeys(self.results.model.__dict__))
after = sorted(iterkeys(res_unpickled.model.__dict__))
assert_(before == after, msg='not equal %r and %r' % (before, after))
before = sorted(iterkeys(self.results._cache))
after = sorted(iterkeys(res_unpickled._cache))
assert_(before == after, msg='not equal %r and %r' % (before, after))
class TestRemoveDataPickleOLS(RemoveDataPickle):
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog
np.random.seed(987689)
y = x.sum(1) + np.random.randn(x.shape[0])
self.results = sm.OLS(y, self.exog).fit()
class TestRemoveDataPickleWLS(RemoveDataPickle):
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog
np.random.seed(987689)
y = x.sum(1) + np.random.randn(x.shape[0])
self.results = sm.WLS(y, self.exog, weights=np.ones(len(y))).fit()
class TestRemoveDataPicklePoisson(RemoveDataPickle):
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog
np.random.seed(987689)
y_count = np.random.poisson(np.exp(x.sum(1) - x.mean()))
model = sm.Poisson(y_count, x) #, exposure=np.ones(nobs), offset=np.zeros(nobs)) #bug with default
# use start_params to converge faster
start_params = np.array([0.75334818, 0.99425553, 1.00494724, 1.00247112])
self.results = model.fit(start_params=start_params, method='bfgs',
disp=0)
#TODO: temporary, fixed in master
self.predict_kwds = dict(exposure=1, offset=0)
class TestRemoveDataPickleNegativeBinomial(RemoveDataPickle):
def setup(self):
#fit for each test, because results will be changed by test
np.random.seed(987689)
data = sm.datasets.randhie.load()
exog = sm.add_constant(data.exog, prepend=False)
mod = sm.NegativeBinomial(data.endog, data.exog)
self.results = mod.fit(disp=0)
class TestRemoveDataPickleLogit(RemoveDataPickle):
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog
nobs = x.shape[0]
np.random.seed(987689)
y_bin = (np.random.rand(nobs) < 1.0 / (1 + np.exp(x.sum(1) - x.mean()))).astype(int)
model = sm.Logit(y_bin, x) #, exposure=np.ones(nobs), offset=np.zeros(nobs)) #bug with default
# use start_params to converge faster
start_params = np.array([-0.73403806, -1.00901514, -0.97754543, -0.95648212])
self.results = model.fit(start_params=start_params, method='bfgs', disp=0)
class TestRemoveDataPickleRLM(RemoveDataPickle):
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog
np.random.seed(987689)
y = x.sum(1) + np.random.randn(x.shape[0])
self.results = sm.RLM(y, self.exog).fit()
class TestRemoveDataPickleGLM(RemoveDataPickle):
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog
np.random.seed(987689)
y = x.sum(1) + np.random.randn(x.shape[0])
self.results = sm.GLM(y, self.exog).fit()
class TestPickleFormula(RemoveDataPickle):
@classmethod
def setup_class(cls):
nobs = 10000
np.random.seed(987689)
x = np.random.randn(nobs, 3)
cls.exog = pd.DataFrame(x, columns=["A", "B", "C"])
cls.xf = pd.DataFrame(0.25 * np.ones((2, 3)),
columns=cls.exog.columns)
cls.l_max = 900000 # have to pickle endog/exog to unpickle the formula.
def setup(self):
x = self.exog
np.random.seed(123)
y = x.sum(1) + np.random.randn(x.shape[0])
y = pd.Series(y, name="Y")
X = self.exog.copy()
X["Y"] = y
self.results = sm.OLS.from_formula("Y ~ A + B + C", data=X).fit()
if __name__ == '__main__':
for cls in [TestRemoveDataPickleOLS, TestRemoveDataPickleWLS,
TestRemoveDataPicklePoisson,
TestRemoveDataPickleNegativeBinomial,
TestRemoveDataPickleLogit, TestRemoveDataPickleRLM,
TestRemoveDataPickleGLM]:
print(cls)
cls.setup_class()
tt = cls()
tt.setup()
tt.test_remove_data_pickle()
tt.test_remove_data_docstring()
tt.test_pickle_wrapper()
| bsd-3-clause |
last-one/tools | caffe/result/plot_loss_acc_curve.py | 1 | 1617 | import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os
import sys
import argparse
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-l', '--log', dest='log', help='the training log')
parser.add_argument('-o', '--output', dest='output_path', help='the path to save the picture', type=str, default=None)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
logs = open(args.log, 'r')
lines = logs.readlines()
logs.close()
name = args.log.split('/')[-1].split('.')[0] + '.jpg'
if args.output_path != None:
name = os.path.join(args.output_path, name)
train_loss = []
test_acc = []
max_iter = 0
display = 0
test_interval = -1
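# The loop below keys on the substrings 'Iteration' and 'loss = ' and expects
# Caffe-style training-log lines roughly like (illustrative only):
#   I0815 12:00:00.000 1234 solver.cpp:228] Iteration 1200 (2.5 iter/s), loss = 0.6931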
for line in lines:
if line.find('Iteration') == -1 or line.find('loss = ') == -1:
continue
st_iter = line.find('Iteration')
ed_iter = st_iter + 10 + line[st_iter + 10:].find(' ')
display = max_iter
max_iter = int(line[st_iter + 9: ed_iter])
display = max_iter - display
pos_loss = line.find('loss = ')
loss = float(line[pos_loss + 7: ])
train_loss.append(loss)
max_iter += display
_, ax1 = plt.subplots()
ax2 = ax1.twinx()
ax1.plot(np.arange(0, max_iter, display), train_loss)
ax1.set_xlabel('iteration')
ax1.set_ylabel('train loss')
if test_interval != -1:
ax2.plot(test_interval * np.arange(len(test_acc)), test_acc, 'r')
ax2.set_ylabel('test accuracy')
plt.savefig(name)
| bsd-2-clause |
aabadie/scikit-learn | examples/exercises/plot_cv_digits.py | 135 | 1223 | """
=============================================
Cross-validation on Digits Dataset Exercise
=============================================
A tutorial exercise using Cross-validation with an SVM on the Digits dataset.
This exercise is used in the :ref:`cv_generators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
from sklearn.model_selection import cross_val_score
from sklearn import datasets, svm
digits = datasets.load_digits()
X = digits.data
y = digits.target
svc = svm.SVC(kernel='linear')
C_s = np.logspace(-10, 0, 10)
scores = list()
scores_std = list()
for C in C_s:
svc.C = C
this_scores = cross_val_score(svc, X, y, n_jobs=1)
scores.append(np.mean(this_scores))
scores_std.append(np.std(this_scores))
# Do the plotting
import matplotlib.pyplot as plt
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.semilogx(C_s, scores)
plt.semilogx(C_s, np.array(scores) + np.array(scores_std), 'b--')
plt.semilogx(C_s, np.array(scores) - np.array(scores_std), 'b--')
locs, labels = plt.yticks()
plt.yticks(locs, list(map(lambda x: "%g" % x, locs)))
plt.ylabel('CV score')
plt.xlabel('Parameter C')
plt.ylim(0, 1.1)
plt.show()
| bsd-3-clause |
jdmonaco/grid-remapping-model | src/analysis/altmodels.py | 1 | 13349 | #encoding: utf-8
"""
grid.analysis.altmodels -- Analysis simulating model variants for comparison
Exports: ModelComparison
Written by Joe Monaco, 02/05/2011.
Copyright (c) 2011 Johns Hopkins University. All rights reserved.
"""
# Library imports
from scipy.stats import sem
import os, numpy as np
import matplotlib as mpl
import matplotlib.pylab as plt
# Package imports
from ..place_network import PlaceNetworkStd
from ..core.analysis import AbstractAnalysis
from ..tools.images import array_to_image
from ..ratemap import CheckeredRatemap
from ..dmec import GridCollection
from .compare import compare_AB
from .map_funcs import get_tuned_weights
class ModelComparison(AbstractAnalysis):
"""
Load a standard simulation from pre-existing data (or simulate a new map)
and then simulate several model variants to compare place fields size
and location differences.
See core.analysis.AbstractAnalysis documentation and collect_data method
signature and docstring for usage.
"""
label = "alt models"
def collect_data(self, load_dir=None, alpha=0.3, gamma=1.0, rec_tuned=False):
"""Run a standard simulation and then variants using the same network
Keyword arguments:
load_dir -- if loading pre-existing network, set directory here
alpha -- learning parameter for tuned weights (get_tuned_weights)
gamma -- gain of recurrent excitation (based on overlap)
rec_tuned -- whether recurrent variant is based on tuned output (True)
or the standard output (False)
Spatial maps for each variant are saved to <prefix>_map archives in the data directory.
"""
self.results['model_types'] = ('std', 'fwd', 'tuned', 'rec')
if load_dir is not None:
if not os.path.isdir(load_dir):
raise ValueError, 'invalid load directory'
self.results['load_dir'] = os.path.abspath(load_dir)
self.out('Loading network from\n%s...'%self.results['load_dir'])
os.chdir(load_dir)
l = np.load
EC = GridCollection(
_phi=l('phi.npy'), _psi=l('psi.npy'), spacing=l('spacing.npy'))
model = PlaceNetworkStd(EC=EC, W=l('W.npy'), refresh_weights=False)
os.chdir(self.datadir)
else:
self.out('Creating new grid inputs and place network...')
EC = GridCollection()
model = PlaceNetworkStd(EC=EC)
W = model.W
def get_norms(M):
return np.sqrt((M**2).sum(axis=0))
def store_data(prefix, pmap):
udata = pmap.get_unit_data()
fdata = pmap.get_field_data()
self.results['%s_sparsity'%prefix] = pmap.sparsity
self.results['%s_num_fields'%prefix] = udata['num_fields']
self.results['%s_area'%prefix] = fdata['area']
self.results['%s_diameter'%prefix] = fdata['diameter']
self.results['%s_x'%prefix] = fdata['x']
self.results['%s_y'%prefix] = fdata['y']
if not os.path.exists('%s_map.tar.gz'%prefix):
pmap.tofile('%s_map'%prefix)
return
# Get input strength map
self.out('Computing grid input strengths...')
EC_R = EC.get_z_stack()
EC_norms = get_norms(EC_R)
np.save('EC_norms.npy', EC_norms)
array_to_image(EC_norms, 'EC_norms.png', cmap=mpl.cm.gray_r)
array_to_image(EC_norms, 'EC_norms_jet.png', cmap=mpl.cm.jet)
# Run the standard simulation
if not os.path.exists('std_map.tar.gz'):
self.out('Running standard simulation...')
model.advance()
pmap = CheckeredRatemap(model)
else:
self.out('Loading standard simulation data...')
pmap = CheckeredRatemap.fromfile('std_map.tar.gz')
store_data('std', pmap)
std_num_active = pmap.num_active
self.out('Standard active units = %d'%std_num_active)
R = pmap.Map
array_to_image(get_norms(R), 'std_norms.png', cmap=mpl.cm.gray_r)
array_to_image(get_norms(R), 'std_norms_jet.png', cmap=mpl.cm.jet)
def sparsity_match_threshold(Map):
self.out('Searching for sparsity-matching threshold...')
N, H, W = Map.shape
I = np.empty((N,), 'd')
for i in xrange(N):
I[i] = Map[i].max()
# Test activity peaks as thresholds to find sparsity-matching threshold
I.sort()
R_ = np.empty(Map.shape, 'd') # probe workspace
thresh = 0
for i in xrange(N):
R_[:] = Map # reset
Rmax = R_.max()
num_active = 0
for j in xrange(N):
if (R_[j].max()>0.2*Rmax):
if (R_[j]>0.2*R_[j].max()).sum() > 50:
num_active += 1
self.out.printf('%d '%num_active)
if num_active < std_num_active:
self.out.printf('\n')
self.out('... sparsity match at %.4f ...'%thresh)
break
thresh = I[i] # get next peak
R_ -= thresh # and apply test threshold
R_[R_<0] = 0
del R_
if num_active >= std_num_active:
self.out.printf('\n')
if thresh:
Map -= thresh
Map[Map<0] = 0
return
# Run feedforward inhibition simulation
if not os.path.exists('fwd_map.tar.gz'):
self.out('Computing feedforward model variant...')
R[:] = 0 # using R matrix as a spatial map workspace
for i in xrange(model.N_CA):
R[i] = model.beta * (W[i].reshape(model.N_EC, 1, 1) * EC_R).sum(axis=0)
# Feedforward inhibition as sparsity-matching threshold
sparsity_match_threshold(R)
pmap.reset()
pmap.compute_coverage()
self.out('Feedforward active units = %d'%pmap.num_active)
else:
self.out('Loading feedforward model data...')
pmap = CheckeredRatemap.fromfile('fwd_map.tar.gz')
R = pmap.Map
array_to_image(get_norms(R), 'fwd_norms.png', cmap=mpl.cm.gray_r)
store_data('fwd', pmap)
# Run associatively tuned simulation
if not os.path.exists('tuned_map.tar.gz'):
self.out('Running input tuned simulation (alpha = %.2f)...'%alpha)
model.W = get_tuned_weights(
CheckeredRatemap.fromfile('std_map.tar.gz'), W, EC, alpha,
grow_synapses=True)
model.reset()
model.advance()
pmap = CheckeredRatemap(model)
pmap.compute_coverage()
self.out('Tuned active units = %d'%pmap.num_active)
else:
self.out('Loading input tuned model data...')
pmap = CheckeredRatemap.fromfile('tuned_map.tar.gz')
R = pmap.Map
array_to_image(get_norms(R), 'tuned_norms.png', cmap=mpl.cm.gray_r)
store_data('tuned', pmap)
# Run recurrent excitation simulation
if not os.path.exists('rec_map.tar.gz'):
# Construct the E-E weight matrix
self.out('Constructing E-E weight matrix...')
if rec_tuned:
self.out('--> Using input-tuned output as base')
else:
self.out('--> Using standard output as base')
pmap = CheckeredRatemap.fromfile('std_map.tar.gz')
R = pmap.Map
N, H, W = R.shape
J = np.zeros((N, N), 'd')
for i in xrange(N):
for j in xrange(i+1, N):
J[i,j] = J[j,i] = gamma * \
(pmap.single_maps[i] * pmap.single_maps[j]).sum()
if J[i,j] > 0:
J[i,j] = J[j,i] = J[i,j] / \
min(pmap.single_maps[i].sum(),
pmap.single_maps[j].sum())
# Add in first-order recurrent excitation across the map
self.out('Adding first-order recurrent excitation to map...')
for i in xrange(H):
for j in xrange(W):
R[:,i,j] += np.dot(R[:,i,j], J) # feedforward
R[:,i,j] += np.dot(R[:,i,j], J) # feedback
# Feedforward threshold to maintain activity level
sparsity_match_threshold(R)
pmap.reset()
pmap.compute_coverage()
self.out('Recurrent active units = %d'%pmap.num_active)
else:
self.out('Loading recurrent model data...')
pmap = CheckeredRatemap.fromfile('rec_map.tar.gz')
R = pmap.Map
array_to_image(get_norms(R), 'rec_norms.png', cmap=mpl.cm.gray_r)
store_data('rec', pmap)
# Good-bye!
self.out('All done!')
def create_plots(self, legend=False):
# Move into data directory and start logging
os.chdir(self.datadir)
self.out.outfd = file('figure.log', 'w')
# Set up main figure for plotting
self.figure = {}
figsize = 8, 10
plt.rcParams['figure.figsize'] = figsize
self.figure['altmodels'] = f = plt.figure(figsize=figsize)
f.suptitle(self.label.title())
# Load data
data = self.results
models = data['model_types']
getval = lambda pre, k: data[pre + '_' + k]
# Log some data
def print_mean_sem(value, arr):
if type(arr) is float:
self.out('%s = %.4f'%(value, arr))
else:
self.out('%s = %.4f +/- %.4f'%(value, arr.mean(), sem(arr)))
for prefix in models:
for val in ('sparsity', 'num_fields', 'area', 'diameter'):
key = prefix + '_' + val
print_mean_sem(key, data[key])
# Draw place fields as circles
def draw_circle_field_plots(ax, prefix):
x = getval(prefix, 'x')
y = getval(prefix, 'y')
d = getval(prefix, 'diameter')
nfields = len(x)
ax.plot(x, y, 'k+', ms=6, aa=False)
for i in xrange(nfields):
ell = mpl.patches.Ellipse((x[i], y[i]), d[i], d[i],
fill=False, lw=1, ec='k')
ell.clip_box = ax.bbox
ax.add_artist(ell)
ax.axis("image")
ax.set_xlim(0, 100)
ax.set_ylim(0, 100)
ax.set_title(prefix)
return ax
# Render place field plots
rows = 3
cols = 2
for i,prefix in enumerate(models):
draw_circle_field_plots(plt.subplot(rows, cols, i+1), prefix)
# Statistics plot
ax = plt.subplot(rows, cols, 5)
markers = "ods^"
for i,prefix in enumerate(models):
a = getval(prefix, 'area')
nf = getval(prefix, 'num_fields')
ax.errorbar(a.mean(), nf.mean(), xerr=sem(a), yerr=sem(nf),
fmt=markers[i], ecolor='k', elinewidth=1, capsize=4,
ms=6, mfc='k', mec='k', mew=1)
# ax.set_ylim(1, 2)
# ax.set_xlim(xmax=245)
ax.set_xlabel('area')
ax.set_ylabel('num. fields')
# Remapping data
if os.path.exists('remapping.npy'):
self.out('Loading remapping/turnover values...')
remapping, turnover = np.load('remapping.npy')
else:
self.out('Computing remapping/turnover measures...')
pmaps = [CheckeredRatemap.fromfile('%s_map.tar.gz'%p) for p in models]
remapping = []
turnover = []
for pm in pmaps[1:]:
cmpAB = compare_AB(pmaps[0], pm)
remapping.append(cmpAB['remapping'])
turnover.append(cmpAB['turnover'])
np.save('remapping.npy', np.array([remapping, turnover]))
self.out('Remapping: %s'%str(remapping))
self.out('Turnover: %s'%str(turnover))
# Set up bar plot data
ax = plt.subplot(rows, cols, 6)
left = []
height = []
xticklabels = models[1:]
bar_w = 1/float(len(xticklabels))
c = 0
for i in xrange(len(xticklabels)):
left.extend([c-bar_w, c])
height.extend([remapping[i], turnover[i]])
c += 1
# Render the bar chart and legend
bar_cols = mpl.cm.gray(([0.25, 0.6])*c)
bar_h = ax.bar(left, height, width=bar_w,
ec='k', color=bar_cols, linewidth=0, ecolor='k', aa=False)
if legend:
ax.legend(bar_h[:2], ['Remapping', 'Turnover'], loc=1)
ax.hlines(1.0, xmin=-0.5, xmax=c-0.5, linestyle=':', color='k')
ax.set_xlim(-0.5, c-0.5)
ax.set_ylim(0.0, 1.1)
ax.set_xticks(np.arange(c))
ax.set_xticklabels(xticklabels)
plt.draw()
plt.rcParams['figure.figsize'] = plt.rcParamsDefault['figure.figsize']
self.out.outfd.close()
| mit |
Transkribus/TranskribusDU | usecases/NewsEye/FeatureDefinition_PageXml_std.py | 1 | 14652 | # -*- coding: utf-8 -*-
"""
Standard PageXml features
Copyright Xerox(C) 2016 JL. Meunier
Developed for the EU project READ. The READ project has received funding
from the European Union�s Horizon 2020 research and innovation programme
under grant agreement No 674943.
"""
import numpy as np
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.feature_extraction.text import TfidfVectorizer
#not robust to empty arrays, so use our robust intermediary class instead
#from sklearn.preprocessing import StandardScaler
from graph.Transformer import EmptySafe_QuantileTransformer as QuantileTransformer
from graph.Transformer import SparseToDense
from graph.Transformer_PageXml import NodeTransformerXYWH, NodeTransformerNeighbors, Node1HotFeatures
from graph.Transformer_PageXml import Edge1HotFeatures, EdgeBooleanFeatures, EdgeNumericalSelector
from graph.Transformer_PageXml import NodeTransformerTextEnclosed, NodeTransformerTextLen
from graph.Transformer_PageXml import EdgeTransformerSourceText, EdgeTransformerTargetText
from graph.PageNumberSimpleSequenciality import PageNumberSimpleSequenciality
from graph.FeatureDefinition import FeatureDefinition
from PageXmlSeparatorRegion import Separator_boolean, Separator_num
class FeatureDefinition_PageXml_StandardOnes(FeatureDefinition):
n_QUANTILES = 16
bSeparator = False
def __init__(self, n_tfidf_node=None, t_ngrams_node=None, b_tfidf_node_lc=None
, n_tfidf_edge=None, t_ngrams_edge=None, b_tfidf_edge_lc=None
, bMirrorPage=True, bMultiPage=True):
FeatureDefinition.__init__(self)
self.n_tfidf_node, self.t_ngrams_node, self.b_tfidf_node_lc = n_tfidf_node, t_ngrams_node, b_tfidf_node_lc
self.n_tfidf_edge, self.t_ngrams_edge, self.b_tfidf_edge_lc = n_tfidf_edge, t_ngrams_edge, b_tfidf_edge_lc
self.bMirrorPage = bMirrorPage
self.bMultiPage = bMultiPage
tdifNodeTextVectorizer = TfidfVectorizer(lowercase=self.b_tfidf_node_lc, max_features=self.n_tfidf_node
, analyzer = 'char', ngram_range=self.t_ngrams_node #(2,6)
, dtype=np.float64)
node_transformer = FeatureUnion( [ #CAREFUL IF YOU CHANGE THIS - see cleanTransformers method!!!!
("text", Pipeline([
('selector', NodeTransformerTextEnclosed()),
# ('tfidf', TfidfVectorizer(lowercase=self.b_tfidf_node_lc, max_features=self.n_tfidf_node
# , analyzer = 'char', ngram_range=self.tNODE_NGRAMS #(2,6)
# , dtype=np.float64)),
('tfidf', tdifNodeTextVectorizer), #we can use it separately from the pipleline once fitted
('todense', SparseToDense()) #pystruct needs an array, not a sparse matrix
])
)
,
("textlen", Pipeline([
('selector', NodeTransformerTextLen()),
('textlen', QuantileTransformer(n_quantiles=self.n_QUANTILES, copy=False)) #use in-place scaling
])
)
, ("xywh", Pipeline([
('selector', NodeTransformerXYWH()),
#v1 ('xywh', StandardScaler(copy=False, with_mean=True, with_std=True)) #use in-place scaling
('xywh', QuantileTransformer(n_quantiles=self.n_QUANTILES, copy=False)) #use in-place scaling
])
)
, ("neighbors", Pipeline([
('selector', NodeTransformerNeighbors()),
#v1 ('neighbors', StandardScaler(copy=False, with_mean=True, with_std=True)) #use in-place scaling
('neighbors', QuantileTransformer(n_quantiles=self.n_QUANTILES, copy=False)) #use in-place scaling
])
)
, ("1hot", Pipeline([
('1hot', Node1HotFeatures()) #does the 1-hot encoding directly
])
)
#, ("sem", Pipeline([
# ('sem', NodeSemanticLabels()) #add semantic labels
# ])
# ) # Added by Animesh
# , ('ocr' , Pipeline([
# ('ocr', NodeOCRFeatures())
# ])
# )
# , ('pnumre' , Pipeline([
# ('pnumre', NodePNumFeatures())
# ])
# )
# , ("doc_tfidf", Pipeline([
# ('zero', Zero2Features())
# #THIS ONE MUST BE LAST, because it include a placeholder column for the doculent-level tfidf
# ])
# )
])
lEdgeFeature = [ #CAREFUL IF YOU CHANGE THIS - see cleanTransformers method!!!!
("1hot", Pipeline([
('1hot', Edge1HotFeatures(PageNumberSimpleSequenciality()))
])
)
, ("boolean", Pipeline([
('boolean', EdgeBooleanFeatures())
])
)
, ("numerical", Pipeline([
('selector', EdgeNumericalSelector()),
#v1 ('numerical', StandardScaler(copy=False, with_mean=True, with_std=True)) #use in-place scaling
('numerical', QuantileTransformer(n_quantiles=self.n_QUANTILES, copy=False)) #use in-place scaling
])
)
# , ("sourcetext0", Pipeline([
# ('selector', EdgeTransformerSourceText(0, bMirrorPage=bMirrorPage, bMultiPage=bMultiPage)),
# ('tfidf', TfidfVectorizer(lowercase=self.b_tfidf_edge_lc, max_features=self.n_tfidf_edge
# , analyzer = 'char', ngram_range=self.t_ngrams_edge #(2,6)
# , dtype=np.float64)),
# ('todense', SparseToDense()) #pystruct needs an array, not a sparse matrix
# ])
# )
# , ("targettext0", Pipeline([
# ('selector', EdgeTransformerTargetText(0, bMirrorPage=bMirrorPage, bMultiPage=bMultiPage)),
# ('tfidf', TfidfVectorizer(lowercase=self.b_tfidf_edge_lc, max_features=self.n_tfidf_edge
# , analyzer = 'char', ngram_range=self.t_ngrams_edge
# #, analyzer = 'word', ngram_range=self.tEDGE_NGRAMS
# , dtype=np.float64)),
# ('todense', SparseToDense()) #pystruct needs an array, not a sparse matrix
# ])
# )
# , ("sourcetext1", Pipeline([
# ('selector', EdgeTransformerSourceText(1, bMirrorPage=bMirrorPage, bMultiPage=bMultiPage)),
# ('tfidf', TfidfVectorizer(lowercase=self.b_tfidf_edge_lc, max_features=self.n_tfidf_edge
# , analyzer = 'char', ngram_range=self.t_ngrams_edge #(2,6)
# , dtype=np.float64)),
# ('todense', SparseToDense()) #pystruct needs an array, not a sparse matrix
# ])
# )
# , ("targettext1", Pipeline([
# ('selector', EdgeTransformerTargetText(1, bMirrorPage=bMirrorPage, bMultiPage=bMultiPage)),
# ('tfidf', TfidfVectorizer(lowercase=self.b_tfidf_edge_lc, max_features=self.n_tfidf_edge
# , analyzer = 'char', ngram_range=self.t_ngrams_edge
# #, analyzer = 'word', ngram_range=self.tEDGE_NGRAMS
# , dtype=np.float64)),
# ('todense', SparseToDense()) #pystruct needs an array, not a sparse matrix
# ])
# )
]
if self.bSeparator:
lEdgeFeature = lEdgeFeature + [
('sprtr_bool', Separator_boolean())
, ('sprtr_num' , Separator_num())
]
if bMultiPage:
lEdgeFeature.extend([("sourcetext2", Pipeline([
('selector', EdgeTransformerSourceText(2, bMirrorPage=bMirrorPage, bMultiPage=bMultiPage)),
('tfidf', TfidfVectorizer(lowercase=self.b_tfidf_edge_lc, max_features=self.n_tfidf_edge
, analyzer = 'char', ngram_range=self.t_ngrams_edge #(2,6)
, dtype=np.float64)),
('todense', SparseToDense()) #pystruct needs an array, not a sparse matrix
])
)
, ("targettext2", Pipeline([
('selector', EdgeTransformerTargetText(2, bMirrorPage=bMirrorPage, bMultiPage=bMultiPage)),
('tfidf', TfidfVectorizer(lowercase=self.b_tfidf_edge_lc, max_features=self.n_tfidf_edge
, analyzer = 'char', ngram_range=self.t_ngrams_edge
#, analyzer = 'word', ngram_range=self.tEDGE_NGRAMS
, dtype=np.float64)),
('todense', SparseToDense()) #pystruct needs an array, not a sparse matrix
])
)
])
edge_transformer = FeatureUnion( lEdgeFeature )
#return _node_transformer, _edge_transformer, tdifNodeTextVectorizer
self._node_transformer = node_transformer
self._edge_transformer = edge_transformer
self.tfidfNodeTextVectorizer = tdifNodeTextVectorizer
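# Hedged construction sketch (parameter values are illustrative, and the
# surrounding graph/feature-extraction machinery is assumed, not shown here):
#   fd = FeatureDefinition_PageXml_StandardOnes(
#       n_tfidf_node=500, t_ngrams_node=(2, 4), b_tfidf_node_lc=False,
#       n_tfidf_edge=250, t_ngrams_edge=(2, 4), b_tfidf_edge_lc=False)
#   node_tr, edge_tr = fd.cleanTransformers()  # call after fitting, before pickling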
def cleanTransformers(self):
"""
the TFIDF transformers are keeping the stop words => huge pickled file!!!
Here the fix is a bit rough. There are better ways....
JL
"""
self._node_transformer.transformer_list[0][1].steps[1][1].stop_words_ = None #is 1st in the union...
if self.bMirrorPage:
imax = 9
else:
imax = 7
# for i in range(3, imax):
# self._edge_transformer.transformer_list[i][1].steps[1][1].stop_words_ = None #are 3rd and 4th in the union....
return self._node_transformer, self._edge_transformer
class FeatureDefinition_PageXml_StandardOnes_SEP(FeatureDefinition_PageXml_StandardOnes):
bSeparator = True
| bsd-3-clause |
xho95/BuildingMachineLearningSystemsWithPython | ch09/utils.py | 24 | 5568 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import os
import sys
from matplotlib import pylab
import numpy as np
DATA_DIR = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "data")
CHART_DIR = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "charts")
for d in [DATA_DIR, CHART_DIR]:
if not os.path.exists(d):
os.mkdir(d)
# Put your directory to the different music genres here
GENRE_DIR = None
GENRE_LIST = ["classical", "jazz", "country", "pop", "rock", "metal"]
# Put your directory to the test dir here
TEST_DIR = None
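# Example configuration (paths are placeholders -- adapt to your local layout):
#   GENRE_DIR = "/path/to/genres"  # one sub-directory per genre in GENRE_LIST
#   TEST_DIR = "/path/to/private/testset"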
if GENRE_DIR is None or TEST_DIR is None:
print("Please set GENRE_DIR and TEST_DIR in utils.py")
sys.exit(1)
def plot_confusion_matrix(cm, genre_list, name, title):
pylab.clf()
pylab.matshow(cm, fignum=False, cmap='Blues', vmin=0, vmax=1.0)
ax = pylab.axes()
ax.set_xticks(range(len(genre_list)))
ax.set_xticklabels(genre_list)
ax.xaxis.set_ticks_position("bottom")
ax.set_yticks(range(len(genre_list)))
ax.set_yticklabels(genre_list)
pylab.title(title)
pylab.colorbar()
pylab.grid(False)
pylab.show()
pylab.xlabel('Predicted class')
pylab.ylabel('True class')
pylab.grid(False)
pylab.savefig(
os.path.join(CHART_DIR, "confusion_matrix_%s.png" % name), bbox_inches="tight")
def plot_pr(auc_score, name, precision, recall, label=None):
pylab.clf()
pylab.figure(num=None, figsize=(5, 4))
pylab.grid(True)
pylab.fill_between(recall, precision, alpha=0.5)
pylab.plot(recall, precision, lw=1)
pylab.xlim([0.0, 1.0])
pylab.ylim([0.0, 1.0])
pylab.xlabel('Recall')
pylab.ylabel('Precision')
pylab.title('P/R curve (AUC = %0.2f) / %s' % (auc_score, label))
filename = name.replace(" ", "_")
pylab.savefig(
os.path.join(CHART_DIR, "pr_" + filename + ".png"), bbox_inches="tight")
def plot_roc(auc_score, name, tpr, fpr, label=None):
pylab.clf()
pylab.figure(num=None, figsize=(5, 4))
pylab.grid(True)
pylab.plot([0, 1], [0, 1], 'k--')
pylab.plot(fpr, tpr)
pylab.fill_between(fpr, tpr, alpha=0.5)
pylab.xlim([0.0, 1.0])
pylab.ylim([0.0, 1.0])
pylab.xlabel('False Positive Rate')
pylab.ylabel('True Positive Rate')
pylab.title('ROC curve (AUC = %0.2f) / %s' %
(auc_score, label), verticalalignment="bottom")
pylab.legend(loc="lower right")
filename = name.replace(" ", "_")
pylab.savefig(
os.path.join(CHART_DIR, "roc_" + filename + ".png"), bbox_inches="tight")
def show_most_informative_features(vectorizer, clf, n=20):
c_f = sorted(zip(clf.coef_[0], vectorizer.get_feature_names()))
top = zip(c_f[:n], c_f[:-(n + 1):-1])
for (c1, f1), (c2, f2) in top:
print("\t%.4f\t%-15s\t\t%.4f\t%-15s" % (c1, f1, c2, f2))
def plot_log():
pylab.clf()
x = np.arange(0.001, 1, 0.001)
y = np.log(x)
pylab.title('Relationship between probabilities and their logarithm')
pylab.plot(x, y)
pylab.grid(True)
pylab.xlabel('P')
pylab.ylabel('log(P)')
filename = 'log_probs.png'
pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
def plot_feat_importance(feature_names, clf, name):
pylab.clf()
coef_ = clf.coef_
important = np.argsort(np.absolute(coef_.ravel()))
f_imp = feature_names[important]
coef = coef_.ravel()[important]
inds = np.argsort(coef)
f_imp = f_imp[inds]
coef = coef[inds]
xpos = np.array(range(len(coef)))
pylab.bar(xpos, coef, width=1)
pylab.title('Feature importance for %s' % (name))
ax = pylab.gca()
ax.set_xticks(np.arange(len(coef)))
labels = ax.set_xticklabels(f_imp)
for label in labels:
label.set_rotation(90)
filename = name.replace(" ", "_")
pylab.savefig(os.path.join(
CHART_DIR, "feat_imp_%s.png" % filename), bbox_inches="tight")
def plot_feat_hist(data_name_list, filename=None):
pylab.clf()
num_rows = 1 + (len(data_name_list) - 1) / 2
num_cols = 1 if len(data_name_list) == 1 else 2
pylab.figure(figsize=(5 * num_cols, 4 * num_rows))
for i in range(num_rows):
for j in range(num_cols):
pylab.subplot(num_rows, num_cols, 1 + i * num_cols + j)
x, name = data_name_list[i * num_cols + j]
pylab.title(name)
pylab.xlabel('Value')
pylab.ylabel('Density')
# the histogram of the data
max_val = np.max(x)
if max_val <= 1.0:
bins = 50
elif max_val > 50:
bins = 50
else:
bins = max_val
n, bins, patches = pylab.hist(
x, bins=bins, normed=1, facecolor='green', alpha=0.75)
pylab.grid(True)
if not filename:
filename = "feat_hist_%s.png" % name
pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
def plot_bias_variance(data_sizes, train_errors, test_errors, name):
pylab.clf()
pylab.ylim([0.0, 1.0])
pylab.xlabel('Data set size')
pylab.ylabel('Error')
pylab.title("Bias-Variance for '%s'" % name)
pylab.plot(
data_sizes, train_errors, "-", data_sizes, test_errors, "--", lw=1)
pylab.legend(["train error", "test error"], loc="upper right")
pylab.grid(True)
pylab.savefig(os.path.join(CHART_DIR, "bv_" + name + ".png"))
| mit |
bcosenza/patus | tune/plot_training_amm.py | 1 | 1291 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
#size = [ '0.96K', '1.92K', '2.88', '3.84K', '48K', '5.76K', '6.72K', '7.68K', '8.64K', '9.6K', '16K', '32K']
size = [ 960, 1920, 2880, 3840, 4800, 5760, 6720, 7680, 8640, 9600, 16000, 32000]
tra = [ 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 7, 36]
reg = [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
#np.arange(0., 5., 0.2)
fig, ax = plt.subplots()
# red dashes, blue squares and green triangles
plt.plot(size, tra, linestyle=':', label='training time')
plt.plot(size, reg, linestyle='--', label='regression time')
#plt.axis([0,32000,0,40])
legend = plt.legend(loc='upper center',fontsize=20) #, shadow=True, fontsize='x-large')
# Put a nicer background color on the legend.
#legend.get_frame().set_facecolor('#00FFCC')
ax.set_ylabel('time ms',fontsize=20)
ax.set_xlabel('training set size',fontsize=20)
#ax.set_xticks(ind + width)
#ax.set_xticklabels(testcases,rotation=90)
#ax.legend(rects, models )
#plt.show()
fig.set_size_inches(18.5, 10.5, forward=True)
plt.savefig('plots/training_amortization.png', bbox_inches='tight')
with PdfPages('plots/training_amortization.pdf') as pdf:
pdf.savefig(plt.gcf())
plt.close() | lgpl-2.1 |
arahlin/healpy | healpy/newvisufunc.py | 4 | 7180 | __all__ = ["mollview", "projplot"]
import numpy as np
from .pixelfunc import ang2pix, npix2nside
from .rotator import Rotator
from matplotlib.projections.geo import GeoAxes
###### WARNING #################
# this module is work in progress, the aim is to reimplement the healpy
# plot functions using the new features of matplotlib and remove most
# of the custom projection code
class ThetaFormatterShiftPi(GeoAxes.ThetaFormatter):
"""Shifts labelling by pi
Shifts labelling from -180,180 to 0-360"""
def __call__(self, x, pos=None):
if x != 0:
x *= -1
if x < 0:
x += 2 * np.pi
return super(ThetaFormatterShiftPi, self).__call__(x, pos)
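# Sketch: with GeoAxes tick values in radians, a tick at x = -pi/2 is
# relabelled as if it were +pi/2 (i.e. "90" instead of "-90"), which shifts
# the displayed longitude range from [-180, 180] to [0, 360].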
def lonlat(theta, phi):
"""Converts theta and phi to longitude and latitude
From colatitude to latitude and from astro longitude to geo longitude"""
longitude = -1 * np.asarray(phi)
latitude = np.pi / 2 - np.asarray(theta)
return longitude, latitude
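# Worked example (sketch): theta=0 (the north pole) gives latitude +pi/2, and
# an astro longitude of +pi/3 maps to a geo longitude of -pi/3:
#   lonlat(0.0, np.pi / 3)  # -> (-1.0471975..., 1.5707963...)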
def mollview(
m=None,
rot=None,
coord=None,
unit="",
xsize=1000,
nest=False,
min=None,
max=None,
flip="astro",
format="%g",
cbar=True,
cmap=None,
norm=None,
graticule=False,
graticule_labels=False,
**kwargs
):
"""Plot a healpix map (given as an array) in Mollweide projection.
Parameters
----------
m : float, array-like or None
An array containing the map, supports masked maps, see the `ma` function.
If None, will display a blank map, useful for overplotting.
rot : scalar or sequence, optional
Describe the rotation to apply.
In the form (lon, lat, psi) (unit: degrees) : the point at
longitude *lon* and latitude *lat* will be at the center. An additional rotation
of angle *psi* around this direction is applied.
coord : sequence of character, optional
Either one of 'G', 'E' or 'C' to describe the coordinate
system of the map, or a sequence of 2 of these to rotate
the map from the first to the second coordinate system.
unit : str, optional
A text describing the unit of the data. Default: ''
xsize : int, optional
        The size of the image. Default: 1000
nest : bool, optional
If True, ordering scheme is NESTED. Default: False (RING)
min : float, optional
The minimum range value
max : float, optional
The maximum range value
flip : {'astro', 'geo'}, optional
Defines the convention of projection : 'astro' (default, east towards left, west towards right)
        or 'geo' (east towards right, west towards left)
format : str, optional
The format of the scale label. Default: '%g'
cbar : bool, optional
Display the colorbar. Default: True
    norm : {'hist', 'log', None}
        Color normalization. Only None (linear color mapping) is currently
        implemented; passing 'hist' or 'log' raises NotImplementedError.
kwargs : keywords
any additional keyword is passed to pcolormesh
graticule : bool
add graticule
graticule_labels : bool
longitude and latitude labels
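    Examples
    --------
    A minimal usage sketch (illustrative, not taken from the original code);
    it assumes healpy is installed with this module importable as
    ``healpy.newvisufunc`` and builds a small synthetic map::

        import numpy as np
        import healpy as hp
        from healpy.newvisufunc import mollview

        nside = 16
        m = np.arange(hp.nside2npix(nside), dtype=np.float64)
        mollview(m, unit="arbitrary units", graticule=True,
                 graticule_labels=True)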
"""
# not implemented features
if not (norm is None):
raise NotImplementedError()
# Create the figure
import matplotlib.pyplot as plt
width = 8.5
fig = plt.figure(figsize=(width, width * 0.63))
ax = fig.add_subplot(111, projection="mollweide")
# FIXME: make a more general axes creation that works also with subplots
# ax = plt.gcf().add_axes((.125, .1, .9, .9), projection="mollweide")
# remove white space around the image
plt.subplots_adjust(left=0.02, right=0.98, top=0.95, bottom=0.05)
if graticule and graticule_labels:
plt.subplots_adjust(left=0.04, right=0.98, top=0.95, bottom=0.05)
    if m is not None:
# auto min and max
if min is None:
min = m.min()
if max is None:
max = m.max()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop("hold", None)
if hold is not None:
ax.hold(hold)
    try:
        ret = None  # returned unchanged when no map is drawn (m is None)
        ysize = xsize // 2  # integer division so np.linspace gets an int count
theta = np.linspace(np.pi, 0, ysize)
phi = np.linspace(-np.pi, np.pi, xsize)
longitude = np.radians(np.linspace(-180, 180, xsize))
if flip == "astro":
longitude = longitude[::-1]
latitude = np.radians(np.linspace(-90, 90, ysize))
# project the map to a rectangular matrix xsize x ysize
PHI, THETA = np.meshgrid(phi, theta)
# coord or rotation
if coord or rot:
r = Rotator(coord=coord, rot=rot, inv=True)
THETA, PHI = r(THETA.flatten(), PHI.flatten())
THETA = THETA.reshape(ysize, xsize)
PHI = PHI.reshape(ysize, xsize)
        if m is not None:
            nside = npix2nside(len(m))  # only valid when a map was provided
            grid_pix = ang2pix(nside, THETA, PHI, nest=nest)
            grid_map = m[grid_pix]
# plot
ret = plt.pcolormesh(
longitude,
latitude,
grid_map,
vmin=min,
vmax=max,
rasterized=True,
**kwargs
)
# graticule
plt.grid(graticule)
if graticule:
longitude_grid_spacing = 60 # deg
ax.set_longitude_grid(longitude_grid_spacing)
if width < 10:
ax.set_latitude_grid(45)
ax.set_longitude_grid_ends(90)
if graticule_labels:
ax.xaxis.set_major_formatter(ThetaFormatterShiftPi(longitude_grid_spacing))
else:
# remove longitude and latitude labels
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
# colorbar
        if cbar and m is not None:
cb = fig.colorbar(
ret, orientation="horizontal", shrink=0.4, pad=0.05, ticks=[min, max]
)
cb.ax.xaxis.set_label_text(unit)
cb.ax.xaxis.labelpad = -8
# workaround for issue with viewers, see colorbar docstring
cb.solids.set_edgecolor("face")
plt.draw()
finally:
ax.hold(washold)
return ret
def projplot(theta, phi, fmt=None, **kwargs):
"""projplot is a wrapper around :func:`matplotlib.Axes.plot` to take into account the
spherical projection.
You can call this function as::
projplot(theta, phi) # plot a line going through points at coord (theta, phi)
projplot(theta, phi, 'bo') # plot 'o' in blue at coord (theta, phi)
Parameters
----------
theta, phi : float, array-like
Coordinates of point to plot in radians.
fmt : str
A format string (see :func:`matplotlib.Axes.plot` for details)
Notes
-----
Other keywords are passed to :func:`matplotlib.Axes.plot`.
See Also
--------
projscatter, projtext
"""
import matplotlib.pyplot as plt
longitude, latitude = lonlat(theta, phi)
if fmt is None:
ret = plt.plot(longitude, latitude, **kwargs)
else:
ret = plt.plot(longitude, latitude, fmt, **kwargs)
return ret
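# Illustrative overlay (hypothetical coordinates): projplot draws on the axes
# created by mollview, so call it after the map has been plotted, e.g.
#   mollview(m)
#   projplot(np.radians([30, 60]), np.radians([45, 90]), 'ro-')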
| gpl-2.0 |
hammerlab/vaxrank | vaxrank/gene_pathway_check.py | 1 | 5225 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function, division
from collections import OrderedDict
from os.path import join, dirname
import pandas as pd
_ENSEMBL_GENE_ID_COLUMN_NAME = 'Ensembl Gene ID'
_MUTATION_COLUMN_NAME = 'Mutation'
_IFNG_RESPONSE_COLUMN_NAME = 'interferon_gamma_response'
_CLASS_I_MHC_COLUMN_NAME = 'class1_mhc_presentation_pathway'
_DRIVER_GENE_COLUMN_NAME = 'cancer_driver_gene'
_DRIVER_VARIANT_COLUMN_NAME = 'cancer_driver_variant'
_CURRENT_DIR = dirname(__file__)
_DATA_DIR = join(_CURRENT_DIR, "data")
class GenePathwayCheck(object):
"""
This class is meant for use with gene/variant list files from
https://github.com/openvax/gene-lists. Other files can be used as well, but
need to follow a similar column structure. Most logic is based on Ensembl
gene IDs.
Parameters
----------
interferon_gamma_response_csv : str, optional
Local path to interferon-gamma response CSV file.
class1_mhc_presentation_pathway_csv : str, optional
Local path to MHC class I presentation pathway CSV file.
cancer_driver_genes_csv : str, optional
Local path to cancer driver genes CSV file.
cancer_driver_variants_csv : str, optional
Local path to cancer driver variants CSV file.
"""
def __init__(
self,
interferon_gamma_response_csv=None,
class1_mhc_presentation_pathway_csv=None,
cancer_driver_genes_csv=None,
cancer_driver_variants_csv=None):
self.interferon_gamma_response_gene_set = self._load_set_from_csv(
csv_path=interferon_gamma_response_csv,
default_filename="interferon-gamma-response.csv",
description="Interferon gamma response pathway",
column_names=[_ENSEMBL_GENE_ID_COLUMN_NAME])
self.class1_mhc_presentation_pathway_gene_set = self._load_set_from_csv(
csv_path=class1_mhc_presentation_pathway_csv,
default_filename="class1-mhc-presentation-pathway.csv",
description="Class I MHC presentation pathway",
column_names=[_ENSEMBL_GENE_ID_COLUMN_NAME])
self.cancer_driver_genes_set = self._load_set_from_csv(
csv_path=cancer_driver_genes_csv,
default_filename="cancer-driver-genes.csv",
description="Cancer driver genes",
column_names=[_ENSEMBL_GENE_ID_COLUMN_NAME])
# set of gene ID, variant description pairs
self.cancer_driver_variants_set = self._load_set_from_csv(
csv_path=cancer_driver_variants_csv,
default_filename="cancer-driver-variants.csv",
description="Driver variants",
column_names=[_ENSEMBL_GENE_ID_COLUMN_NAME, _MUTATION_COLUMN_NAME])
@classmethod
def _load_set_from_csv(cls, csv_path, default_filename, description, column_names):
if not csv_path:
csv_path = join(_DATA_DIR, default_filename)
df = pd.read_csv(csv_path)
columns = []
for column_name in column_names:
if column_name not in df.columns:
raise ValueError("%s file (%s) needs column '%s'" % (
description,
csv_path,
column_name))
columns.append(df[column_name].values)
if len(columns) == 1:
return set(columns[0])
else:
return set(zip(*columns))
def make_variant_dict(self, variant):
"""
Returns a dictionary of boolean values, depending on whether we see this
variant in any relevant pathway or cancer driver files.
Parameters
----------
variant : varcode.Variant
Variant object to evaluate
"""
effect_description = variant.effects().top_priority_effect().short_description
overlapping_gene_ids = variant.gene_ids
variant_dict = OrderedDict()
variant_dict[_IFNG_RESPONSE_COLUMN_NAME] = any([
gene_id in self.interferon_gamma_response_gene_set
for gene_id in overlapping_gene_ids
])
variant_dict[_CLASS_I_MHC_COLUMN_NAME] = any([
gene_id in self.class1_mhc_presentation_pathway_gene_set
for gene_id in overlapping_gene_ids
])
variant_dict[_DRIVER_GENE_COLUMN_NAME] = any([
gene_id in self.cancer_driver_genes_set
for gene_id in overlapping_gene_ids
])
variant_dict[_DRIVER_VARIANT_COLUMN_NAME] = any([
(gene_id, effect_description) in self.cancer_driver_variants_set
for gene_id in overlapping_gene_ids
])
return variant_dict
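# Minimal usage sketch (illustrative; the variant below is hypothetical and
# assumes varcode is installed with its default reference genome):
#   from varcode import Variant
#   checker = GenePathwayCheck()
#   flags = checker.make_variant_dict(Variant("17", 7578406, "C", "T"))
#   # -> OrderedDict with the four boolean pathway/driver columns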
| apache-2.0 |
RPGOne/scikit-learn | examples/classification/plot_lda_qda.py | 30 | 5150 | """
====================================================================
Linear and Quadratic Discriminant Analysis with confidence ellipsoid
====================================================================
Plot the confidence ellipsoids of each class and decision boundary
"""
print(__doc__)
from scipy import linalg
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import colors
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
###############################################################################
# colormap
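# Each channel of the segment data below is a list of (x, value_left,
# value_right) anchors: e.g. the red channel runs from 1.0 at x=0 to 0.7 at
# x=1, so class-0 regions render reddish and class-1 regions bluish.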
cmap = colors.LinearSegmentedColormap(
'red_blue_classes',
{'red': [(0, 1, 1), (1, 0.7, 0.7)],
'green': [(0, 0.7, 0.7), (1, 0.7, 0.7)],
'blue': [(0, 0.7, 0.7), (1, 1, 1)]})
plt.cm.register_cmap(cmap=cmap)
###############################################################################
# generate datasets
def dataset_fixed_cov():
'''Generate 2 Gaussians samples with the same covariance matrix'''
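    # z ~ N(0, I) multiplied on the right by C yields samples with covariance
    # C.T @ C, so both blobs below share one covariance and differ only by a
    # mean shift of (1, 1).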
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -0.23], [0.83, .23]])
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C) + np.array([1, 1])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
def dataset_cov():
'''Generate 2 Gaussians samples with different covariance matrices'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -1.], [2.5, .7]]) * 2.
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C.T) + np.array([1, 4])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
###############################################################################
# plot functions
def plot_data(lda, X, y, y_pred, fig_index):
splot = plt.subplot(2, 2, fig_index)
if fig_index == 1:
plt.title('Linear Discriminant Analysis')
plt.ylabel('Data with fixed covariance')
elif fig_index == 2:
plt.title('Quadratic Discriminant Analysis')
elif fig_index == 3:
plt.ylabel('Data with varying covariances')
tp = (y == y_pred) # True Positive
tp0, tp1 = tp[y == 0], tp[y == 1]
X0, X1 = X[y == 0], X[y == 1]
X0_tp, X0_fp = X0[tp0], X0[~tp0]
X1_tp, X1_fp = X1[tp1], X1[~tp1]
alpha = 0.5
# class 0: dots
plt.plot(X0_tp[:, 0], X0_tp[:, 1], 'o', alpha=alpha,
color='red')
plt.plot(X0_fp[:, 0], X0_fp[:, 1], '*', alpha=alpha,
color='#990000') # dark red
# class 1: dots
plt.plot(X1_tp[:, 0], X1_tp[:, 1], 'o', alpha=alpha,
color='blue')
plt.plot(X1_fp[:, 0], X1_fp[:, 1], '*', alpha=alpha,
color='#000099') # dark blue
# class 0 and 1 : areas
nx, ny = 200, 100
x_min, x_max = plt.xlim()
y_min, y_max = plt.ylim()
xx, yy = np.meshgrid(np.linspace(x_min, x_max, nx),
np.linspace(y_min, y_max, ny))
Z = lda.predict_proba(np.c_[xx.ravel(), yy.ravel()])
Z = Z[:, 1].reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap='red_blue_classes',
norm=colors.Normalize(0., 1.))
plt.contour(xx, yy, Z, [0.5], linewidths=2., colors='k')
# means
plt.plot(lda.means_[0][0], lda.means_[0][1],
'o', color='black', markersize=10)
plt.plot(lda.means_[1][0], lda.means_[1][1],
'o', color='black', markersize=10)
return splot
def plot_ellipse(splot, mean, cov, color):
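    # eigh on the symmetric covariance returns eigenvalues v (the variances
    # along the principal axes) and the eigenvector matrix w; the ellipse drawn
    # below has axis lengths proportional to sqrt(v) and an orientation angle
    # derived from w, converted from radians to degrees for matplotlib.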
v, w = linalg.eigh(cov)
u = w[0] / linalg.norm(w[0])
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
# filled Gaussian at 2 standard deviation
ell = mpl.patches.Ellipse(mean, 2 * v[0] ** 0.5, 2 * v[1] ** 0.5,
180 + angle, facecolor=color, edgecolor='yellow',
linewidth=2, zorder=2)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
splot.set_xticks(())
splot.set_yticks(())
def plot_lda_cov(lda, splot):
plot_ellipse(splot, lda.means_[0], lda.covariance_, 'red')
plot_ellipse(splot, lda.means_[1], lda.covariance_, 'blue')
def plot_qda_cov(qda, splot):
plot_ellipse(splot, qda.means_[0], qda.covariances_[0], 'red')
plot_ellipse(splot, qda.means_[1], qda.covariances_[1], 'blue')
###############################################################################
for i, (X, y) in enumerate([dataset_fixed_cov(), dataset_cov()]):
# Linear Discriminant Analysis
lda = LinearDiscriminantAnalysis(solver="svd", store_covariance=True)
y_pred = lda.fit(X, y).predict(X)
splot = plot_data(lda, X, y, y_pred, fig_index=2 * i + 1)
plot_lda_cov(lda, splot)
plt.axis('tight')
# Quadratic Discriminant Analysis
qda = QuadraticDiscriminantAnalysis(store_covariances=True)
y_pred = qda.fit(X, y).predict(X)
splot = plot_data(qda, X, y, y_pred, fig_index=2 * i + 2)
plot_qda_cov(qda, splot)
plt.axis('tight')
plt.suptitle('Linear Discriminant Analysis vs Quadratic Discriminant Analysis')
plt.show()
| bsd-3-clause |