repo_name (string, 6-112 chars) | path (string, 4-204 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 714-810k chars) | license (string, 15 classes) |
---|---|---|---|---|---|
cl4rke/scikit-learn | sklearn/preprocessing/data.py | 113 | 56747 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Eric Martin <[email protected]>
# License: BSD 3 clause
from itertools import chain, combinations
import numbers
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils.extmath import row_norms
from ..utils.fixes import combinations_with_replacement as combinations_w_r
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale, mean_variance_axis,
min_max_axis, inplace_row_scale)
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
]
def _mean_and_std(X, axis=0, with_mean=True, with_std=True):
"""Compute mean and std deviation for centering, scaling.
Zero valued std components are reset to 1.0 to avoid NaNs when scaling.
"""
X = np.asarray(X)
Xr = np.rollaxis(X, axis)
if with_mean:
mean_ = Xr.mean(axis=0)
else:
mean_ = None
if with_std:
std_ = Xr.std(axis=0)
std_ = _handle_zeros_in_scale(std_)
else:
std_ = None
return mean_, std_
def _handle_zeros_in_scale(scale):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == 0:
scale = 1.
elif isinstance(scale, np.ndarray):
scale[scale == 0.0] = 1.0
scale[~np.isfinite(scale)] = 1.0
return scale
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like or CSR matrix.
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
    if they expect the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
mean_, std_ = _mean_and_std(
X, axis, with_mean=with_mean, with_std=with_std)
if copy:
X = X.copy()
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = Xr.mean(axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
Xr /= std_
if with_mean:
mean_2 = Xr.mean(axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# std_ is very small so that mean_2 = mean_1/std_ > 0, even if
# mean_1 was close to zero. The problem is thus essentially due
# to the lack of precision of mean_. A solution is then to
                # subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
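# Editor's sketch (not part of the original scikit-learn source): a minimal,
# hedged usage example of the ``scale`` function above.  The toy array and the
# helper name are illustrative assumptions only.
def _scale_usage_sketch():
    X = np.array([[1., 2.],
                  [3., 2.],
                  [5., 2.]])
    # Column 0 (mean 3, std sqrt(8/3)) becomes roughly [-1.22, 0, 1.22];
    # column 1 is constant, so its zero std is reset to 1 by
    # _handle_zeros_in_scale and the centered column stays all zeros.
    return scale(X)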
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
data_min = np.min(X, axis=0)
data_range = np.max(X, axis=0) - data_min
data_range = _handle_zeros_in_scale(data_range)
self.scale_ = (feature_range[1] - feature_range[0]) / data_range
self.min_ = feature_range[0] - data_min * self.scale_
self.data_range = data_range
self.data_min = data_min
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
X -= self.min_
X /= self.scale_
return X
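# Editor's sketch (not part of the original scikit-learn source): a hedged
# example of the MinMaxScaler formula documented above on a toy array; the
# data values and helper name are assumptions for illustration only.
def _minmax_scaler_usage_sketch():
    X = np.array([[1., 2.],
                  [3., 6.]])
    scaler = MinMaxScaler(feature_range=(0, 1)).fit(X)
    # Per-column minima are [1, 2] and ranges are [2, 4], so scale_ is
    # [0.5, 0.25], min_ is [-0.5, -0.5], and the transform yields
    # [[0., 0.], [1., 1.]].
    return scaler.transform(X)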
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
    individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
    than others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
std_ : array of floats with shape [n_features]
The standard deviation for each feature in the training set.
Set to one if the standard deviation is zero for a given feature.
See also
--------
:func:`sklearn.preprocessing.scale` to perform centering and
scaling without using the ``Transformer`` object oriented API
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
"""
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : array-like or CSR matrix with shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse='csr', copy=self.copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
self.mean_ = None
if self.with_std:
var = mean_variance_axis(X, axis=0)[1]
self.std_ = np.sqrt(var)
self.std_ = _handle_zeros_in_scale(self.std_)
else:
self.std_ = None
return self
else:
self.mean_, self.std_ = _mean_and_std(
X, axis=0, with_mean=self.with_mean, with_std=self.with_std)
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.std_ is not None:
inplace_column_scale(X, 1 / self.std_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.std_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.std_ is not None:
inplace_column_scale(X, self.std_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.std_
if self.with_mean:
X += self.mean_
return X
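# Editor's sketch (not part of the original scikit-learn source): a hedged
# round-trip example for StandardScaler; the toy data and helper name are
# illustrative assumptions only.
def _standard_scaler_usage_sketch():
    X = np.array([[0.], [0.], [1.], [1.]])
    scaler = StandardScaler().fit(X)
    # mean_ is [0.5] and std_ is [0.5], so transform gives
    # [[-1.], [-1.], [1.], [1.]] and inverse_transform recovers X.
    X_scaled = scaler.transform(X)
    return scaler.inverse_transform(X_scaled)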
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
This estimator scales and translates each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, copy=True):
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0)
scales = np.maximum(np.abs(mins), np.abs(maxs))
else:
scales = np.abs(X).max(axis=0)
scales = np.array(scales)
scales = scales.reshape(-1)
self.scale_ = _handle_zeros_in_scale(scales)
return self
def transform(self, X, y=None):
"""Scale the data
Parameters
----------
X : array-like or CSR matrix.
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
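# Editor's sketch (not part of the original scikit-learn source): a hedged
# example of MaxAbsScaler on a dense toy array with mixed signs; the values
# and helper name are assumptions for illustration only.
def _maxabs_scaler_usage_sketch():
    X = np.array([[1., -2.],
                  [2., 0.],
                  [-4., 1.]])
    # Per-column maximum absolute values are [4, 2]; dividing by them maps
    # every feature into [-1, 1] while leaving zeros untouched, which is why
    # this scaler preserves sparsity.
    return MaxAbsScaler().fit_transform(X)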
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MaxAbsScaler(copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the Interquartile Range (IQR). The IQR is the range between the 1st
quartile (25th quantile) and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature (or each
sample, depending on the `axis` argument) by computing the relevant
statistics on the samples in the training set. Median and interquartile
range are then stored to be used on later data using the `transform`
method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering
and scaling using mean and variance.
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
Notes
-----
See examples/preprocessing/plot_robust_scaling.py for an example.
http://en.wikipedia.org/wiki/Median_(statistics)
http://en.wikipedia.org/wiki/Interquartile_range
"""
def __init__(self, with_centering=True, with_scaling=True, copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.copy = copy
def _check_array(self, X, copy):
"""Makes sure centering is not enabled for sparse matrices."""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_centering:
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
return X
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
if sparse.issparse(X):
raise TypeError("RobustScaler cannot be fitted on sparse inputs")
X = self._check_array(X, self.copy)
if self.with_centering:
self.center_ = np.median(X, axis=0)
if self.with_scaling:
q = np.percentile(X, (25, 75), axis=0)
self.scale_ = (q[1] - q[0])
self.scale_ = _handle_zeros_in_scale(self.scale_)
return self
def transform(self, X, y=None):
"""Center and scale the data
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
                else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
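# Editor's sketch (not part of the original scikit-learn source): a hedged
# example showing why RobustScaler is robust to outliers; the toy data and
# helper name are illustrative assumptions only.
def _robust_scaler_usage_sketch():
    # A single extreme value barely moves the median and IQR, unlike the
    # mean and variance used by StandardScaler.
    X = np.array([[1.], [2.], [3.], [4.], [100.]])
    scaler = RobustScaler().fit(X)
    # center_ is the median (3.0) and scale_ is the IQR (4.0 - 2.0 = 2.0),
    # so 2.0 maps to -0.5, 4.0 maps to 0.5, and the outlier maps to 48.5
    # instead of dominating the statistics.
    return scaler.transform(X)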
def robust_scale(X, axis=0, with_centering=True, with_scaling=True, copy=True):
"""Standardize a dataset along any axis
Center to the median and component wise scale
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like.
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_centering=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
    if they expect the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.RobustScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0, 0, 1],
[ 1, 2, 3, 4, 6, 9],
[ 1, 4, 5, 16, 20, 25]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0],
[ 1, 2, 3, 6],
[ 1, 4, 5, 20]])
Attributes
----------
powers_ : array, shape (n_input_features, n_output_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<example_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
return np.vstack(np.bincount(c, minlength=self.n_input_features_)
for c in combinations)
def fit(self, X, y=None):
"""
Compute number of output features.
"""
n_samples, n_features = check_array(X).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X, y=None):
"""Transform data to polynomial features
Parameters
----------
X : array with shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X)
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
# allocate output data
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
for i, c in enumerate(combinations):
XP[:, i] = X[:, c].prod(1)
return XP
def normalize(X, norm='l2', axis=1, copy=True):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Normalizer` to perform normalization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms = norms.repeat(np.diff(X.indptr))
mask = norms != 0
X.data[mask] /= norms[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
return X
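# Editor's sketch (not part of the original scikit-learn source): a hedged
# example of row-wise L2 normalization with the ``normalize`` function above;
# the toy data and helper name are assumptions for illustration only.
def _normalize_usage_sketch():
    X = np.array([[3., 4.],
                  [0., 0.]])
    # The first row has L2 norm 5 and becomes [0.6, 0.8]; the all-zero row is
    # returned unchanged because its zero norm is replaced by 1 in
    # _handle_zeros_in_scale.
    return normalize(X, norm='l2', axis=1)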
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
classification or clustering for instance. For instance the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
:func:`sklearn.preprocessing.normalize` equivalent function
without the object oriented API
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
"""
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Binarizer` to perform binarization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
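# Editor's sketch (not part of the original scikit-learn source): a hedged
# example of ``binarize`` with the default threshold; the toy data and helper
# name are assumptions for illustration only.
def _binarize_usage_sketch():
    X = np.array([[0.5, -1., 2., 0.]])
    # With threshold=0.0, only strictly positive entries map to 1, so the
    # result is [[1., 0., 1., 0.]].
    return binarize(X, threshold=0.0)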
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
    normalizes to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y=None, copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
check_is_fitted(self, 'K_fit_all_')
K = check_array(K)
if copy:
K = K.copy()
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
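# Editor's sketch (not part of the original scikit-learn source): a hedged
# check that centering a linear kernel K = X X^T with KernelCenterer matches
# centering X explicitly, as the docstring above claims; the random data and
# helper name are assumptions for illustration only.
def _kernel_centerer_usage_sketch():
    rng = np.random.RandomState(0)
    X = rng.rand(5, 3)
    K = np.dot(X, X.T)
    K_centered = KernelCenterer().fit(K).transform(K)
    X_centered = X - X.mean(axis=0)
    # Both routes yield the same matrix up to floating point error.
    return np.allclose(K_centered, np.dot(X_centered, X_centered.T))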
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : array or scipy.sparse matrix with shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'])
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
if selected == "all":
return transform(X)
X = check_array(X, accept_sparse='csc', copy=copy)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : maximum value for all features.
- array : maximum value per feature.
categorical_features: "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
        Whether to raise an error or ignore if an unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'float'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if self.n_values == 'auto':
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
                                % type(self.n_values))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
        # We use only those categorical features of X that are known using fit,
        # i.e. less than n_values_, using mask.
# This means, if self.handle_unknown is "ignore", the row_indices and
# col_indices corresponding to the unknown categorical feature are
# ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
raise ValueError("handle_unknown should be either error or "
"unknown got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Input array of type int.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
| bsd-3-clause |
xiaoxiamii/scikit-learn | examples/applications/topics_extraction_with_nmf_lda.py | 133 | 3517 | """
========================================================================================
Topics extraction with Non-Negative Matrix Factorization And Latent Dirichlet Allocation
========================================================================================
This is an example of applying Non-negative Matrix Factorization
and Latent Dirichlet Allocation on a corpus of documents and
extracting additive models of the topic structure of the corpus.
The output is a list of topics, each represented as a list of terms
(weights are not shown).
The default parameters (n_samples / n_features / n_topics) should make
the example runnable in a couple of tens of seconds. You can try to
increase the dimensions of the problem, but be aware that the time
complexity is polynomial in NMF. In LDA, the time complexity is
proportional to (n_samples * iterations).
"""
# Author: Olivier Grisel <[email protected]>
# Lars Buitinck <[email protected]>
# Chyi-Kwei Yau <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from time import time
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import NMF, LatentDirichletAllocation
from sklearn.datasets import fetch_20newsgroups
n_samples = 2000
n_features = 1000
n_topics = 10
n_top_words = 20
def print_top_words(model, feature_names, n_top_words):
for topic_idx, topic in enumerate(model.components_):
print("Topic #%d:" % topic_idx)
print(" ".join([feature_names[i] for i in topic.argsort()[:-n_top_words - 1:-1]]))
print()
# Load the 20 newsgroups dataset and vectorize it. We use a few heuristics
# to filter out useless terms early on: the posts are stripped of headers,
# footers and quoted replies, and common English words, words occurring in
# only one document or in at least 95% of the documents are removed.
t0 = time()
print("Loading dataset and extracting features...")
dataset = fetch_20newsgroups(shuffle=True, random_state=1,
remove=('headers', 'footers', 'quotes'))
data_samples = dataset.data[:n_samples]
# use tf-idf feature for NMF model
tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, max_features=n_features,
stop_words='english')
tfidf = tfidf_vectorizer.fit_transform(data_samples)
# use tf feature for LDA model
tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2, max_features=n_features,
stop_words='english')
tf = tf_vectorizer.fit_transform(data_samples)
print("done in %0.3fs." % (time() - t0))
# Fit the NMF model
print("Fitting the NMF model with tf-idf feature, n_samples=%d and n_features=%d..."
% (n_samples, n_features))
nmf = NMF(n_components=n_topics, random_state=1).fit(tfidf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in NMF model:")
tfidf_feature_names = tfidf_vectorizer.get_feature_names()
print_top_words(nmf, tfidf_feature_names, n_top_words)
print("\nFitting LDA models with tf feature, n_samples=%d and n_features=%d..."
% (n_samples, n_features))
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=5,
learning_method='online', learning_offset=50.,
random_state=0)
lda.fit(tf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in LDA model:")
tf_feature_names = tf_vectorizer.get_feature_names()
print_top_words(lda, tf_feature_names, n_top_words)
| bsd-3-clause |
lllcho/CAPTCHA-breaking | keras-master/examples/kaggle_otto_nn.py | 70 | 3775 | from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import pandas as pd
np.random.seed(1337) # for reproducibility
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import PReLU
from keras.utils import np_utils, generic_utils
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
'''
This demonstrates how to reach a score of 0.4890 (local validation)
on the Kaggle Otto challenge, with a deep net using Keras.
Compatible Python 2.7-3.4. Requires Scikit-Learn and Pandas.
Recommended to run on GPU:
Command: THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python kaggle_otto_nn.py
On EC2 g2.2xlarge instance: 19s/epoch. 6-7 minutes total training time.
Best validation score at epoch 21: 0.4881
Try it at home:
- with/without BatchNormalization (BatchNormalization helps!)
- with ReLU or with PReLU (PReLU helps!)
- with smaller layers, largers layers
- with more layers, less layers
- with different optimizers (SGD+momentum+decay is probably better than Adam!)
Get the data from Kaggle: https://www.kaggle.com/c/otto-group-product-classification-challenge/data
'''
def load_data(path, train=True):
df = pd.read_csv(path)
X = df.values.copy()
if train:
np.random.shuffle(X) # https://youtu.be/uyUXoap67N8
X, labels = X[:, 1:-1].astype(np.float32), X[:, -1]
return X, labels
else:
X, ids = X[:, 1:].astype(np.float32), X[:, 0].astype(str)
return X, ids
def preprocess_data(X, scaler=None):
if not scaler:
scaler = StandardScaler()
scaler.fit(X)
X = scaler.transform(X)
return X, scaler
def preprocess_labels(labels, encoder=None, categorical=True):
if not encoder:
encoder = LabelEncoder()
encoder.fit(labels)
y = encoder.transform(labels).astype(np.int32)
if categorical:
y = np_utils.to_categorical(y)
return y, encoder
def make_submission(y_prob, ids, encoder, fname):
with open(fname, 'w') as f:
f.write('id,')
f.write(','.join([str(i) for i in encoder.classes_]))
f.write('\n')
for i, probs in zip(ids, y_prob):
probas = ','.join([i] + [str(p) for p in probs.tolist()])
f.write(probas)
f.write('\n')
print("Wrote submission to file {}.".format(fname))
print("Loading data...")
X, labels = load_data('train.csv', train=True)
X, scaler = preprocess_data(X)
y, encoder = preprocess_labels(labels)
X_test, ids = load_data('test.csv', train=False)
X_test, _ = preprocess_data(X_test, scaler)
nb_classes = y.shape[1]
print(nb_classes, 'classes')
dims = X.shape[1]
print(dims, 'dims')
print("Building model...")
model = Sequential()
model.add(Dense(dims, 512, init='glorot_uniform'))
model.add(PReLU((512,)))
model.add(BatchNormalization((512,)))
model.add(Dropout(0.5))
model.add(Dense(512, 512, init='glorot_uniform'))
model.add(PReLU((512,)))
model.add(BatchNormalization((512,)))
model.add(Dropout(0.5))
model.add(Dense(512, 512, init='glorot_uniform'))
model.add(PReLU((512,)))
model.add(BatchNormalization((512,)))
model.add(Dropout(0.5))
model.add(Dense(512, nb_classes, init='glorot_uniform'))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer="adam")
print("Training model...")
model.fit(X, y, nb_epoch=20, batch_size=128, validation_split=0.15)
print("Generating submission...")
proba = model.predict_proba(X_test)
make_submission(proba, ids, encoder, fname='keras-otto.csv')
| mit |
sctigercat1/panda3d | direct/src/ffi/panda3d.py | 10 | 9620 | #!/bin/true
import os, sys, imp
panda3d_modules = {
"core" :("libpandaexpress", "libpanda"),
"dtoolconfig" : "libp3dtoolconfig",
"physics" : "libpandaphysics",
"fx" : "libpandafx",
"direct" : "libp3direct",
"egg" : "libpandaegg",
"ode" : "libpandaode",
"bullet" : "libpandabullet",
"vision" : "libp3vision",
"physx" : "libpandaphysx",
"ai" : "libpandaai",
"awesomium" : "libp3awesomium",
"speedtree" : "libpandaspeedtree",
"rocket" :("_rocketcore", "_rocketcontrols", "libp3rocket"),
"vrpn" : "libp3vrpn",
}
class panda3d_import_manager:
    # Important: store a reference to the sys and os modules, as
    # all references in the global namespace will be reset.
    os = os
    sys = sys
    imp = imp
    __libraries__ = {}
    # Figure out the dll suffix (commonly, _d for windows debug builds),
    # and the dll extension.
    dll_suffix = ''
    dll_exts = ('.pyd', '.so')
    if sys.platform == "win32":
        dll_exts = ('.pyd', '.dll')
        # We allow the caller to preload dll_suffix into the sys module.
        dll_suffix = getattr(sys, 'dll_suffix', None)
        if dll_suffix is None:
            # Otherwise, we try to determine it from the executable name:
            # python_d.exe implies _d across the board.
            dll_suffix = ''
            if sys.executable.endswith('_d.exe'):
                dll_suffix = '_d'
    # On OSX, extension modules can be loaded from either .so or .dylib.
    if sys.platform == "darwin":
        dll_exts = ('.pyd', '.so', '.dylib')
    prepared = False
    @classmethod
    def __prepare(cls):
        # This method only needs to be called once.
        if cls.prepared:
            return
        cls.prepared = True
        # First, we must ensure that the library path is
        # modified to locate all of the dynamic libraries.
        target = None
        filename = "libpandaexpress" + cls.dll_suffix
        for dir in cls.sys.path + [cls.sys.prefix]:
            lib = cls.os.path.join(dir, filename)
            for dll_ext in cls.dll_exts:
                if (cls.os.path.exists(lib + dll_ext)):
                    target = dir
                    break
        if target is None:
            raise ImportError("Cannot find %s" % (filename))
        target = cls.os.path.abspath(target)
        # And add that directory to the system library path.
        if cls.sys.platform == "win32":
            cls.__prepend_to_path("PATH", target)
        else:
            cls.__prepend_to_path("LD_LIBRARY_PATH", target)
            if cls.sys.platform == "darwin":
                cls.__prepend_to_path("DYLD_LIBRARY_PATH", target)
    @classmethod
    def __prepend_to_path(cls, varname, target):
        """ Prepends the given directory to the
        specified search path environment variable. """
        # Get the current value
        if varname in cls.os.environ:
            path = cls.os.environ[varname].strip(cls.os.pathsep)
        else:
            path = ""
        # Prepend our value, if it's not already the first thing
        if len(path) == 0:
            cls.os.environ[varname] = target
        elif not path.startswith(target):
            cls.os.environ[varname] = target + cls.os.pathsep + path
    @classmethod
    def libimport(cls, name):
        """ Imports and returns the specified library name. The
        provided library name has to be without dll extension. """
        if name in cls.__libraries__:
            return cls.__libraries__[name]
        if not cls.prepared: cls.__prepare()
        # Try to import it normally first.
        try:
            return __import__(name)
        except ImportError:
            _, err, _ = cls.sys.exc_info()
            if str(err) != "No module named " + name and \
               str(err) != "No module named '%s'" % name:
                raise
        # Hm, importing normally didn't work. Let's try imp.load_dynamic.
        # But first, locate the desired library.
        target = None
        filename = name + cls.dll_suffix
        for dir in cls.sys.path + [cls.sys.prefix]:
            lib = cls.os.path.join(dir, filename)
            for dll_ext in cls.dll_exts:
                if (cls.os.path.exists(lib + dll_ext)):
                    target = lib + dll_ext
                    break
            if target:
                # Once we find the first match, break all the way
                # out--don't keep looking for a second match.
                break
        if target is None:
            message = "DLL loader cannot find %s." % name
            raise ImportError(message)
        target = cls.os.path.abspath(target)
        # Now import the file explicitly.
        lib = cls.imp.load_dynamic(name, target)
        cls.__libraries__[name] = lib
        return lib
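    # Added note (illustrative, not part of the original file): a typical call
    # is panda3d_import_manager.libimport("libpandaexpress"); the manager
    # appends dll_suffix, tries each extension in dll_exts, and caches the
    # loaded module in __libraries__ so repeated imports are free.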
class panda3d_submodule(type(sys)):
    """ Represents a submodule of 'panda3d' that wraps a single dynamic
    library. This dynamic library is loaded when something is accessed
    from the module. """
    __manager__ = panda3d_import_manager
    def __init__(self, name, library):
        type(sys).__init__(self, "panda3d." + name)
        self.__library__ = library
        self.__libraries__ = [self.__library__]
    def __load__(self):
        """ Forces the library to be loaded right now. """
        self.__manager__.libimport(self.__library__)
    def __getattr__(self, name):
        mod = self.__manager__.libimport(self.__library__)
        if name == "__all__":
            everything = []
            for obj in mod.__dict__.keys():
                if not obj.startswith("__"):
                    everything.append(obj)
            self.__all__ = everything
            return everything
        elif name == "__library__":
            return self.__library__
        elif name == "__libraries__":
            return self.__libraries__
        elif name in mod.__dict__.keys():
            value = mod.__dict__[name]
            setattr(self, name, value)
            return value
        # Not found? Raise the error that Python would normally raise.
        raise AttributeError("'module' object has no attribute '%s'" % name)
class panda3d_multisubmodule(type(sys)):
    """ Represents a submodule of 'panda3d' that wraps multiple
    dynamic libraries. These are loaded when something is accessed
    from the module. """
    __manager__ = panda3d_import_manager
    def __init__(self, name, libraries):
        type(sys).__init__(self, "panda3d." + name)
        self.__libraries__ = libraries
    def __load__(self):
        """ Forces the libraries to be loaded right now. """
        err = []
        for lib in self.__libraries__:
            try:
                self.__manager__.libimport(lib)
            except ImportError:
                _, msg, _ = self.__manager__.sys.exc_info()
                err.append(str(msg).rstrip('.'))
        if len(err) > 0:
            raise ImportError(', '.join(err))
    def __getattr__(self, name):
        if name == "__all__":
            everything = []
            for lib in self.__libraries__:
                for obj in self.__manager__.libimport(lib).__dict__:
                    if not obj.startswith("__"):
                        everything.append(obj)
            self.__all__ = everything
            return everything
        elif name == "__libraries__":
            return self.__libraries__
        for lib in self.__libraries__:
            mod = self.__manager__.libimport(lib)
            if name in mod.__dict__:
                value = mod.__dict__[name]
                setattr(self, name, value)
                return value
        # Not found? Raise the error that Python would normally raise.
        raise AttributeError("'module' object has no attribute '%s'" % name)
class panda3d_module(type(sys)):
    """ Represents the main 'panda3d' module. """
    __file__ = __file__
    modules = panda3d_modules
    __manager__ = panda3d_import_manager
    def __load__(self):
        """ Force all the libraries to be loaded right now. """
        err = []
        for module in self.modules:
            try:
                self.__manager__.sys.modules["panda3d.%s" % module].__load__()
            except ImportError:
                _, msg, _ = self.__manager__.sys.exc_info()
                err.append(str(msg).rstrip('.'))
        if len(err) > 0:
            raise ImportError(', '.join(err))
    def __getattr__(self, name):
        if name == "__all__":
            self.__all__ = self.modules.keys()
            return self.__all__
        elif name == "__file__":
            return self.__file__
        elif name in self.modules:
            value = self.__manager__.sys.modules["panda3d.%s" % name]
            setattr(self, name, value)
            return value
        # Not found? Raise the error that Python would normally raise.
        raise AttributeError("'module' object has no attribute '%s'" % name)
# Create the fake module objects and insert them into sys.modules.
this = panda3d_module("panda3d")
# Loop through the module dictionary, create a fake
# module for each of them, and insert them into
# sys.modules and into the 'panda3d' fake module.
for mod, lib in panda3d_modules.items():
    if isinstance(lib, tuple):
        module = panda3d_multisubmodule(mod, lib)
    else:
        module = panda3d_submodule(mod, lib)
    sys.modules["panda3d." + mod] = module
    this.__dict__[mod] = module
# Important: this must be the last thing in this file
sys.modules["panda3d"] = this
| bsd-3-clause |
glennq/scikit-learn | examples/tree/plot_tree_regression_multioutput.py | 73 | 1854 | """
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example to illustrate multi-output regression with decision trees.
The :ref:`decision trees <tree>`
are used to simultaneously predict the noisy x and y observations of a circle
given a single underlying feature. As a result, they learn local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn overly fine
details of the training data and fit the noise, i.e. they overfit.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_3 = DecisionTreeRegressor(max_depth=8)
regr_1.fit(X, y)
regr_2.fit(X, y)
regr_3.fit(X, y)
# Predict
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
y_3 = regr_3.predict(X_test)
# Plot the results
plt.figure()
s = 50
plt.scatter(y[:, 0], y[:, 1], c="navy", s=s, label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="cornflowerblue", s=s, label="max_depth=2")
plt.scatter(y_2[:, 0], y_2[:, 1], c="c", s=s, label="max_depth=5")
plt.scatter(y_3[:, 0], y_3[:, 1], c="orange", s=s, label="max_depth=8")
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("target 1")
plt.ylabel("target 2")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
aewhatley/scikit-learn | examples/neighbors/plot_approximate_nearest_neighbors_scalability.py | 225 | 5719 | """
============================================
Scalability of Approximate Nearest Neighbors
============================================
This example studies the scalability profile of approximate 10-neighbors
queries using the LSHForest with ``n_estimators=20`` and ``n_candidates=200``
when varying the number of samples in the dataset.
The first plot demonstrates the relationship between query time and index size
of LSHForest. Query time is compared with the brute force method in exact
nearest neighbor search for the same index sizes. The brute force queries have a
very predictable linear scalability with the index (full scan). The LSHForest
index has a sub-linear scalability profile but can be slower for small datasets.
The second plot shows the speedup when using approximate queries vs brute force
exact queries. The speedup tends to increase with the dataset size but should
reach a plateau typically when doing queries on datasets with millions of
samples and a few hundred dimensions. Higher dimensional datasets tend to
benefit more from LSHForest indexing.
The break-even point (speedup = 1) depends on the dimensionality and structure
of the indexed data and the parameters of the LSHForest index.
The precision of approximate queries should decrease slowly with the dataset
size. The speed of the decrease depends mostly on the LSHForest parameters and
the dimensionality of the data.
"""
from __future__ import division
print(__doc__)
# Authors: Maheshakya Wijewardena <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
###############################################################################
import time
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Parameters of the study
n_samples_min = int(1e3)
n_samples_max = int(1e5)
n_features = 100
n_centers = 100
n_queries = 100
n_steps = 6
n_iter = 5
# Initialize the range of `n_samples`
n_samples_values = np.logspace(np.log10(n_samples_min),
np.log10(n_samples_max),
n_steps).astype(np.int)
# Generate some structured data
rng = np.random.RandomState(42)
all_data, _ = make_blobs(n_samples=n_samples_max + n_queries,
n_features=n_features, centers=n_centers, shuffle=True,
random_state=0)
queries = all_data[:n_queries]
index_data = all_data[n_queries:]
# Metrics to collect for the plots
average_times_exact = []
average_times_approx = []
std_times_approx = []
accuracies = []
std_accuracies = []
average_speedups = []
std_speedups = []
# Calculate the average query time
for n_samples in n_samples_values:
X = index_data[:n_samples]
    # Initialize LSHForest for 10-neighbors queries
lshf = LSHForest(n_estimators=20, n_candidates=200,
n_neighbors=10).fit(X)
nbrs = NearestNeighbors(algorithm='brute', metric='cosine',
n_neighbors=10).fit(X)
time_approx = []
time_exact = []
accuracy = []
for i in range(n_iter):
# pick one query at random to study query time variability in LSHForest
query = queries[rng.randint(0, n_queries)]
t0 = time.time()
exact_neighbors = nbrs.kneighbors(query, return_distance=False)
time_exact.append(time.time() - t0)
t0 = time.time()
approx_neighbors = lshf.kneighbors(query, return_distance=False)
time_approx.append(time.time() - t0)
accuracy.append(np.in1d(approx_neighbors, exact_neighbors).mean())
average_time_exact = np.mean(time_exact)
average_time_approx = np.mean(time_approx)
speedup = np.array(time_exact) / np.array(time_approx)
average_speedup = np.mean(speedup)
mean_accuracy = np.mean(accuracy)
std_accuracy = np.std(accuracy)
print("Index size: %d, exact: %0.3fs, LSHF: %0.3fs, speedup: %0.1f, "
"accuracy: %0.2f +/-%0.2f" %
(n_samples, average_time_exact, average_time_approx, average_speedup,
mean_accuracy, std_accuracy))
accuracies.append(mean_accuracy)
std_accuracies.append(std_accuracy)
average_times_exact.append(average_time_exact)
average_times_approx.append(average_time_approx)
std_times_approx.append(np.std(time_approx))
average_speedups.append(average_speedup)
std_speedups.append(np.std(speedup))
# Plot average query time against n_samples
plt.figure()
plt.errorbar(n_samples_values, average_times_approx, yerr=std_times_approx,
fmt='o-', c='r', label='LSHForest')
plt.plot(n_samples_values, average_times_exact, c='b',
label="NearestNeighbors(algorithm='brute', metric='cosine')")
plt.legend(loc='upper left', fontsize='small')
plt.ylim(0, None)
plt.ylabel("Average query time in seconds")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Impact of index size on response time for first "
"nearest neighbors queries")
# Plot average query speedup versus index size
plt.figure()
plt.errorbar(n_samples_values, average_speedups, yerr=std_speedups,
fmt='o-', c='r')
plt.ylim(0, None)
plt.ylabel("Average speedup")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Speedup of the approximate NN queries vs brute force")
# Plot average precision versus index size
plt.figure()
plt.errorbar(n_samples_values, accuracies, std_accuracies, fmt='o-', c='c')
plt.ylim(0, 1.1)
plt.ylabel("precision@10")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("precision of 10-nearest-neighbors queries with index size")
plt.show()
| bsd-3-clause |
Akshay0724/scikit-learn | examples/linear_model/plot_omp.py | 385 | 2263 | """
===========================
Orthogonal Matching Pursuit
===========================
Using orthogonal matching pursuit for recovering a sparse signal from a noisy
measurement encoded with a dictionary.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model import OrthogonalMatchingPursuitCV
from sklearn.datasets import make_sparse_coded_signal
n_components, n_features = 512, 100
n_nonzero_coefs = 17
# generate the data
###################
# y = Xw
# |x|_0 = n_nonzero_coefs
y, X, w = make_sparse_coded_signal(n_samples=1,
n_components=n_components,
n_features=n_features,
n_nonzero_coefs=n_nonzero_coefs,
random_state=0)
idx, = w.nonzero()
# distort the clean signal
##########################
y_noisy = y + 0.05 * np.random.randn(len(y))
# plot the sparse signal
########################
plt.figure(figsize=(7, 7))
plt.subplot(4, 1, 1)
plt.xlim(0, 512)
plt.title("Sparse signal")
plt.stem(idx, w[idx])
# plot the noise-free reconstruction
####################################
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 2)
plt.xlim(0, 512)
plt.title("Recovered signal from noise-free measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction
###############################
omp.fit(X, y_noisy)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 3)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction with number of non-zeros set by CV
##################################################################
omp_cv = OrthogonalMatchingPursuitCV()
omp_cv.fit(X, y_noisy)
coef = omp_cv.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 4)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements with CV")
plt.stem(idx_r, coef[idx_r])
plt.subplots_adjust(0.06, 0.04, 0.94, 0.90, 0.20, 0.38)
plt.suptitle('Sparse signal recovery with Orthogonal Matching Pursuit',
fontsize=16)
plt.show()
| bsd-3-clause |
theunissenlab/python-neo | doc/source/images/generate_diagram.py | 6 | 7653 | # -*- coding: utf-8 -*-
"""
This generates diagrams in .png and .svg from neo.core
Author: sgarcia
"""
from datetime import datetime
import numpy as np
import quantities as pq
from matplotlib import pyplot
from matplotlib.patches import Rectangle, ArrowStyle, FancyArrowPatch
from matplotlib.font_manager import FontProperties
from neo.test.generate_datasets import fake_neo
line_heigth = .22
fontsize = 10.5
left_text_shift = .1
dpi = 100
def get_rect_height(name, obj):
'''
calculate rectangle height
'''
nlines = 1.5
nlines += len(getattr(obj, '_all_attrs', []))
nlines += len(getattr(obj, '_single_child_objects', []))
nlines += len(getattr(obj, '_multi_child_objects', []))
nlines += len(getattr(obj, '_multi_parent_objects', []))
return nlines*line_heigth
def annotate(ax, coord1, coord2, connectionstyle, color, alpha):
arrowprops = dict(arrowstyle='fancy',
#~ patchB=p,
shrinkA=.3, shrinkB=.3,
fc=color, ec=color,
connectionstyle=connectionstyle,
alpha=alpha)
bbox = dict(boxstyle="square", fc="w")
a = ax.annotate('', coord1, coord2,
#xycoords="figure fraction",
#textcoords="figure fraction",
ha="right", va="center",
size=fontsize,
arrowprops=arrowprops,
bbox=bbox)
a.set_zorder(-4)
def calc_coordinates(pos, height):
x = pos[0]
y = pos[1] + height - line_heigth*.5
return pos[0], y
def generate_diagram(filename, rect_pos, rect_width, figsize):
rw = rect_width
fig = pyplot.figure(figsize=figsize)
ax = fig.add_axes([0, 0, 1, 1])
all_h = {}
objs = {}
for name in rect_pos:
objs[name] = fake_neo(name)
all_h[name] = get_rect_height(name, objs[name])
# draw connections
color = ['c', 'm', 'y']
alpha = [1., 1., 0.3]
for name, pos in rect_pos.items():
obj = objs[name]
relationships = [getattr(obj, '_single_child_objects', []),
getattr(obj, '_multi_child_objects', []),
getattr(obj, '_child_properties', [])]
for r in range(3):
for ch_name in relationships[r]:
x1, y1 = calc_coordinates(rect_pos[ch_name], all_h[ch_name])
x2, y2 = calc_coordinates(pos, all_h[name])
if r in [0, 2]:
x2 += rect_width
connectionstyle = "arc3,rad=-0.2"
elif y2 >= y1:
connectionstyle = "arc3,rad=0.7"
else:
connectionstyle = "arc3,rad=-0.7"
annotate(ax=ax, coord1=(x1, y1), coord2=(x2, y2),
connectionstyle=connectionstyle,
color=color[r], alpha=alpha[r])
# draw boxes
for name, pos in rect_pos.items():
htotal = all_h[name]
obj = objs[name]
allrelationship = (list(getattr(obj, '_child_containers', [])) +
list(getattr(obj, '_multi_parent_containers', [])))
rect = Rectangle(pos, rect_width, htotal,
facecolor='w', edgecolor='k', linewidth=2.)
ax.add_patch(rect)
# title green
pos2 = pos[0], pos[1]+htotal - line_heigth*1.5
rect = Rectangle(pos2, rect_width, line_heigth*1.5,
facecolor='g', edgecolor='k', alpha=.5, linewidth=2.)
ax.add_patch(rect)
# single relationship
relationship = getattr(obj, '_single_child_objects', [])
pos2 = pos[1] + htotal - line_heigth*(1.5+len(relationship))
rect_height = len(relationship)*line_heigth
rect = Rectangle((pos[0], pos2), rect_width, rect_height,
facecolor='c', edgecolor='k', alpha=.5)
ax.add_patch(rect)
# multi relationship
relationship = (list(getattr(obj, '_multi_child_objects', [])) +
list(getattr(obj, '_multi_parent_containers', [])))
pos2 = (pos[1]+htotal - line_heigth*(1.5+len(relationship)) -
rect_height)
rect_height = len(relationship)*line_heigth
rect = Rectangle((pos[0], pos2), rect_width, rect_height,
facecolor='m', edgecolor='k', alpha=.5)
ax.add_patch(rect)
# necessary attr
pos2 = (pos[1]+htotal -
line_heigth*(1.5+len(allrelationship) +
len(obj._necessary_attrs)))
rect = Rectangle((pos[0], pos2), rect_width,
line_heigth*len(obj._necessary_attrs),
facecolor='r', edgecolor='k', alpha=.5)
ax.add_patch(rect)
# name
if hasattr(obj, '_quantity_attr'):
post = '* '
else:
post = ''
ax.text(pos[0]+rect_width/2., pos[1]+htotal - line_heigth*1.5/2.,
name+post,
horizontalalignment='center', verticalalignment='center',
fontsize=fontsize+2,
fontproperties=FontProperties(weight='bold'),
)
#relationship
for i, relat in enumerate(allrelationship):
ax.text(pos[0]+left_text_shift, pos[1]+htotal - line_heigth*(i+2),
relat+': list',
horizontalalignment='left', verticalalignment='center',
fontsize=fontsize,
)
# attributes
for i, attr in enumerate(obj._all_attrs):
attrname, attrtype = attr[0], attr[1]
t1 = attrname
if (hasattr(obj, '_quantity_attr') and
obj._quantity_attr == attrname):
t1 = attrname+'(object itself)'
else:
t1 = attrname
if attrtype == pq.Quantity:
if attr[2] == 0:
t2 = 'Quantity scalar'
else:
t2 = 'Quantity %dD' % attr[2]
elif attrtype == np.ndarray:
t2 = "np.ndarray %dD dt='%s'" % (attr[2], attr[3].kind)
elif attrtype == datetime:
t2 = 'datetime'
else:
t2 = attrtype.__name__
t = t1+' : '+t2
ax.text(pos[0]+left_text_shift,
pos[1]+htotal - line_heigth*(i+len(allrelationship)+2),
t,
horizontalalignment='left', verticalalignment='center',
fontsize=fontsize,
)
xlim, ylim = figsize
ax.set_xlim(0, xlim)
ax.set_ylim(0, ylim)
ax.set_xticks([])
ax.set_yticks([])
fig.savefig(filename, dpi=dpi)
def generate_diagram_simple():
figsize = (18, 12)
rw = rect_width = 3.
bf = blank_fact = 1.2
rect_pos = {'Block': (.5+rw*bf*0, 4),
'Segment': (.5+rw*bf*1, .5),
'Event': (.5+rw*bf*4, 3.0),
'Epoch': (.5+rw*bf*4, 1.0),
'ChannelIndex': (.5+rw*bf*1, 7.5),
'Unit': (.5+rw*bf*2., 9.9),
'SpikeTrain': (.5+rw*bf*3, 7.5),
'IrregularlySampledSignal': (.5+rw*bf*3, 0.5),
'AnalogSignal': (.5+rw*bf*3, 4.9),
}
generate_diagram('simple_generated_diagram.svg',
rect_pos, rect_width, figsize)
generate_diagram('simple_generated_diagram.png',
rect_pos, rect_width, figsize)
if __name__ == '__main__':
generate_diagram_simple()
pyplot.show()
| bsd-3-clause |
SCP-028/UGA | misc/courses/BINF8950_capstone.py | 1 | 11907 | import os
import sys
import numpy as np
import pandas as pd
import plotly.graph_objs as go
import plotly.offline as offline
import seaborn as sns
import statsmodels.api as sm
from sklearn.decomposition import PCA
TEST_METHOD: bool = False
TEST_FOUR_DAYS: bool = False # cannot be True if TEST_METHOD is True
INTERPOLATE_DATA: bool = True # cannot be True if TEST_METHOD is True
TIME_WINDOW: int = 4
USE_FOURIER_TRANSFORM: bool = False
FOURIER_COEF_NUM: int = 15
HODRICK_PRESCOTT_LAMBDA: int = 15000
try:
assert sys.version_info.major == 3
assert sys.version_info.minor > 5
except AssertionError:
raise RuntimeError("This code requires Python 3.6+.")
def interpolate_data(df):
"""Interpolate to hourly data so that FFT works correctly.
Parameters
----------
df: pandas DataFrame
Index([
'timepoint', 'elapsed_time', 'temperature',
'experiment', 'case', 'id', 'condition'])
Returns
-------
A interpolated long DataFrame.
"""
df_interp = pd.DataFrame(columns=df.columns)
for uuid in ids:
df_uuid = df[df["id"] == uuid][["timepoint", "temperature"]]
timepoint = pd.date_range(
start=df_uuid.timepoint.min(),
end=df_uuid.timepoint.max(),
freq="30min"
)
df_return = pd.DataFrame(dict(timepoint=timepoint, id=uuid))
df_return = df_return.join(df_uuid.set_index("timepoint"), on="timepoint", how="left")
df_return = df_return.interpolate(method="cubic")
df_interp = pd.concat([df_interp, df_return], ignore_index=True)
return df_interp
def hodrick_prescott(df: pd.DataFrame, lamb: int=1600) -> pd.DataFrame:
"""Use the Hodrick-Prescott Filter to estimate the trend.
Parameters
----------
df: pandas DataFrame
The temperature data at different timepoints
lamb: float, optional
The Hodrick-Prescott smoothing parameter. The larger it is the
smoother the outcome gets.
Returns
-------
df with a new column containing the trend, and the de-trended data.
"""
df = df.sort_values("timepoint")
_, df["trend"] = sm.tsa.filters.hpfilter(df.temperature, lamb=lamb)
df["detrended"] = df["temperature"] - df["trend"]
return df
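def _example_hodrick_prescott():
    """Added illustrative sketch (not part of the original analysis).

    Shows the input layout expected by `hodrick_prescott`; the temperature
    values below are made up.
    """
    demo = pd.DataFrame({
        "timepoint": pd.date_range("2018-01-01", periods=48, freq="30min"),
        "temperature": 37.0 + 0.2 * np.random.randn(48),
    })
    # Returns the same frame with added "trend" and "detrended" columns.
    return hodrick_prescott(demo, lamb=HODRICK_PRESCOTT_LAMBDA)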
def fourier_transform(dfs, t: int=2, n: int=10, detrend: bool=True) -> pd.DataFrame:
"""Perform a Fourier transform on the de-trended data.
Parameters
----------
dfs: list of pandas DataFrames
trend_list: a series of output from the function `hodrick_prescott`.
t: int, optional
The time window to scan df.
n: int, optional
The number of coefficients in the output.
detrend: bool, optional
Whether to use de-trended data or the original temperature data.
Returns
-------
The Fast Fourier Transformed matrix of coefficients.
"""
M_matrix = pd.DataFrame(columns=[f"c{i+1}" for i in range(n)])
for df, uuid in zip(dfs, ids):
if detrend:
df = df.loc[:, ["timepoint", "detrended"]]
else:
df = df.loc[:, ["timepoint", "temperature"]]
df["measure_date"] = df.timepoint.apply(lambda x: x.date())
time_windows = df["measure_date"].unique()
time_windows = [time_windows[i:i + t] for i in range(len(time_windows) - t + 1)]
for time_window in time_windows:
data = df.loc[df["measure_date"].isin(time_window)]
date_range = f"{str(time_window.min())[-5:]}_{str(time_window.max())[-5:]}"
if detrend:
M_matrix.loc[f"{uuid}_{date_range}"] = abs(np.fft.fft(a=data["detrended"], n=n))
else:
M_matrix.loc[f"{uuid}_{date_range}"] = abs(np.fft.fft(a=data["temperature"], n=n))
return M_matrix
def fourier_series(dfs, t: int=2, n: int=10, detrend: bool=True) -> pd.DataFrame:
"""Perform a Fourier cosine series on the data.
Parameters
----------
    dfs: list of pandas DataFrames
        the outputs of the function `hodrick_prescott`.
t: int, optional
The time window (days) to scan df.
n: int, optional
The number of coefficients in the output.
detrend: bool, optional
Whether to use de-trended data or the original temperature data.
Returns
-------
S_matrices: A long DataFrame w/ columns timepoint, id, fcs, and temperature / detrended.
M_matrix: The Fourier Cosine Transformed matrix.
"""
def integrator(y, x):
I = 0
for i in range(0, len(y) - 2, 2):
a = x.iloc[i]
b = x.iloc[i + 2]
beginning = y.iloc[i]
middle = y.iloc[i + 1]
ending = y.iloc[i + 2]
I += (b - a) / 6 * (beginning + 4 * middle + ending)
return I
def calc_series(n, x, y):
L = x.iloc[-1] - x.iloc[0]
# S = y.mean()
S = 1 / L * integrator(y, x)
c = []
for i in range(1, n + 1):
p = y * np.cos(i * np.pi * x / L)
q = np.cos(i * np.pi * x / L) ** 2
c.append(integrator(p, x) / integrator(q, x))
S += c[i - 1] * np.cos(i * np.pi * x / L) # S should be an array w/ the same len as x
return dict(S=S, c=c)
S_matrices = pd.DataFrame()
M_matrix = pd.DataFrame(columns=[f"c{i+1}" for i in range(n)])
for df, uuid in zip(dfs, ids):
df = df.sort_values("timepoint")
df["measure_date"] = df.timepoint.apply(lambda x: x.date())
time_windows = df["measure_date"].unique()
time_windows = [time_windows[i:i + t] for i in range(len(time_windows) - t + 1)]
for time_window in time_windows:
data = df.loc[df["measure_date"].isin(time_window)]
date_range = f"{str(time_window.min())[-5:]}_{str(time_window.max())[-5:]}"
x = data["timepoint"]
x = (x - x.min()) / np.timedelta64(1, "h")
if detrend:
y = data["detrended"]
else:
y = data["temperature"]
series = calc_series(n, x, y)
M_matrix.loc[f"{uuid}_{date_range}"] = series["c"]
S_matrix = pd.DataFrame(dict(
timepoint=data["timepoint"],
fcs=series["S"],
id=f"{uuid}_{date_range}"
))
if detrend:
S_matrix["detrended"] = y
else:
S_matrix["temperature"] = y
S_matrices = S_matrices.append(S_matrix)
return S_matrices, M_matrix
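# Added note (hedged): `integrator` above is a composite Simpson-style rule
# over consecutive sample triples, so each coefficient is approximately
#   c_k ~= integral(y(x) * cos(k*pi*x/L)) / integral(cos(k*pi*x/L)**2)
# evaluated on the timepoints of each scanned window, with L the window
# length in hours.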
def tsplot(df, uuid, trend=True, detrended=True, save_image=False):
"""
Parameters
----------
df: pandas DataFrame
Output of function `hodrick_prescott`.
    uuid: str
        Experiment and case identifier.
    trend: bool, optional
        Whether to overlay the Hodrick-Prescott trend line.
    detrended: bool, optional
        Whether to plot the de-trended data on a secondary y-axis.
    save_image: bool, optional
        Whether to download the figure as a static file.
Returns
-------
    The plotly figure dictionary that was rendered with `offline.iplot`.
"""
fig = {
'data': [
{
'x': df['timepoint'],
'y': df["temperature"],
'name': "Temperature",
'opacity': 0.7,
'mode': 'lines+markers'
}
],
'layout': {
"title": uuid,
'xaxis': {'title': 'Date'},
'yaxis': {'title': "Temperature"}
}
}
if trend:
fig["data"].append(
{
'x': df['timepoint'],
'y': df["trend"],
'name': "Hodrick-Prescott filter trend",
'opacity': 0.7
}
)
if detrended:
fig["data"][0] = {
'x': df['timepoint'],
'y': df["detrended"],
'name': "De-trended",
"yaxis": "y2",
'opacity': 0.7
}
fig["layout"]["yaxis2"] = {
"title": "Temperature",
"overlaying": "y",
"side": "right"
}
if save_image:
offline.iplot(fig, filename=f"{uuid}", show_link=False, image="png")
else:
offline.iplot(fig, filename=f"{uuid}", show_link=False)
return fig
# Read temperature data:
if TEST_METHOD:
import matplotlib.pyplot as plt
np.random.seed(0)
df = pd.DataFrame()
for uuid in ["group1_", "group2_", "group3_"]:
x = pd.date_range('1/1/2018', periods=120, freq='H')
xe = (x - x.min()) / np.timedelta64(1, "h")
noise = np.random.normal(0, 1, len(x))
s = np.std(np.cos(xe) * xe) / np.random.normal()
y = np.cos(xe) * xe + s * noise
df = df.append(pd.DataFrame(
dict(timepoint=x,
elapsed_time=xe,
temperature=y,
id=uuid
)))
plt.plot(x, y)
else:
df = pd.read_csv("malaria.csv")
df["timepoint"] = pd.to_datetime(df["timepoint"])
df["id"] = df[["experiment", "case"]].apply("_".join, axis=1)
df = df.loc[:, ["timepoint", "temperature", "id"]]
ids = df["id"].unique()
df.head()
if TEST_FOUR_DAYS:
res = pd.DataFrame()
for uuid in ids:
x = df[df["id"] == uuid]
first_four_days = x["timepoint"].min() + pd.DateOffset(3)
x = x.loc[x.timepoint <= first_four_days]
res = res.append(x)
df = res
# Interpolate temperature data and find when the fevers happened:
if INTERPOLATE_DATA:
df = interpolate_data(df)
# Calculate the trend using the Hodrick-Prescott filter:
trend_list = []
for uuid in ids:
x = df[df["id"] == uuid][["timepoint", "temperature"]]
trend_list.append(hodrick_prescott(x, lamb=HODRICK_PRESCOTT_LAMBDA))
figs = [tsplot(x, uuid, trend=True, detrended=False, save_image=False) for x, uuid in zip(trend_list, ids) if uuid in ["E30_RKy15", "E06_RIh16", "E07B_11C166"]]
# Check the de-trended data:
if USE_FOURIER_TRANSFORM:
M_matrix = fourier_transform(trend_list, t=TIME_WINDOW, n=FOURIER_COEF_NUM, detrend=True)
else:
S_matrix, M_matrix = fourier_series(trend_list, t=TIME_WINDOW, n=FOURIER_COEF_NUM, detrend=True)
dfm = S_matrix.loc[S_matrix["id"].str.startswith("E30_RKy15")]
dfm = dfm.groupby("timepoint").mean().reset_index()
fig = {
'data': [
go.Scatter(
x=pd.to_datetime(dfm['timepoint']),
y=dfm['detrended'],
name="Temperature",
opacity=0.7
),
go.Scatter(
x=pd.to_datetime(dfm["timepoint"]),
y=dfm["fcs"],
name="Fourier cosine series",
opacity=0.7
)
],
'layout': {
'xaxis': {'title': 'Date'},
'yaxis': {'title': "Temperature"},
'title': "E30 RKy15"
}
}
offline.iplot(fig, show_link=False, image="png")
M_matrix.head()
pca = PCA(n_components=2, random_state=0)
c = pca.fit(M_matrix.T)
x, y = c.components_
print(f"Explained variance ratio: {c.explained_variance_ratio_}")
projection = pca.inverse_transform(pca.transform(M_matrix.T))
loss = ((M_matrix.T - projection) ** 2).mean()
loss.head()
obs = M_matrix.index.tolist()
data = pd.DataFrame(dict(PC1=x, PC2=y), index=obs)
data["group"] = [x.split("_")[0] for x in data.index]
fig = sns.lmplot(
data=data, x="PC1", y="PC2", hue="group", size=10,
fit_reg=False, scatter_kws={'alpha': 0.7}, markers=["o", "x", "s"]
)
| apache-2.0 |
mlhhu2017/identifyDigit | julian/mnist_util.py | 3 | 5121 | # coding: utf-8
from mnist import MNIST
import math
import numpy as np
import itertools
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from random import randint
def get_np_array(path='data'):
"""
Get images and install converter:
1. install MNIST from the command line with 'pip install python-mnist'
2. download the data from http://yann.lecun.com/exdb/mnist/
3. extract the .gz files and rename '.' to '-' in the file names
converts mnist images to ndarrays
inputs:
path optional, path to the mnist files (default=='data')
outputs:
train 2d-array with shape (60000,784), training images
train_labels 1d-array with shape (60000,), training labels
test 2d-array with shape (10000,784), test images
test_labels 1d-array with shape (10000,), test labels
"""
mndata = MNIST(path)
train, train_labels = mndata.load_training()
test, test_labels = mndata.load_testing()
return np.array(train), np.array(train_labels), np.array(test), np.array(test_labels)
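# Added usage sketch (hedged; assumes the MNIST files are in ./data as
# described above):
#   train, train_labels, test, test_labels = get_np_array("data")
#   show_a_num(train[0])            # single digit
#   show_nums(train[:25], nrow=5)   # 5x5 grid of digits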
def show_a_num(num):
"""
Plots a single number
inputs:
num takes 1d-array with shape (784,) containing a single image
outputs:
img matplotlib image
"""
pixels = num.reshape((28,28))
img = plt.imshow(pixels, cmap='gray')
plt.axis("off")
return img
def show_nums(data, nrow=None, xsize=15, ysize=15):
"""
Plots multiple numbers in a "grid"
inputs:
data takes 2d-array with shape (n,784) containing images
nrow optional, number of rows in the output image (default == ceil(sqrt(n)))
xsize optional, specifies output image length in inches (default == 15)
ysize optional, specifies output image height in inches (default == 15)
outputs:
img matplotlib image
"""
n = len(data)
# check if at least one image
if n < 1:
raise ValueError("No image given!")
# if only 1 image print it
if len(data.shape) == 1:
return show_a_num(data)
# number of rows specified?
    if nrow is None:
# calculate default
ncol = math.ceil(math.sqrt(n))
nrow = math.ceil(n/ncol)
else:
# calculate number of columns
ncol = math.ceil(n/nrow)
# check if enough images
missing = nrow*ncol - n
if missing != 0:
# fill up with black images
zeros = np.zeros(missing*784)
zeros = zeros.reshape(missing,784)
data = np.vstack((data,zeros))
# reshape the data to the desired output
data = data.reshape((-1,28,28))
data = data.reshape((nrow,-1,28,28)).swapaxes(1,2)
data = data.reshape((nrow*28,-1))
plt.figure(figsize=(xsize,ysize))
img = plt.imshow(data, cmap='gray')
plt.axis("off")
return img
def get_one_num(data, labels, num):
"""
Creates 2d-array containing only images of a single number
inputs:
data takes 2d-array with shape (n,784) containing the images
labels takes 1d-array with shape (n,) containing the labels
num the number you want to filter
outputs:
arr 2d-array only containing a images of num
"""
return np.array([val for idx, val in enumerate(data) if labels[idx] == num])
def get_all_nums(data, labels):
"""
Creates a 1d-array containing 2d-arrays of images for every number
ex. arr[0] = 2d-array containing all images of number 0
inputs:
data takes 2d-array with shape (n,784) containing the images
labels takes 1d-array with shape (n,) containing the labels
outputs:
arr 1d-array containing 2d-arrays for every number
"""
return np.array([get_one_num(data, labels, i) for i in range(10)])
def plot_confusion_matrix(cm, classes, normalize=False,
title='Confusion matrix', cmap=plt.cm.Blues):
"""
Plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
inputs:
cm confusion matrix
classes name of classes
normalize optional, normalize matrix to show percentages (default == False)
title title of the plot (default == 'Confusion matrix')
cmap colormap (default == blue colormap)
outputs:
void
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
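def _example_confusion_matrix_plot(y_true, y_pred):
    """Added illustrative sketch (not part of the original module).

    Builds a confusion matrix with scikit-learn and renders it with
    plot_confusion_matrix; `y_true` and `y_pred` are 1d label arrays.
    """
    from sklearn.metrics import confusion_matrix
    cm = confusion_matrix(y_true, y_pred)
    plot_confusion_matrix(cm, classes=[str(i) for i in range(10)],
                          normalize=True)
    plt.show()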
| mit |
johnmgregoire/NanoCalorimetry | batchSC_singlecellproc.py | 1 | 4341 | import time
import os
import sys
import numpy
import h5py
#from PnSC_ui import *
#from PnSC_dataimport import *
from PnSC_SCui import *
#from PnSC_math import *
from PnSC_h5io import *
from PnSC_main import *
from matplotlib.ticker import FuncFormatter
def myexpformat(x, pos):
for ndigs in range(2):
lab=(('%.'+'%d' %ndigs+'e') %x).replace('e+0','e').replace('e+','e').replace('e0','').replace('e-0','e-')
if eval(lab)==x:
return lab
return lab
ExpTickLabels=FuncFormatter(myexpformat)
def make_ticklabels_invisible(ax, x=True, y=True):
if x:
for tl in ax.get_xticklabels():
tl.set_visible(False)
if y:
for tl in ax.get_yticklabels():
tl.set_visible(False)
nskip=100
#p='C:/Users/JohnnyG/Documents/PythonCode/Vlassak/NanoCalorimetry/AuSiCu_pnsc_all.h5'
p=mm.h5path
#f=h5py.File(p, mode='r+')
#f=h5py.File(p, mode='r')
savef='C:/Users/JohnnyG/Documents/HarvardWork/MG/PnSCplots/batchplotbycell_May17'
allsegdict=[]
selectcell=11
# 0 1 2 3 4 5
heatlist=['heat1a', 'heat2', 'heat3', 'heat4a', 'heat7', 'heat8']
for exp in heatlist:
f, hppaths=experimenthppaths(p, exp)
f.close()
for hpp in hppaths:
h5hpname=hpp.rpartition('/')[2]
f, g=gethpgroup(p, exp, h5hpname=h5hpname)
cell=g.attrs['CELLNUMBER']
f.close()
if cell!=selectcell:
continue
hpsdl=CreateHeatProgSegDictList(p, exp, h5hpname)
allsegdict+=[hpsdl]
def heatrate_T(d, T, Twin=5.):
#i=numpy.argmin((T-d['sampletemperature'])**2)
Ta=d['sampletemperature'][0]
x=numpy.where((Ta>=T-Twin)&(Ta<=T+Twin))[0]
prev=numpy.array([not (t-1 in x) for t in x])
previ=numpy.where(prev)[0]
stopi=numpy.append(previ[1:],len(x))
longestbunchind=numpy.argmax(stopi-previ)
inds=x[previ[longestbunchind]:stopi[longestbunchind]]
return d['sampleheatrate'][0][inds].mean()
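# Added note (hedged): heatrate_T picks the longest contiguous run of samples
# whose temperature lies within +/-Twin of T and returns the mean heat rate
# over that run, e.g. heatrate_T(allsegdict[0][3], 400.) for the rate near
# 400 C (the segment indices here are only illustrative).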
cool180=[]
cool400=[]
heat200500=[]
heati_heatseg_prevcooli_prevcoolseg=[(0, 2, -1, -1), (1, 2, 0, 4), (2, 2, 1, 4), (3, 2, 2, 4), (5, 2, 4, 3)]
for hi, hseg, ci, cseg in heati_heatseg_prevcooli_prevcoolseg:
if ci<0:
cool180+=[-1.e2]
cool400+=[-1.e2]
else:
cool180+=[heatrate_T(allsegdict[ci][cseg], 180.)]
cool400+=[heatrate_T(allsegdict[ci][cseg], 400.)]
heat200500+=[heatrate_T(allsegdict[hi][hseg], 350., Twin=150.)]
orderarray=numpy.abs(numpy.array(cool400))
cols=['k', 'b', 'g', 'r', 'c', 'm']
mult=1.e6
nplots=len(orderarray)
axl=[pylab.subplot(nplots, 1, nplots)]
for i in range(1, nplots):
#ax=pylab.subplot2grid((n, 3), (n-1-i, 0), colspan=2, sharex=axl[0], sharey=axl[0])
ax=pylab.subplot(nplots, 1, nplots-i, sharex=axl[0], sharey=axl[0])
pylab.setp(ax.get_xticklabels(), visible=False)
axl+=[ax]
for count, i in enumerate(numpy.argsort(orderarray)):
hi, hseg, ci, cseg=heati_heatseg_prevcooli_prevcoolseg[i]
print hi, hseg, heatlist[hi], allsegdict[hi][hseg].keys()
axl[count].plot(allsegdict[hi][hseg]['sampletemperature'][0], mult*allsegdict[hi][hseg]['sampleheatcapacity'][0], cols[count]+'.', markersize=1, label=heatlist[i])
for ax in axl:
ax.set_ylim(-2.1, 4.9)
ax.set_yticks([-2, 0, 2, 4])
axl[2].set_ylabel(r'Heat Capacity ($\mu$J/K), endothermic ->', fontsize=14)
axl[0].set_xlabel('Temperature (C)', fontsize=14)
pylab.subplots_adjust(right=.95, top=0.95, hspace=0.01)
###plot cooling rates
#pylab.figure(figsize=(1.5, 8))
#for count, x in enumerate(numpy.sort(orderarray)):
# pylab.semilogx(numpy.abs(x), count, cols[count]+'o')
#make_ticklabels_invisible(pylab.gca(), x=False)
#pylab.xlabel('cooling rate at 400C (K/s)', fontsize=14)
#pylab.ylim(-.5, count+.5)
#pylab.show()
###extra stuff?
#pylab.ylim(t3, t4)
# pylab.xlabel('T (C)')
# pylab.ylabel('P / dT/dt')
# pylab.gca().yaxis.set_major_formatter(ExpTickLabels)
# pylab.subplots_adjust(left=.1, right=.97, top=.93, bottom=.08, wspace=.25, hspace=.25)
## pylab.show()
## idialog=messageDialog(title='continue')
## if not idialog.exec_():
## break
## break
# pylab.savefig(os.path.join(savef,'SCcellplot_cell%02d' %(cellcount+1)+'.png'))
# pylab.clf()
| bsd-3-clause |
google/qkeras | experimental/lo/generate_rf_code.py | 1 | 28407 | # Copyright 2020 Google LLC
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generates expressions for random trees."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
DEBUG = int(os.environ.get("DEBUG", 0))
PRINT_DEBUG = int(os.environ.get("PRINT_DEBUG", 0))
def gen_random_tree_regressor(
tree, code, bits, o_bits, o_decimal_digits, o_is_neg, bdd, offset, is_cc=True):
"""Generates HLS friendly C++ code for random tree regressor.
Generates HLS friendly C++ code for Catapult.
Arguments:
tree: decision tree regressor from SkLearn.
code: list of code lines to be append to.
bits: list containing number of bits for each of the inputs.
o_bits: number of bits for output.
o_decimal_digits: number of decimal digits (right of the decimal point
of o_bits for approximation of regressor in RandomTreeRegressor.
o_is_neg: True or 1 if output can be negative.
    bdd: cache of (i, v, n1, n0) entries so that if
they appear again, we reuse previously computed nodes.
offset: each variable created in this function call is incremented by
offset.
is_cc: if True, generates C++, else Verilog.
Returns:
Tuple containing last variable name and current number of variables.
"""
# extract information from tree
n_nodes = tree.node_count
children_left = tree.children_left
children_right = tree.children_right
feature = tree.feature
threshold = tree.threshold
values = np.copy(tree.value)
o_suffix = ""
if DEBUG:
o_type = "float"
elif is_cc:
o_type = "ac_fixed<{},{},{}>".format(
o_bits + o_decimal_digits,
o_bits + o_is_neg,
o_is_neg)
else:
o_sign = " signed" if o_is_neg else ""
if o_bits + o_decimal_digits > 1:
o_suffix = "[{}:0]".format(o_bits + o_decimal_digits - 1)
o_type = "wire" + o_sign + " " + o_suffix
def round_digits(x, decimal_digits):
"""Rounds to decimal_digits to the right of the decimal point."""
if DEBUG:
return x
factor = (1 << decimal_digits) * 1.0
x = x * factor
return np.round(x) / factor
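  # Added worked example: with decimal_digits=5 the factor is 32, so
  # round_digits(1.37, 5) -> round(43.84)/32 = 44/32 = 1.375.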
is_leaves = np.zeros(shape=n_nodes, dtype=bool)
stack = [(0, -1)]
while stack:
node_id, parent_depth = stack.pop()
if children_left[node_id] != children_right[node_id]:
stack.append((children_left[node_id], parent_depth+1))
stack.append((children_right[node_id], parent_depth+1))
else:
is_leaves[node_id] = True
values[node_id] = round_digits(tree.value[node_id], o_decimal_digits)
if (
values[node_id].flatten()[0] != tree.value[node_id].flatten()[0] and
DEBUG
):
print(node_id, values[node_id].flatten()[0],
tree.value[node_id].flatten()[0])
v_name = {}
n_vars = offset
bdd = {}
def round_value_to_int(x):
v = hex(int(np.round(x * (1 << (o_decimal_digits)))))
if is_cc:
if DEBUG:
return str(x)
else:
return x
#v + " /* {} */".format(x)
else:
return (
str(o_bits + o_decimal_digits) + "'h" + v[2:] + " /* {} */".format(x)
)
if is_leaves[0]:
v_name[0] = round_value_to_int(values[0].flatten()[0])
code.append(" {} n_{} = {};".format(o_type, n_vars, v_name[0]))
last_var = "n_{}".format(n_vars)
n_vars += 1
else:
for i in range(n_nodes-1, -1, -1):
if is_leaves[i]:
continue
if v_name.get(children_left[i], None) is not None:
n1 = v_name[children_left[i]]
elif is_leaves[children_left[i]]:
n1 = round_value_to_int(values[children_left[i]].flatten()[0])
v_name[children_left[i]] = n1
else:
n1 = "n_" + str(n_vars)
n_vars += 1
v_name[children_left[i]] = n1
raise ValueError((children_left[i], n1, is_leaves[children_left[i]]))
if v_name.get(children_right[i], None) is not None:
n0 = v_name[children_right[i]]
elif is_leaves[children_right[i]]:
n0 = round_value_to_int(values[children_right[i]].flatten()[0])
v_name[children_right[i]] = n0
else:
n0 = "n_" + str(n_vars)
n_vars += 1
v_name[children_right[i]] = n0
raise ValueError((children_right[i], n0, is_leaves[children_right[i]]))
if v_name.get(i, None) is not None:
n = v_name[i]
last_var = v_name[i]
elif bdd.get((feature[i], threshold[i], n1, n0), None) is not None:
n = bdd[(feature[i], threshold[i], n1, n0)]
v_name[i] = n
last_var = n
elif n1 == n0:
# store intermediate results so that we can build a dag, not a tree
bdd[(feature[i], threshold[i], n1, n0)] = n1
v_name[i] = n1
last_var = n1
else:
n = "n_" + str(n_vars)
n_vars += 1
v_name[i] = n
# store intermediate results so that we can build a dag, not a tree
bdd[(feature[i], threshold[i], n1, n0)] = n
t = int(threshold[i])
if bits[feature[i]] == 1:
if t == 0:
n1, n0 = n0, n1
code.append(
" {} {} = (i_{}) ? {} : {}; // x_{} {}".format(
o_type, v_name[i], feature[i], n1, n0, i,
threshold[i]))
else:
code.append(
" {} {} = (i_{} <= {}) ? {} : {}; // x_{} {}".format(
o_type, v_name[i], feature[i], t, n1, n0, i,
threshold[i]))
last_var = v_name[i]
return (last_var, n_vars)
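# Added sketch (hedged; `rf` and `bits` are assumed to come from the caller,
# see gen_random_forest below for the real driver):
#   code_lines = []
#   last_var, n_vars = gen_random_tree_regressor(
#       rf.estimators_[0].tree_, code_lines, bits, o_bits=8,
#       o_decimal_digits=5, o_is_neg=0, bdd={}, offset=0, is_cc=True)
#   print("\n".join(code_lines))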
def entry_to_hex(entry, max_value, size, is_cc):
"""Converts class instance to hexa number."""
e_vector = [np.power(max_value+1, i) for i in range(len(entry)-1, -1, -1)]
entry = np.array(entry)
v = hex(np.sum(entry * e_vector))
if is_cc:
return v
else:
return str(size) + "'h" + v[2:] + " /* {} */".format(entry)
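# Added worked example (assumes max_value=3, i.e. 2 bits per class): the tuple
# (2, 0, 3) is packed in base max_value+1 = 4 as 2*16 + 0*4 + 3 = 35 = 0x23,
# so entry_to_hex returns "0x23" for C++ and "6'h23 /* [2 0 3] */" for RTL
# when size=6.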
def gen_random_tree_classifier(
tree, code, bits, bdd, max_value, values_rom, offset, is_cc=True):
"""Generates C++ or Verilog friendly code for random tree classifier.
Generates HLS Catapult friendly code or RTL in Verilog for random tree
classifier from SkLearn.
Arguments:
tree: RandomTreeClassifier from sklearn.
code: list of strings containing code generated.
bits: list containing number of bits for each of the inputs.
    bdd: cache of (i, v, n1, n0) entries so that if they appear again, we
      reuse previously computed nodes.
    max_value: a random tree classifier returns a vector of per-class
      instance counts found in the terminal leaf node. This variable
specifies a clipping factor for each class type so that we have
a bounded problem to synthesize.
values_rom: to save space in classifier, we store class values in
values_rom.
offset: each variable created in this function call is incremented by
offset.
is_cc: if True, generates C++ code; otherwise, Verilog.
Returns:
Tuple containing last variable name and current number of variables.
"""
# extract information from tree
n_nodes = tree.node_count
children_left = tree.children_left
children_right = tree.children_right
feature = tree.feature
threshold = tree.threshold
values = {}
is_leaves = np.zeros(shape=n_nodes, dtype=bool)
stack = [(0, -1)]
rom_l = []
use_rom = max_value >= 7
n_classes = len(tree.value[0].flatten())
max_bits = int(np.ceil(np.log2(max_value + 1)))
while stack:
node_id, parent_depth = stack.pop()
if children_left[node_id] != children_right[node_id]:
stack.append((children_left[node_id], parent_depth+1))
stack.append((children_right[node_id], parent_depth+1))
else:
# is leaf node
is_leaves[node_id] = True
# get tree node output
p_input_tuple = tree.value[node_id].flatten().astype(np.int32)
max_input_value = np.max(p_input_tuple)
min_input_value = np.min(p_input_tuple)
# if max_value == 1, only keep top ones
if max_value == 1:
input_tuple = (p_input_tuple == max_input_value).astype(np.int32)
tree.value[node_id] = (tree.value[node_id] == max_input_value).astype(
tree.value[node_id].dtype)
else: # if max_value <= 3:
# SKLearn classifier computes probability for each entry instead of
# suming them all. We should do the same.
max_input_value = np.sum(p_input_tuple)
min_input_value = 0
# Just update tree.value to number so that we can compare accuracy of
# quantization later.
tree.value[node_id] = np.round(
max_value *
(tree.value[node_id] - min_input_value) /
(max_input_value - min_input_value))
input_tuple = tree.value[node_id].flatten()
input_tuple = tuple(list(input_tuple.astype(np.int32)))
# stores values in rom - we will use rom to store values if use_rom is
# true.
if values_rom.get(input_tuple, None) is None:
values_rom[input_tuple] = len(values_rom)
rom_l.append(input_tuple)
if DEBUG:
print(values_rom[input_tuple], input_tuple)
if use_rom:
values[node_id] = values_rom[input_tuple]
else:
values[node_id] = entry_to_hex(
input_tuple, max_value, max_bits * n_classes, is_cc)
# t_bits: entry type
# l_bits: table line type
if use_rom:
t_bits = int(np.ceil(np.log2(len(values_rom))))
l_bits = max_bits * n_classes
else:
t_bits = max_bits * n_classes
# we only store the index here, as we read from a rom
if is_cc:
if DEBUG:
t_type = "int"
else:
t_type = "ac_int<{},false>".format(t_bits)
else:
t_type = "wire [{}:0]".format(t_bits-1)
v_name = {}
n_vars = offset
bdd = {}
if is_leaves[0]:
v_name[0] = t_type + "(" + str(values[0]) + ")"
code.append(" {} n_{} = {};".format(
t_type, n_vars, values[0]))
last_var = "n_{}".format(n_vars)
n_vars += 1
else:
for i in range(n_nodes-1, -1, -1):
if is_leaves[i]:
continue
if v_name.get(children_left[i], None) is not None:
n1 = v_name[children_left[i]]
elif is_leaves[children_left[i]]:
if is_cc:
n1 = t_type + "(" + str(values[children_left[i]]) + ")"
else:
n1 = str(values[children_left[i]])
v_name[children_left[i]] = n1
else:
n1 = "n_" + str(n_vars)
n_vars += 1
v_name[children_left[i]] = n1
raise ValueError((children_left[i], n1, is_leaves[children_left[i]]))
if v_name.get(children_right[i], None) is not None:
n0 = v_name[children_right[i]]
elif is_leaves[children_right[i]]:
if is_cc:
n0 = t_type + "(" + str(values[children_right[i]]) + ")"
else:
n0 = str(values[children_right[i]])
v_name[children_right[i]] = n0
else:
n0 = "n_" + str(n_vars)
n_vars += 1
v_name[children_right[i]] = n0
raise ValueError((children_right[i], n0, is_leaves[children_right[i]]))
if v_name.get(i, None) is not None:
n = v_name[i]
last_var = v_name[i]
elif bdd.get((feature[i], threshold[i], n1, n0), None) is not None:
n = bdd[(feature[i], threshold[i], n1, n0)]
v_name[i] = n
last_var = n
elif n1 == n0:
# store intermediate results so that we can build a dag, not a tree
bdd[(feature[i], threshold[i], n1, n0)] = n1
v_name[i] = n1
last_var = n1
else:
n = "n_" + str(n_vars)
n_vars += 1
v_name[i] = n
# store intermediate results so that we can build a dag, not a tree
bdd[(feature[i], threshold[i], n1, n0)] = n
t = int(threshold[i])
if bits[feature[i]] == 1:
if t == 0:
n1, n0 = n0, n1
code.append(
" {} {} = (i_{}) ? {} : {}; // x_{} {}".format(
t_type, v_name[i], feature[i], n1, n0, i,
threshold[i]))
else:
code.append(
" {} {} = (i_{} <= {}) ? {} : {}; // x_{} {}".format(
t_type, v_name[i], feature[i], t, n1, n0, i,
threshold[i]))
last_var = v_name[i]
if use_rom:
if is_cc:
if DEBUG:
l_type = "int"
else:
l_type = "ac_int<{},false>".format(l_bits)
code.append(" {} {}_rom[{}]".format(l_type, last_var, len(values_rom)) +
" {")
for i in range(len(values_rom)):
code_s = " " + entry_to_hex(rom_l[i], max_value, l_bits, is_cc)
if i < len(values_rom)-1:
code_s = code_s + ","
code.append(code_s)
code.append(" };")
else:
l_type = "wire [{}:0]".format(l_bits - 1)
code.append(" function [{}:0] {}_rom;".format(l_bits-1, last_var))
code.append(" input [{}:0] address;".format(t_bits-1))
code.append(" begin")
code.append(" case (address)")
for i in range(len(values_rom)):
code.append(" {}'d{}: {}_rom = {};".format(
l_bits, i, last_var, entry_to_hex(rom_l[i], max_value, l_bits, is_cc)))
code.append(" default: {}_rom = 0;".format(last_var))
code.append(" endcase")
code.append(" end")
code.append(" endfunction")
code.append(" {} v_{} = {}_rom[{}];".format(
l_type, last_var, last_var, last_var))
last_var = "v_" + last_var
return last_var, n_vars
def gen_random_forest(
rf, name, bits, is_neg, o_bits, o_is_neg, is_regressor=True,
is_top_level=False, is_cc=True):
"""Generates HLS based C++ or SystemVerilog code for random forest."""
# TODO(nunescoelho): need to take care of multiple outputs for classifier.
# we can get better result if we do not look at the winning classifier,
# but sum how many of them appear in each classifier for leaf nodes.
bdd = {}
values_rom = {}
offset = 0
code = []
max_value = (1 << int(os.environ.get("MAX_BITS",1))) - 1
decimal_digits = int(os.environ.get("MAX_BITS", 5))
assert max_value > 0
o_list = []
for i in range(len(rf.estimators_)):
tree = rf.estimators_[i].tree_
code.append(" //----- TREE {}".format(i))
if is_regressor:
last_var, offset = gen_random_tree_regressor(
tree, code, bits, o_bits, decimal_digits, o_is_neg, bdd, offset, is_cc)
else:
values_rom = {}
last_var, offset = gen_random_tree_classifier(
tree, code, bits, bdd, max_value, values_rom, offset, is_cc)
o_list.append(last_var)
if is_cc:
header = [
"#include <ac_int.h>",
"#include <ac_fixed.h>",
"#include <iostream>",
"using namespace std;",
"//#define _PRINT_DEBUG_",
"#define PB(n) cout << #n << \":\" << n << endl;",
"#define PS(n) \\",
" cout << #n << \":\" << n.to_double() << \" \"; \\",
" for(int i=n.width-1; i>=0; i--) cout << n[i]; cout << endl;"
]
if DEBUG:
header = header + [
"static inline float round_even(float x) {",
" int x_int = truncf(x);",
" float x_dec = x - x_int;",
" if ((x_dec == 0.5) && (x_int % 2 == 0)) {",
" return truncf(x);",
" } else {",
" return truncf(x + 0.5);"
" }",
"}"
]
if is_top_level:
header.append("#pragma hls_design top")
header.append("void {}(int in[{}], int &out)".format(
name, np.sum(bits), o_bits) + " {")
else:
n_bits = int(np.ceil(np.log2(len(o_list))))
header = header + [
"static inline ac_int<{},{}> round_even(ac_fixed<{},{},{}> x)".format(
o_bits, o_is_neg,
n_bits + o_bits + decimal_digits, n_bits + o_bits + o_is_neg,
o_is_neg
) + " {",
" bool x_int_is_even = x[{}] == 0;".format(decimal_digits + n_bits),
" bool x_frac_is_0_5 = x[{}] && (x.slc<{}>(0) == 0);".format(
n_bits + decimal_digits-1, n_bits + decimal_digits-1),
" if (x_frac_is_0_5 && x_int_is_even) {",
" return x.slc<{}>({});".format(o_bits, n_bits + decimal_digits),
" } else {",
" ac_int<{},{}> r = x.slc<{}>({}) + 1;".format(
o_bits + 1, o_is_neg,
o_bits + 1, n_bits + decimal_digits - 1),
" return r.slc<{}>(1);".format(o_bits + 1),
#" return (x + ac_fixed<{},{},{}>({})).slc<{}>({});".format(
# n_bits + o_bits + decimal_digits, n_bits + o_bits + o_is_neg,
# o_is_neg, 1<<(n_bits+decimal_digits-1),
# o_bits, n_bits + decimal_digits),
# #o_is_neg, len(o_list)/2, o_bits, n_bits + decimal_digits),
" }",
"}"
]
if is_top_level:
header.append("#pragma hls_design top")
header.append("void {}(ac_int<{},0> in, ac_int<{},{}> &out)".format(
name, np.sum(bits), o_bits, o_is_neg) + " {")
else:
n_bits = int(np.ceil(np.log2(len(o_list))))
i_decl = " input [{}:0] in;".format(np.sum(bits)-1)
o_sign = "signed " if o_is_neg else ""
o_decl = " output " + o_sign + "[{}:0] out;".format(o_bits-1)
header = [
"module " + name + "(in, out);",
i_decl,
o_decl,
"",
" function {}[{}:0] round_even;".format(o_sign, o_bits),
" input {}[{}:0] x;".format(o_sign, n_bits + o_bits + decimal_digits - 1),
" reg x_int_is_even;",
" reg x_frac_is_0_5;",
" reg {}[{}:0] round_sum;".format(o_sign, o_bits + 1),
" begin",
" x_int_is_even = x[{}] == 0;".format(decimal_digits + n_bits),
" x_frac_is_0_5 = x[{}] && (x[{}:0] == 0);".format(
n_bits + decimal_digits-1, n_bits + decimal_digits - 2),
" if (x_frac_is_0_5 && x_int_is_even)",
" round_even = x[{}:{}];".format(
n_bits + decimal_digits + o_bits - 1, n_bits + decimal_digits),
" else",
" begin",
" round_sum = x[{}:{}] + 1;".format(
n_bits + decimal_digits + o_bits - 1, n_bits + decimal_digits - 1),
" round_even = round_sum[{}:1];".format(o_bits + 1),
" end",
#" round_even = (x + {})[{}:{}];".format(
# #(1 << (n_bits + decimal_digits - 1)),
# n_bits + decimal_digits + o_bits - 1, n_bits + decimal_digits),
" end",
" endfunction"
]
all_bits = np.sum(bits)
sum_i = 0
for i in range(bits.shape[0]):
if is_cc:
if bits[i] > 1:
if DEBUG:
header.append(" int i_{} = in[{}];".format(i, i))
else:
header.append(" ac_int<{},{}> i_{} = in.slc<{}>({});".format(
bits[i], is_neg[i], i, bits[i], sum_i))
else:
header.append(" bool i_{} = in[{}];".format(i, sum_i))
else:
if bits[i] == 1:
header.append(" wire i_{} = in[{}];".format(i, all_bits - sum_i - 1))
else:
header.append(" wire i_{}[{}:0] = in[{}:{}];".format(
i, bits[i], sum_i + bits[i] - 1, all_bits - sum_i - 1))
sum_i += bits[i]
footer = []
if is_regressor:
n_bits = int(np.ceil(np.log2(len(o_list))))
assert 1 << n_bits == len(o_list)
if is_cc:
if DEBUG:
tmp_type = "float"
else:
tmp_type = "ac_fixed<{},{},{}>".format(
n_bits + o_bits + decimal_digits, n_bits + o_bits + o_is_neg,
o_is_neg)
avg_o = " {} o_tmp = {};".format(tmp_type, " + ".join(o_list))
# rnd_o = " o_tmp += {}({});".format(tmp_type, len(o_list)/2)
if DEBUG:
out = " out = round_even(o_tmp / {});".format(len(o_list))
else:
out = " out = round_even(o_tmp);"
footer.append(" #ifdef _PRINT_DEBUG_")
for o_name in o_list:
footer.append(" PS({});".format(o_name))
footer.append(" #endif")
closing = "}"
else:
tmp_sign = "signed " if o_is_neg else ""
avg_o = " wire " + tmp_sign + "[{}:0] o_tmp = {};".format(
n_bits + o_bits + decimal_digits - 1, " + ".join(o_list))
for n in o_list:
footer.append(" // always @({}) $display(\"{} = %f (%b)\", {} / 32.0, {});".format(n,n,n,n))
footer.append(" // always @(o_tmp) $display(\"o_tmp = %b\", o_tmp);")
out = " assign out = round_even(o_tmp);"
closing = "endmodule"
footer = footer + [avg_o, out, closing]
else:
assert not o_is_neg
footer = []
o_suffix = ""
if DEBUG:
o_type = "int"
elif is_cc:
o_type = "ac_int<{},{}>".format(o_bits, o_is_neg)
else:
o_sign = " signed" if o_is_neg else ""
o_suffix = "[{}:0]".format(o_bits)
o_type = "wire" + o_sign + " " + o_suffix
if is_cc:
n_classes = 1 << o_bits
max_bits = int(np.ceil(np.log2(max_value + 1)))
log2_o_list = int(np.ceil(np.log2(len(o_list))))
if DEBUG:
log2_o_type = "int"
else:
log2_o_type = "ac_int<{},false>".format(log2_o_list + max_bits)
sum_v = (
" {} sum[{}] = ".format(
log2_o_type, 1 << o_bits) + "{" +
",".join("0" * (1 << o_bits)) + "};"
)
footer = [sum_v]
for o_name in o_list:
for i in range(n_classes):
if DEBUG:
footer.append(" sum[{}] += ({} >> {}) & {};".format(
i, o_name, (n_classes - i) * max_bits - max_bits,
hex((1 << max_bits) - 1)))
else:
footer.append(" sum[{}] += {}.slc<{}>({});".format(
i, o_name, max_bits, (n_classes - i) * max_bits - max_bits))
debug_print = []
for i in range(n_classes):
debug_print.append("{}.slc<{}>({}).to_string(AC_DEC)".format(
o_name, max_bits, (n_classes - i) * max_bits - max_bits))
footer_s = (
" cout << \"{} \" <<".format(o_name) +
" << \" \" << ".join(debug_print) + " << endl;"
)
footer.append(" #ifdef _PRINT_DEBUG_")
footer.append(footer_s)
footer.append(" #endif")
footer.append(" {} max_tmp = sum[0];".format(log2_o_type))
footer.append(" {} max_id = 0;".format(o_type))
footer.append(" for(int i=1; i<{}; i++)".format(1 << o_bits))
footer.append(
" if (sum[i] >= max_tmp) { max_tmp = sum[i]; max_id = i; }")
out = " out = max_id;"
footer.append(out)
footer += ["}"]
else:
n_classes = 1 << o_bits
max_bits = int(np.ceil(np.log2(max_value + 1)))
log2_o_list = int(np.ceil(np.log2(len(o_list))))
log2_o_type = "wire [{}:0]".format(log2_o_list + max_bits)
footer = []
for i in range(n_classes):
code_s = " {} sum_{} = ".format(log2_o_type, i)
code_term = []
for o_name in o_list:
code_term.append("{}[{}:{}]".format(
o_name, (n_classes - i) * max_bits, (n_classes - i) * max_bits - max_bits))
code_s += " + ".join(code_term) + ";"
footer.append(code_s)
footer.append(" // always @(sum_{}) $display(\"sum_{} = %d\", sum_{});".format(
i, i, i))
footer.append(" reg [{}:0] max_tmp;".format(
log2_o_list + max_bits - 1))
footer.append(" reg [{}:0] max_id;".format(o_bits-1))
footer.append(" integer i;")
footer.append(" always @(" +
" or ".join(
["sum_" + str(i) for i in range(n_classes)]) + ")")
footer.append(" begin")
footer.append(" max_tmp = sum_0; max_id = 0;")
for i in range(1, n_classes):
footer.append(
" if (sum_{} >= max_tmp) begin max_tmp = sum_{}; max_id = {}; end".format(
i, i, i))
footer.append(" end")
footer.append(" assign out = max_id;")
footer.append("endmodule")
return header + code + footer
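# gen_testbench_sv emits a self-checking Verilog testbench for the generated
# module: it dumps the test vectors to x.rom (one binary row per sample) and the
# labels / reference predictions to y.rom and p.rom (hex), instantiates the
# module under test, applies every row of x_rom, and counts a sample as correct
# when the DUT output matches either the reference prediction or the true label,
# printing the resulting accuracy at the end of the run.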
def gen_testbench_sv(rf, name, bits, is_neg, o_bits, o_is_neg, x, y, p, code):
code.append("module tb;")
x_0, x_1 = x.shape
x_0_log2 = int(np.ceil(np.log2(x_0)))
code.append("reg [{}:0] x_rom[{}:0];".format(x_1-1, x_0-1))
code.append("initial $readmemb(\"x.rom\", x_rom, 0, {});".format(x_0-1))
with open("x.rom", "w") as f:
for i in range(len(x)):
f.write("".join([str(int(v)) for v in x[i]]) + "\n")
o_sign = "signed " if o_is_neg else ""
o_type = o_sign + "[{}:0]".format(o_bits - 1)
code.append("reg {} y_rom[{}:0];".format(o_type,x_0-1))
code.append("reg {} p_rom[{}:0];".format(o_type,x_0-1))
with open("y.rom","w") as f:
for i in range(len(y)):
f.write(hex(int(y[i]))+ "\n")
with open("p.rom","w") as f:
for i in range(len(y)):
f.write(hex(int(p[i]))+ "\n")
code.append("initial $readmemh(\"y.rom\", y_rom, 0, {});".format(x_0-1))
code.append("initial $readmemh(\"p.rom\", p_rom, 0, {});".format(x_0-1))
code.append("integer i;")
code.append("integer cnt;")
code.append("reg [{}:0] in;".format(x_1-1))
code.append("wire {} out;".format(o_type))
code.append("{} {}(in, out);".format(name, name))
code.append("initial")
code.append("begin")
code.append(" cnt = 0;")
    code.append(" in = x_rom[0];")
code.append(" for (i=0; i<{}; i=i+1)".format(x_0))
code.append(" begin")
code.append(" in = x_rom[i];")
code.append(" #1000;")
code.append(" if (p_rom[i] != out && y_rom[i] != out)")
code.append(" begin")
code.append(" $display(\"%d: %b y=%d p=%d -> %d\", i, x_rom[i], y_rom[i], p_rom[i], out);")
code.append(" end")
code.append(" else")
code.append(" begin")
code.append(" cnt = cnt + 1;")
code.append(" end")
code.append(" end")
code.append(" $display(\"acc = %f\", 100.0 * cnt / {});".format(x_0))
code.append("end")
code.append("endmodule")
def gen_testbench_cc(rf, name, bits, is_neg, o_bits, o_is_neg, x, y, p, code):
code.append("int x[{}][{}] = ".format(*x.shape) + "{")
for i in range(len(x)):
code_s = " {" + ",".join([str(int(v)) for v in x[i]]) + "}"
if i < len(x) - 1:
code_s = code_s + ","
code.append(code_s)
code.append("};")
code_s = (
"int y[{}] = ".format(y.shape[0]) + "{" +
",".join([str(int(v)) for v in y]) + "};"
)
code.append(code_s)
code_s = (
"int p[{}] = ".format(p.shape[0]) + "{" +
",".join([str(int(v)) for v in p]) + "};"
)
code.append(code_s)
code.append("int main()")
code.append("{")
code.append(" double acc = 0.0;")
if DEBUG:
code.append(" int in[{}];".format(x.shape[1]))
code.append(" int out;")
else:
code.append(" ac_int<{},0> in;".format(x.shape[1]))
code.append(" ac_int<{},{}> out;".format(o_bits, o_is_neg))
code.append(" for (int i=0; i<{}; i++)".format(x.shape[0]) + "{")
code.append(" for (int j=0; j<{}; j++) in[j] = x[i][j];".format(
x.shape[1]))
code.append(" {}(in, out);".format(name))
code.append(" if (p[i] != out && y[i] != out) {")
code.append(" cout << i << \": \";")
code.append(" for (int j=0; j<{}; j++) cout << in[j];".format(
x.shape[1]))
if DEBUG:
code.append(" cout << \" y=\" << y[i] << \" p=\" << p[i] << \" \" << out << endl;")
code.append(" }")
code.append(" acc += (y[i] == out);")
else:
code.append(" cout << \" y=\" << y[i] << \" p=\" << p[i] << \" \" << out.to_int() << endl;")
code.append(" #ifdef _PRINT_DEBUG_")
code.append(" exit(1);")
code.append(" #endif")
code.append(" }")
code.append(" acc += (y[i] == out.to_int());")
code.append(" }")
code.append(" cout << \"acc = \" << 100.0 * acc / {} << endl;".format(
x.shape[0]))
code.append("}")
| apache-2.0 |
sunzhxjs/JobGIS | lib/python2.7/site-packages/pandas/tseries/tests/test_period.py | 9 | 153010 | """Tests suite for Period handling.
Parts derived from scikits.timeseries code, original authors:
- Pierre Gerard-Marchant & Matt Knox
- pierregm_at_uga_dot_edu - mattknow_ca_at_hotmail_dot_com
"""
from datetime import datetime, date, timedelta
from numpy.ma.testutils import assert_equal
from pandas import Timestamp
from pandas.tseries.frequencies import MONTHS, DAYS, _period_code_map
from pandas.tseries.period import Period, PeriodIndex, period_range
from pandas.tseries.index import DatetimeIndex, date_range, Index
from pandas.tseries.tools import to_datetime
import pandas.tseries.period as period
import pandas.tseries.offsets as offsets
import pandas.core.datetools as datetools
import pandas as pd
import numpy as np
from numpy.random import randn
from pandas.compat import range, lrange, lmap, zip
from pandas import Series, DataFrame, _np_version_under1p9
from pandas import tslib
from pandas.util.testing import(assert_series_equal, assert_almost_equal,
assertRaisesRegexp)
import pandas.util.testing as tm
from pandas import compat
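# TestPeriodProperties exercises Period construction (from strings, ordinals,
# datetimes, offsets, NaT and multiplied frequencies) together with the derived
# calendar properties (year, quarter, month, week, day, hour, ...,
# days_in_month) and the start_time / end_time / to_timestamp conversions.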
class TestPeriodProperties(tm.TestCase):
"Test properties such as year, month, weekday, etc...."
#
def test_quarterly_negative_ordinals(self):
p = Period(ordinal=-1, freq='Q-DEC')
self.assertEqual(p.year, 1969)
self.assertEqual(p.quarter, 4)
p = Period(ordinal=-2, freq='Q-DEC')
self.assertEqual(p.year, 1969)
self.assertEqual(p.quarter, 3)
p = Period(ordinal=-2, freq='M')
self.assertEqual(p.year, 1969)
self.assertEqual(p.month, 11)
def test_period_cons_quarterly(self):
# bugs in scikits.timeseries
for month in MONTHS:
freq = 'Q-%s' % month
exp = Period('1989Q3', freq=freq)
self.assertIn('1989Q3', str(exp))
stamp = exp.to_timestamp('D', how='end')
p = Period(stamp, freq=freq)
self.assertEqual(p, exp)
stamp = exp.to_timestamp('3D', how='end')
p = Period(stamp, freq=freq)
self.assertEqual(p, exp)
def test_period_cons_annual(self):
# bugs in scikits.timeseries
for month in MONTHS:
freq = 'A-%s' % month
exp = Period('1989', freq=freq)
stamp = exp.to_timestamp('D', how='end') + timedelta(days=30)
p = Period(stamp, freq=freq)
self.assertEqual(p, exp + 1)
def test_period_cons_weekly(self):
for num in range(10, 17):
daystr = '2011-02-%d' % num
for day in DAYS:
freq = 'W-%s' % day
result = Period(daystr, freq=freq)
expected = Period(daystr, freq='D').asfreq(freq)
self.assertEqual(result, expected)
def test_period_cons_nat(self):
p = Period('NaT', freq='M')
self.assertEqual(p.ordinal, tslib.iNaT)
self.assertEqual(p.freq, 'M')
self.assertEqual((p + 1).ordinal, tslib.iNaT)
p = Period('nat', freq='W-SUN')
self.assertEqual(p.ordinal, tslib.iNaT)
self.assertEqual(p.freq, 'W-SUN')
self.assertEqual((p + 1).ordinal, tslib.iNaT)
p = Period(tslib.iNaT, freq='D')
self.assertEqual(p.ordinal, tslib.iNaT)
self.assertEqual(p.freq, 'D')
self.assertEqual((p + 1).ordinal, tslib.iNaT)
p = Period(tslib.iNaT, freq='3D')
self.assertEqual(p.ordinal, tslib.iNaT)
self.assertEqual(p.freq, offsets.Day(3))
self.assertEqual(p.freqstr, '3D')
self.assertEqual((p + 1).ordinal, tslib.iNaT)
self.assertRaises(ValueError, Period, 'NaT')
def test_period_cons_mult(self):
p1 = Period('2011-01', freq='3M')
p2 = Period('2011-01', freq='M')
self.assertEqual(p1.ordinal, p2.ordinal)
self.assertEqual(p1.freq, offsets.MonthEnd(3))
self.assertEqual(p1.freqstr, '3M')
self.assertEqual(p2.freq, offsets.MonthEnd())
self.assertEqual(p2.freqstr, 'M')
result = p1 + 1
self.assertEqual(result.ordinal, (p2 + 3).ordinal)
self.assertEqual(result.freq, p1.freq)
self.assertEqual(result.freqstr, '3M')
result = p1 - 1
self.assertEqual(result.ordinal, (p2 - 3).ordinal)
self.assertEqual(result.freq, p1.freq)
self.assertEqual(result.freqstr, '3M')
msg = ('Frequency must be positive, because it'
' represents span: -3M')
with tm.assertRaisesRegexp(ValueError, msg):
Period('2011-01', freq='-3M')
msg = ('Frequency must be positive, because it'
' represents span: 0M')
with tm.assertRaisesRegexp(ValueError, msg):
Period('2011-01', freq='0M')
def test_timestamp_tz_arg(self):
tm._skip_if_no_pytz()
import pytz
for case in ['Europe/Brussels', 'Asia/Tokyo', 'US/Pacific']:
p = Period('1/1/2005', freq='M').to_timestamp(tz=case)
exp = Timestamp('1/1/2005', tz='UTC').tz_convert(case)
exp_zone = pytz.timezone(case).normalize(p)
self.assertEqual(p, exp)
self.assertEqual(p.tz, exp_zone.tzinfo)
self.assertEqual(p.tz, exp.tz)
p = Period('1/1/2005', freq='3H').to_timestamp(tz=case)
exp = Timestamp('1/1/2005', tz='UTC').tz_convert(case)
exp_zone = pytz.timezone(case).normalize(p)
self.assertEqual(p, exp)
self.assertEqual(p.tz, exp_zone.tzinfo)
self.assertEqual(p.tz, exp.tz)
p = Period('1/1/2005', freq='A').to_timestamp(freq='A', tz=case)
exp = Timestamp('31/12/2005', tz='UTC').tz_convert(case)
exp_zone = pytz.timezone(case).normalize(p)
self.assertEqual(p, exp)
self.assertEqual(p.tz, exp_zone.tzinfo)
self.assertEqual(p.tz, exp.tz)
p = Period('1/1/2005', freq='A').to_timestamp(freq='3H', tz=case)
exp = Timestamp('1/1/2005', tz='UTC').tz_convert(case)
exp_zone = pytz.timezone(case).normalize(p)
self.assertEqual(p, exp)
self.assertEqual(p.tz, exp_zone.tzinfo)
self.assertEqual(p.tz, exp.tz)
def test_timestamp_tz_arg_dateutil(self):
from pandas.tslib import _dateutil_gettz as gettz
from pandas.tslib import maybe_get_tz
for case in ['dateutil/Europe/Brussels', 'dateutil/Asia/Tokyo',
'dateutil/US/Pacific']:
p = Period('1/1/2005', freq='M').to_timestamp(tz=maybe_get_tz(case))
exp = Timestamp('1/1/2005', tz='UTC').tz_convert(case)
self.assertEqual(p, exp)
self.assertEqual(p.tz, gettz(case.split('/', 1)[1]))
self.assertEqual(p.tz, exp.tz)
p = Period('1/1/2005', freq='M').to_timestamp(freq='3H', tz=maybe_get_tz(case))
exp = Timestamp('1/1/2005', tz='UTC').tz_convert(case)
self.assertEqual(p, exp)
self.assertEqual(p.tz, gettz(case.split('/', 1)[1]))
self.assertEqual(p.tz, exp.tz)
def test_timestamp_tz_arg_dateutil_from_string(self):
from pandas.tslib import _dateutil_gettz as gettz
p = Period('1/1/2005', freq='M').to_timestamp(tz='dateutil/Europe/Brussels')
self.assertEqual(p.tz, gettz('Europe/Brussels'))
def test_timestamp_nat_tz(self):
t = Period('NaT', freq='M').to_timestamp()
self.assertTrue(t is tslib.NaT)
t = Period('NaT', freq='M').to_timestamp(tz='Asia/Tokyo')
self.assertTrue(t is tslib.NaT)
def test_timestamp_mult(self):
p = pd.Period('2011-01', freq='M')
self.assertEqual(p.to_timestamp(how='S'), pd.Timestamp('2011-01-01'))
self.assertEqual(p.to_timestamp(how='E'), pd.Timestamp('2011-01-31'))
p = pd.Period('2011-01', freq='3M')
self.assertEqual(p.to_timestamp(how='S'), pd.Timestamp('2011-01-01'))
self.assertEqual(p.to_timestamp(how='E'), pd.Timestamp('2011-03-31'))
def test_timestamp_nat_mult(self):
for freq in ['M', '3M']:
p = pd.Period('NaT', freq=freq)
self.assertTrue(p.to_timestamp(how='S') is pd.NaT)
self.assertTrue(p.to_timestamp(how='E') is pd.NaT)
def test_period_constructor(self):
i1 = Period('1/1/2005', freq='M')
i2 = Period('Jan 2005')
self.assertEqual(i1, i2)
i1 = Period('2005', freq='A')
i2 = Period('2005')
i3 = Period('2005', freq='a')
self.assertEqual(i1, i2)
self.assertEqual(i1, i3)
i4 = Period('2005', freq='M')
i5 = Period('2005', freq='m')
self.assertRaises(ValueError, i1.__ne__, i4)
self.assertEqual(i4, i5)
i1 = Period.now('Q')
i2 = Period(datetime.now(), freq='Q')
i3 = Period.now('q')
self.assertEqual(i1, i2)
self.assertEqual(i1, i3)
# Biz day construction, roll forward if non-weekday
i1 = Period('3/10/12', freq='B')
i2 = Period('3/10/12', freq='D')
self.assertEqual(i1, i2.asfreq('B'))
i2 = Period('3/11/12', freq='D')
self.assertEqual(i1, i2.asfreq('B'))
i2 = Period('3/12/12', freq='D')
self.assertEqual(i1, i2.asfreq('B'))
i3 = Period('3/10/12', freq='b')
self.assertEqual(i1, i3)
i1 = Period(year=2005, quarter=1, freq='Q')
i2 = Period('1/1/2005', freq='Q')
self.assertEqual(i1, i2)
i1 = Period(year=2005, quarter=3, freq='Q')
i2 = Period('9/1/2005', freq='Q')
self.assertEqual(i1, i2)
i1 = Period(year=2005, month=3, day=1, freq='D')
i2 = Period('3/1/2005', freq='D')
self.assertEqual(i1, i2)
i3 = Period(year=2005, month=3, day=1, freq='d')
self.assertEqual(i1, i3)
i1 = Period(year=2012, month=3, day=10, freq='B')
i2 = Period('3/12/12', freq='B')
self.assertEqual(i1, i2)
i1 = Period('2005Q1')
i2 = Period(year=2005, quarter=1, freq='Q')
i3 = Period('2005q1')
self.assertEqual(i1, i2)
self.assertEqual(i1, i3)
i1 = Period('05Q1')
self.assertEqual(i1, i2)
lower = Period('05q1')
self.assertEqual(i1, lower)
i1 = Period('1Q2005')
self.assertEqual(i1, i2)
lower = Period('1q2005')
self.assertEqual(i1, lower)
i1 = Period('1Q05')
self.assertEqual(i1, i2)
lower = Period('1q05')
self.assertEqual(i1, lower)
i1 = Period('4Q1984')
self.assertEqual(i1.year, 1984)
lower = Period('4q1984')
self.assertEqual(i1, lower)
i1 = Period('1982', freq='min')
i2 = Period('1982', freq='MIN')
self.assertEqual(i1, i2)
i2 = Period('1982', freq=('Min', 1))
self.assertEqual(i1, i2)
expected = Period('2007-01', freq='M')
i1 = Period('200701', freq='M')
self.assertEqual(i1, expected)
i1 = Period('200701', freq='M')
self.assertEqual(i1, expected)
i1 = Period(200701, freq='M')
self.assertEqual(i1, expected)
i1 = Period(ordinal=200701, freq='M')
self.assertEqual(i1.year, 18695)
i1 = Period(datetime(2007, 1, 1), freq='M')
i2 = Period('200701', freq='M')
self.assertEqual(i1, i2)
i1 = Period(date(2007, 1, 1), freq='M')
i2 = Period(datetime(2007, 1, 1), freq='M')
i3 = Period(np.datetime64('2007-01-01'), freq='M')
i4 = Period(np.datetime64('2007-01-01 00:00:00Z'), freq='M')
i5 = Period(np.datetime64('2007-01-01 00:00:00.000Z'), freq='M')
self.assertEqual(i1, i2)
self.assertEqual(i1, i3)
self.assertEqual(i1, i4)
self.assertEqual(i1, i5)
i1 = Period('2007-01-01 09:00:00.001')
expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1000), freq='L')
self.assertEqual(i1, expected)
expected = Period(np.datetime64('2007-01-01 09:00:00.001Z'), freq='L')
self.assertEqual(i1, expected)
i1 = Period('2007-01-01 09:00:00.00101')
expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1010), freq='U')
self.assertEqual(i1, expected)
expected = Period(np.datetime64('2007-01-01 09:00:00.00101Z'),
freq='U')
self.assertEqual(i1, expected)
self.assertRaises(ValueError, Period, ordinal=200701)
self.assertRaises(ValueError, Period, '2007-1-1', freq='X')
def test_period_constructor_offsets(self):
self.assertEqual(Period('1/1/2005', freq=offsets.MonthEnd()),
Period('1/1/2005', freq='M'))
self.assertEqual(Period('2005', freq=offsets.YearEnd()),
Period('2005', freq='A'))
self.assertEqual(Period('2005', freq=offsets.MonthEnd()),
Period('2005', freq='M'))
self.assertEqual(Period('3/10/12', freq=offsets.BusinessDay()),
Period('3/10/12', freq='B'))
self.assertEqual(Period('3/10/12', freq=offsets.Day()),
Period('3/10/12', freq='D'))
self.assertEqual(Period(year=2005, quarter=1,
freq=offsets.QuarterEnd(startingMonth=12)),
Period(year=2005, quarter=1, freq='Q'))
self.assertEqual(Period(year=2005, quarter=2,
freq=offsets.QuarterEnd(startingMonth=12)),
Period(year=2005, quarter=2, freq='Q'))
self.assertEqual(Period(year=2005, month=3, day=1, freq=offsets.Day()),
Period(year=2005, month=3, day=1, freq='D'))
self.assertEqual(Period(year=2012, month=3, day=10, freq=offsets.BDay()),
Period(year=2012, month=3, day=10, freq='B'))
expected = Period('2005-03-01', freq='3D')
self.assertEqual(Period(year=2005, month=3, day=1, freq=offsets.Day(3)),
expected)
self.assertEqual(Period(year=2005, month=3, day=1, freq='3D'),
expected)
self.assertEqual(Period(year=2012, month=3, day=10, freq=offsets.BDay(3)),
Period(year=2012, month=3, day=10, freq='3B'))
self.assertEqual(Period(200701, freq=offsets.MonthEnd()),
Period(200701, freq='M'))
i1 = Period(ordinal=200701, freq=offsets.MonthEnd())
i2 = Period(ordinal=200701, freq='M')
self.assertEqual(i1, i2)
self.assertEqual(i1.year, 18695)
self.assertEqual(i2.year, 18695)
i1 = Period(datetime(2007, 1, 1), freq='M')
i2 = Period('200701', freq='M')
self.assertEqual(i1, i2)
i1 = Period(date(2007, 1, 1), freq='M')
i2 = Period(datetime(2007, 1, 1), freq='M')
i3 = Period(np.datetime64('2007-01-01'), freq='M')
i4 = Period(np.datetime64('2007-01-01 00:00:00Z'), freq='M')
i5 = Period(np.datetime64('2007-01-01 00:00:00.000Z'), freq='M')
self.assertEqual(i1, i2)
self.assertEqual(i1, i3)
self.assertEqual(i1, i4)
self.assertEqual(i1, i5)
i1 = Period('2007-01-01 09:00:00.001')
expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1000), freq='L')
self.assertEqual(i1, expected)
expected = Period(np.datetime64('2007-01-01 09:00:00.001Z'), freq='L')
self.assertEqual(i1, expected)
i1 = Period('2007-01-01 09:00:00.00101')
expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1010), freq='U')
self.assertEqual(i1, expected)
expected = Period(np.datetime64('2007-01-01 09:00:00.00101Z'),
freq='U')
self.assertEqual(i1, expected)
self.assertRaises(ValueError, Period, ordinal=200701)
self.assertRaises(ValueError, Period, '2007-1-1', freq='X')
def test_freq_str(self):
i1 = Period('1982', freq='Min')
self.assertEqual(i1.freq, offsets.Minute())
self.assertEqual(i1.freqstr, 'T')
def test_repr(self):
p = Period('Jan-2000')
self.assertIn('2000-01', repr(p))
p = Period('2000-12-15')
self.assertIn('2000-12-15', repr(p))
def test_repr_nat(self):
p = Period('nat', freq='M')
self.assertIn(repr(tslib.NaT), repr(p))
def test_millisecond_repr(self):
p = Period('2000-01-01 12:15:02.123')
self.assertEqual("Period('2000-01-01 12:15:02.123', 'L')", repr(p))
def test_microsecond_repr(self):
p = Period('2000-01-01 12:15:02.123567')
self.assertEqual("Period('2000-01-01 12:15:02.123567', 'U')", repr(p))
def test_strftime(self):
p = Period('2000-1-1 12:34:12', freq='S')
res = p.strftime('%Y-%m-%d %H:%M:%S')
self.assertEqual(res, '2000-01-01 12:34:12')
tm.assertIsInstance(res, compat.text_type) # GH3363
def test_sub_delta(self):
left, right = Period('2011', freq='A'), Period('2007', freq='A')
result = left - right
self.assertEqual(result, 4)
self.assertRaises(ValueError, left.__sub__,
Period('2007-01', freq='M'))
def test_to_timestamp(self):
p = Period('1982', freq='A')
start_ts = p.to_timestamp(how='S')
aliases = ['s', 'StarT', 'BEGIn']
for a in aliases:
self.assertEqual(start_ts, p.to_timestamp('D', how=a))
# freq with mult should not affect to the result
self.assertEqual(start_ts, p.to_timestamp('3D', how=a))
end_ts = p.to_timestamp(how='E')
aliases = ['e', 'end', 'FINIsH']
for a in aliases:
self.assertEqual(end_ts, p.to_timestamp('D', how=a))
self.assertEqual(end_ts, p.to_timestamp('3D', how=a))
from_lst = ['A', 'Q', 'M', 'W', 'B',
'D', 'H', 'Min', 'S']
def _ex(p):
return Timestamp((p + 1).start_time.value - 1)
for i, fcode in enumerate(from_lst):
p = Period('1982', freq=fcode)
result = p.to_timestamp().to_period(fcode)
self.assertEqual(result, p)
self.assertEqual(p.start_time, p.to_timestamp(how='S'))
self.assertEqual(p.end_time, _ex(p))
# Frequency other than daily
p = Period('1985', freq='A')
result = p.to_timestamp('H', how='end')
expected = datetime(1985, 12, 31, 23)
self.assertEqual(result, expected)
result = p.to_timestamp('3H', how='end')
self.assertEqual(result, expected)
result = p.to_timestamp('T', how='end')
expected = datetime(1985, 12, 31, 23, 59)
self.assertEqual(result, expected)
result = p.to_timestamp('2T', how='end')
self.assertEqual(result, expected)
result = p.to_timestamp(how='end')
expected = datetime(1985, 12, 31)
self.assertEqual(result, expected)
expected = datetime(1985, 1, 1)
result = p.to_timestamp('H', how='start')
self.assertEqual(result, expected)
result = p.to_timestamp('T', how='start')
self.assertEqual(result, expected)
result = p.to_timestamp('S', how='start')
self.assertEqual(result, expected)
result = p.to_timestamp('3H', how='start')
self.assertEqual(result, expected)
result = p.to_timestamp('5S', how='start')
self.assertEqual(result, expected)
p = Period('NaT', freq='W')
self.assertTrue(p.to_timestamp() is tslib.NaT)
def test_start_time(self):
freq_lst = ['A', 'Q', 'M', 'D', 'H', 'T', 'S']
xp = datetime(2012, 1, 1)
for f in freq_lst:
p = Period('2012', freq=f)
self.assertEqual(p.start_time, xp)
self.assertEqual(Period('2012', freq='B').start_time,
datetime(2012, 1, 2))
self.assertEqual(Period('2012', freq='W').start_time,
datetime(2011, 12, 26))
p = Period('NaT', freq='W')
self.assertTrue(p.start_time is tslib.NaT)
def test_end_time(self):
p = Period('2012', freq='A')
def _ex(*args):
return Timestamp(Timestamp(datetime(*args)).value - 1)
xp = _ex(2013, 1, 1)
self.assertEqual(xp, p.end_time)
p = Period('2012', freq='Q')
xp = _ex(2012, 4, 1)
self.assertEqual(xp, p.end_time)
p = Period('2012', freq='M')
xp = _ex(2012, 2, 1)
self.assertEqual(xp, p.end_time)
xp = _ex(2012, 1, 2)
p = Period('2012', freq='D')
self.assertEqual(p.end_time, xp)
xp = _ex(2012, 1, 1, 1)
p = Period('2012', freq='H')
self.assertEqual(p.end_time, xp)
xp = _ex(2012, 1, 3)
self.assertEqual(Period('2012', freq='B').end_time, xp)
xp = _ex(2012, 1, 2)
self.assertEqual(Period('2012', freq='W').end_time, xp)
p = Period('NaT', freq='W')
self.assertTrue(p.end_time is tslib.NaT)
def test_anchor_week_end_time(self):
def _ex(*args):
return Timestamp(Timestamp(datetime(*args)).value - 1)
p = Period('2013-1-1', 'W-SAT')
xp = _ex(2013, 1, 6)
self.assertEqual(p.end_time, xp)
def test_properties_annually(self):
# Test properties on Periods with annually frequency.
a_date = Period(freq='A', year=2007)
assert_equal(a_date.year, 2007)
def test_properties_quarterly(self):
# Test properties on Periods with daily frequency.
qedec_date = Period(freq="Q-DEC", year=2007, quarter=1)
qejan_date = Period(freq="Q-JAN", year=2007, quarter=1)
qejun_date = Period(freq="Q-JUN", year=2007, quarter=1)
#
for x in range(3):
for qd in (qedec_date, qejan_date, qejun_date):
assert_equal((qd + x).qyear, 2007)
assert_equal((qd + x).quarter, x + 1)
def test_properties_monthly(self):
# Test properties on Periods with daily frequency.
m_date = Period(freq='M', year=2007, month=1)
for x in range(11):
m_ival_x = m_date + x
assert_equal(m_ival_x.year, 2007)
if 1 <= x + 1 <= 3:
assert_equal(m_ival_x.quarter, 1)
elif 4 <= x + 1 <= 6:
assert_equal(m_ival_x.quarter, 2)
elif 7 <= x + 1 <= 9:
assert_equal(m_ival_x.quarter, 3)
elif 10 <= x + 1 <= 12:
assert_equal(m_ival_x.quarter, 4)
assert_equal(m_ival_x.month, x + 1)
def test_properties_weekly(self):
# Test properties on Periods with daily frequency.
w_date = Period(freq='W', year=2007, month=1, day=7)
#
assert_equal(w_date.year, 2007)
assert_equal(w_date.quarter, 1)
assert_equal(w_date.month, 1)
assert_equal(w_date.week, 1)
assert_equal((w_date - 1).week, 52)
assert_equal(w_date.days_in_month, 31)
assert_equal(Period(freq='W', year=2012, month=2, day=1).days_in_month, 29)
def test_properties_weekly_legacy(self):
# Test properties on Periods with daily frequency.
with tm.assert_produces_warning(FutureWarning):
w_date = Period(freq='WK', year=2007, month=1, day=7)
#
assert_equal(w_date.year, 2007)
assert_equal(w_date.quarter, 1)
assert_equal(w_date.month, 1)
assert_equal(w_date.week, 1)
assert_equal((w_date - 1).week, 52)
assert_equal(w_date.days_in_month, 31)
with tm.assert_produces_warning(FutureWarning):
exp = Period(freq='WK', year=2012, month=2, day=1)
assert_equal(exp.days_in_month, 29)
def test_properties_daily(self):
# Test properties on Periods with daily frequency.
b_date = Period(freq='B', year=2007, month=1, day=1)
#
assert_equal(b_date.year, 2007)
assert_equal(b_date.quarter, 1)
assert_equal(b_date.month, 1)
assert_equal(b_date.day, 1)
assert_equal(b_date.weekday, 0)
assert_equal(b_date.dayofyear, 1)
assert_equal(b_date.days_in_month, 31)
assert_equal(Period(freq='B', year=2012, month=2, day=1).days_in_month, 29)
#
d_date = Period(freq='D', year=2007, month=1, day=1)
#
assert_equal(d_date.year, 2007)
assert_equal(d_date.quarter, 1)
assert_equal(d_date.month, 1)
assert_equal(d_date.day, 1)
assert_equal(d_date.weekday, 0)
assert_equal(d_date.dayofyear, 1)
assert_equal(d_date.days_in_month, 31)
assert_equal(Period(freq='D', year=2012, month=2,
day=1).days_in_month, 29)
def test_properties_hourly(self):
# Test properties on Periods with hourly frequency.
h_date1 = Period(freq='H', year=2007, month=1, day=1, hour=0)
h_date2 = Period(freq='2H', year=2007, month=1, day=1, hour=0)
for h_date in [h_date1, h_date2]:
assert_equal(h_date.year, 2007)
assert_equal(h_date.quarter, 1)
assert_equal(h_date.month, 1)
assert_equal(h_date.day, 1)
assert_equal(h_date.weekday, 0)
assert_equal(h_date.dayofyear, 1)
assert_equal(h_date.hour, 0)
assert_equal(h_date.days_in_month, 31)
assert_equal(Period(freq='H', year=2012, month=2, day=1,
hour=0).days_in_month, 29)
def test_properties_minutely(self):
# Test properties on Periods with minutely frequency.
t_date = Period(freq='Min', year=2007, month=1, day=1, hour=0,
minute=0)
#
assert_equal(t_date.quarter, 1)
assert_equal(t_date.month, 1)
assert_equal(t_date.day, 1)
assert_equal(t_date.weekday, 0)
assert_equal(t_date.dayofyear, 1)
assert_equal(t_date.hour, 0)
assert_equal(t_date.minute, 0)
assert_equal(t_date.days_in_month, 31)
assert_equal(Period(freq='D', year=2012, month=2, day=1, hour=0,
minute=0).days_in_month, 29)
def test_properties_secondly(self):
# Test properties on Periods with secondly frequency.
s_date = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
#
assert_equal(s_date.year, 2007)
assert_equal(s_date.quarter, 1)
assert_equal(s_date.month, 1)
assert_equal(s_date.day, 1)
assert_equal(s_date.weekday, 0)
assert_equal(s_date.dayofyear, 1)
assert_equal(s_date.hour, 0)
assert_equal(s_date.minute, 0)
assert_equal(s_date.second, 0)
assert_equal(s_date.days_in_month, 31)
assert_equal(Period(freq='Min', year=2012, month=2, day=1, hour=0,
minute=0, second=0).days_in_month, 29)
def test_properties_nat(self):
p_nat = Period('NaT', freq='M')
t_nat = pd.Timestamp('NaT')
# confirm Period('NaT') work identical with Timestamp('NaT')
for f in ['year', 'month', 'day', 'hour', 'minute', 'second',
'week', 'dayofyear', 'quarter', 'days_in_month']:
self.assertTrue(np.isnan(getattr(p_nat, f)))
self.assertTrue(np.isnan(getattr(t_nat, f)))
for f in ['weekofyear', 'dayofweek', 'weekday', 'qyear']:
self.assertTrue(np.isnan(getattr(p_nat, f)))
def test_pnow(self):
dt = datetime.now()
val = period.pnow('D')
exp = Period(dt, freq='D')
self.assertEqual(val, exp)
val2 = period.pnow('2D')
exp2 = Period(dt, freq='2D')
self.assertEqual(val2, exp2)
self.assertEqual(val.ordinal, val2.ordinal)
self.assertEqual(val.ordinal, exp2.ordinal)
def test_constructor_corner(self):
expected = Period('2007-01', freq='2M')
self.assertEqual(Period(year=2007, month=1, freq='2M'), expected)
self.assertRaises(ValueError, Period, datetime.now())
self.assertRaises(ValueError, Period, datetime.now().date())
self.assertRaises(ValueError, Period, 1.6, freq='D')
self.assertRaises(ValueError, Period, ordinal=1.6, freq='D')
self.assertRaises(ValueError, Period, ordinal=2, value=1, freq='D')
self.assertRaises(ValueError, Period)
self.assertRaises(ValueError, Period, month=1)
p = Period('2007-01-01', freq='D')
result = Period(p, freq='A')
exp = Period('2007', freq='A')
self.assertEqual(result, exp)
def test_constructor_infer_freq(self):
p = Period('2007-01-01')
self.assertEqual(p.freq, 'D')
p = Period('2007-01-01 07')
self.assertEqual(p.freq, 'H')
p = Period('2007-01-01 07:10')
self.assertEqual(p.freq, 'T')
p = Period('2007-01-01 07:10:15')
self.assertEqual(p.freq, 'S')
p = Period('2007-01-01 07:10:15.123')
self.assertEqual(p.freq, 'L')
p = Period('2007-01-01 07:10:15.123000')
self.assertEqual(p.freq, 'L')
p = Period('2007-01-01 07:10:15.123400')
self.assertEqual(p.freq, 'U')
def test_asfreq_MS(self):
initial = Period("2013")
self.assertEqual(initial.asfreq(freq="M", how="S"), Period('2013-01', 'M'))
self.assertRaises(ValueError, initial.asfreq, freq="MS", how="S")
tm.assertRaisesRegexp(ValueError, "Unknown freqstr: MS", pd.Period, '2013-01', 'MS')
self.assertTrue(_period_code_map.get("MS") is None)
def noWrap(item):
return item
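# TestFreqConversion checks Period.asfreq conversions: each test_conv_* method
# starts from one frequency (annual, quarterly, ..., secondly) and converts to
# every other supported frequency, verifying both the start ('S') and end ('E')
# anchoring conventions against explicitly constructed expected Periods.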
class TestFreqConversion(tm.TestCase):
"Test frequency conversion of date objects"
def test_asfreq_corner(self):
val = Period(freq='A', year=2007)
result1 = val.asfreq('5t')
result2 = val.asfreq('t')
expected = Period('2007-12-31 23:59', freq='t')
self.assertEqual(result1.ordinal, expected.ordinal)
self.assertEqual(result1.freqstr, '5T')
self.assertEqual(result2.ordinal, expected.ordinal)
self.assertEqual(result2.freqstr, 'T')
def test_conv_annual(self):
# frequency conversion tests: from Annual Frequency
ival_A = Period(freq='A', year=2007)
ival_AJAN = Period(freq="A-JAN", year=2007)
ival_AJUN = Period(freq="A-JUN", year=2007)
ival_ANOV = Period(freq="A-NOV", year=2007)
ival_A_to_Q_start = Period(freq='Q', year=2007, quarter=1)
ival_A_to_Q_end = Period(freq='Q', year=2007, quarter=4)
ival_A_to_M_start = Period(freq='M', year=2007, month=1)
ival_A_to_M_end = Period(freq='M', year=2007, month=12)
ival_A_to_W_start = Period(freq='W', year=2007, month=1, day=1)
ival_A_to_W_end = Period(freq='W', year=2007, month=12, day=31)
ival_A_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_A_to_B_end = Period(freq='B', year=2007, month=12, day=31)
ival_A_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_A_to_D_end = Period(freq='D', year=2007, month=12, day=31)
ival_A_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_A_to_H_end = Period(freq='H', year=2007, month=12, day=31,
hour=23)
ival_A_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_A_to_T_end = Period(freq='Min', year=2007, month=12, day=31,
hour=23, minute=59)
ival_A_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_A_to_S_end = Period(freq='S', year=2007, month=12, day=31,
hour=23, minute=59, second=59)
ival_AJAN_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_AJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_AJUN_to_D_end = Period(freq='D', year=2007, month=6, day=30)
ival_AJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_ANOV_to_D_end = Period(freq='D', year=2007, month=11, day=30)
ival_ANOV_to_D_start = Period(freq='D', year=2006, month=12, day=1)
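        # For anchored annual frequencies the year label is the fiscal year that
        # ends in the anchor month: A-JAN 2007 spans 2006-02-01 through 2007-01-31
        # and A-JUN 2007 spans 2006-07-01 through 2007-06-30, which is what the
        # expected start/end daily Periods above encode.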
assert_equal(ival_A.asfreq('Q', 'S'), ival_A_to_Q_start)
assert_equal(ival_A.asfreq('Q', 'e'), ival_A_to_Q_end)
assert_equal(ival_A.asfreq('M', 's'), ival_A_to_M_start)
assert_equal(ival_A.asfreq('M', 'E'), ival_A_to_M_end)
assert_equal(ival_A.asfreq('W', 'S'), ival_A_to_W_start)
assert_equal(ival_A.asfreq('W', 'E'), ival_A_to_W_end)
assert_equal(ival_A.asfreq('B', 'S'), ival_A_to_B_start)
assert_equal(ival_A.asfreq('B', 'E'), ival_A_to_B_end)
assert_equal(ival_A.asfreq('D', 'S'), ival_A_to_D_start)
assert_equal(ival_A.asfreq('D', 'E'), ival_A_to_D_end)
assert_equal(ival_A.asfreq('H', 'S'), ival_A_to_H_start)
assert_equal(ival_A.asfreq('H', 'E'), ival_A_to_H_end)
assert_equal(ival_A.asfreq('min', 'S'), ival_A_to_T_start)
assert_equal(ival_A.asfreq('min', 'E'), ival_A_to_T_end)
assert_equal(ival_A.asfreq('T', 'S'), ival_A_to_T_start)
assert_equal(ival_A.asfreq('T', 'E'), ival_A_to_T_end)
assert_equal(ival_A.asfreq('S', 'S'), ival_A_to_S_start)
assert_equal(ival_A.asfreq('S', 'E'), ival_A_to_S_end)
assert_equal(ival_AJAN.asfreq('D', 'S'), ival_AJAN_to_D_start)
assert_equal(ival_AJAN.asfreq('D', 'E'), ival_AJAN_to_D_end)
assert_equal(ival_AJUN.asfreq('D', 'S'), ival_AJUN_to_D_start)
assert_equal(ival_AJUN.asfreq('D', 'E'), ival_AJUN_to_D_end)
assert_equal(ival_ANOV.asfreq('D', 'S'), ival_ANOV_to_D_start)
assert_equal(ival_ANOV.asfreq('D', 'E'), ival_ANOV_to_D_end)
assert_equal(ival_A.asfreq('A'), ival_A)
def test_conv_quarterly(self):
# frequency conversion tests: from Quarterly Frequency
ival_Q = Period(freq='Q', year=2007, quarter=1)
ival_Q_end_of_year = Period(freq='Q', year=2007, quarter=4)
ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1)
ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1)
ival_Q_to_A = Period(freq='A', year=2007)
ival_Q_to_M_start = Period(freq='M', year=2007, month=1)
ival_Q_to_M_end = Period(freq='M', year=2007, month=3)
ival_Q_to_W_start = Period(freq='W', year=2007, month=1, day=1)
ival_Q_to_W_end = Period(freq='W', year=2007, month=3, day=31)
ival_Q_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_Q_to_B_end = Period(freq='B', year=2007, month=3, day=30)
ival_Q_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_Q_to_D_end = Period(freq='D', year=2007, month=3, day=31)
ival_Q_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_Q_to_H_end = Period(freq='H', year=2007, month=3, day=31,
hour=23)
ival_Q_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_Q_to_T_end = Period(freq='Min', year=2007, month=3, day=31,
hour=23, minute=59)
ival_Q_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_Q_to_S_end = Period(freq='S', year=2007, month=3, day=31,
hour=23, minute=59, second=59)
ival_QEJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_QEJAN_to_D_end = Period(freq='D', year=2006, month=4, day=30)
ival_QEJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_QEJUN_to_D_end = Period(freq='D', year=2006, month=9, day=30)
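        # Anchored quarterly frequencies follow the same fiscal-year rule: for
        # Q-JAN the fiscal year ends in January, so 2007Q1 covers Feb-Apr 2006,
        # and for Q-JUN 2007Q1 covers Jul-Sep 2006, matching the expected daily
        # Periods above.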
assert_equal(ival_Q.asfreq('A'), ival_Q_to_A)
assert_equal(ival_Q_end_of_year.asfreq('A'), ival_Q_to_A)
assert_equal(ival_Q.asfreq('M', 'S'), ival_Q_to_M_start)
assert_equal(ival_Q.asfreq('M', 'E'), ival_Q_to_M_end)
assert_equal(ival_Q.asfreq('W', 'S'), ival_Q_to_W_start)
assert_equal(ival_Q.asfreq('W', 'E'), ival_Q_to_W_end)
assert_equal(ival_Q.asfreq('B', 'S'), ival_Q_to_B_start)
assert_equal(ival_Q.asfreq('B', 'E'), ival_Q_to_B_end)
assert_equal(ival_Q.asfreq('D', 'S'), ival_Q_to_D_start)
assert_equal(ival_Q.asfreq('D', 'E'), ival_Q_to_D_end)
assert_equal(ival_Q.asfreq('H', 'S'), ival_Q_to_H_start)
assert_equal(ival_Q.asfreq('H', 'E'), ival_Q_to_H_end)
assert_equal(ival_Q.asfreq('Min', 'S'), ival_Q_to_T_start)
assert_equal(ival_Q.asfreq('Min', 'E'), ival_Q_to_T_end)
assert_equal(ival_Q.asfreq('S', 'S'), ival_Q_to_S_start)
assert_equal(ival_Q.asfreq('S', 'E'), ival_Q_to_S_end)
assert_equal(ival_QEJAN.asfreq('D', 'S'), ival_QEJAN_to_D_start)
assert_equal(ival_QEJAN.asfreq('D', 'E'), ival_QEJAN_to_D_end)
assert_equal(ival_QEJUN.asfreq('D', 'S'), ival_QEJUN_to_D_start)
assert_equal(ival_QEJUN.asfreq('D', 'E'), ival_QEJUN_to_D_end)
assert_equal(ival_Q.asfreq('Q'), ival_Q)
def test_conv_monthly(self):
# frequency conversion tests: from Monthly Frequency
ival_M = Period(freq='M', year=2007, month=1)
ival_M_end_of_year = Period(freq='M', year=2007, month=12)
ival_M_end_of_quarter = Period(freq='M', year=2007, month=3)
ival_M_to_A = Period(freq='A', year=2007)
ival_M_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_M_to_W_start = Period(freq='W', year=2007, month=1, day=1)
ival_M_to_W_end = Period(freq='W', year=2007, month=1, day=31)
ival_M_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_M_to_B_end = Period(freq='B', year=2007, month=1, day=31)
ival_M_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_M_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_M_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_M_to_H_end = Period(freq='H', year=2007, month=1, day=31,
hour=23)
ival_M_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_M_to_T_end = Period(freq='Min', year=2007, month=1, day=31,
hour=23, minute=59)
ival_M_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_M_to_S_end = Period(freq='S', year=2007, month=1, day=31,
hour=23, minute=59, second=59)
assert_equal(ival_M.asfreq('A'), ival_M_to_A)
assert_equal(ival_M_end_of_year.asfreq('A'), ival_M_to_A)
assert_equal(ival_M.asfreq('Q'), ival_M_to_Q)
assert_equal(ival_M_end_of_quarter.asfreq('Q'), ival_M_to_Q)
assert_equal(ival_M.asfreq('W', 'S'), ival_M_to_W_start)
assert_equal(ival_M.asfreq('W', 'E'), ival_M_to_W_end)
assert_equal(ival_M.asfreq('B', 'S'), ival_M_to_B_start)
assert_equal(ival_M.asfreq('B', 'E'), ival_M_to_B_end)
assert_equal(ival_M.asfreq('D', 'S'), ival_M_to_D_start)
assert_equal(ival_M.asfreq('D', 'E'), ival_M_to_D_end)
assert_equal(ival_M.asfreq('H', 'S'), ival_M_to_H_start)
assert_equal(ival_M.asfreq('H', 'E'), ival_M_to_H_end)
assert_equal(ival_M.asfreq('Min', 'S'), ival_M_to_T_start)
assert_equal(ival_M.asfreq('Min', 'E'), ival_M_to_T_end)
assert_equal(ival_M.asfreq('S', 'S'), ival_M_to_S_start)
assert_equal(ival_M.asfreq('S', 'E'), ival_M_to_S_end)
assert_equal(ival_M.asfreq('M'), ival_M)
def test_conv_weekly(self):
# frequency conversion tests: from Weekly Frequency
ival_W = Period(freq='W', year=2007, month=1, day=1)
ival_WSUN = Period(freq='W', year=2007, month=1, day=7)
ival_WSAT = Period(freq='W-SAT', year=2007, month=1, day=6)
ival_WFRI = Period(freq='W-FRI', year=2007, month=1, day=5)
ival_WTHU = Period(freq='W-THU', year=2007, month=1, day=4)
ival_WWED = Period(freq='W-WED', year=2007, month=1, day=3)
ival_WTUE = Period(freq='W-TUE', year=2007, month=1, day=2)
ival_WMON = Period(freq='W-MON', year=2007, month=1, day=1)
ival_WSUN_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_WSUN_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_WSAT_to_D_start = Period(freq='D', year=2006, month=12, day=31)
ival_WSAT_to_D_end = Period(freq='D', year=2007, month=1, day=6)
ival_WFRI_to_D_start = Period(freq='D', year=2006, month=12, day=30)
ival_WFRI_to_D_end = Period(freq='D', year=2007, month=1, day=5)
ival_WTHU_to_D_start = Period(freq='D', year=2006, month=12, day=29)
ival_WTHU_to_D_end = Period(freq='D', year=2007, month=1, day=4)
ival_WWED_to_D_start = Period(freq='D', year=2006, month=12, day=28)
ival_WWED_to_D_end = Period(freq='D', year=2007, month=1, day=3)
ival_WTUE_to_D_start = Period(freq='D', year=2006, month=12, day=27)
ival_WTUE_to_D_end = Period(freq='D', year=2007, month=1, day=2)
ival_WMON_to_D_start = Period(freq='D', year=2006, month=12, day=26)
ival_WMON_to_D_end = Period(freq='D', year=2007, month=1, day=1)
ival_W_end_of_year = Period(freq='W', year=2007, month=12, day=31)
ival_W_end_of_quarter = Period(freq='W', year=2007, month=3, day=31)
ival_W_end_of_month = Period(freq='W', year=2007, month=1, day=31)
ival_W_to_A = Period(freq='A', year=2007)
ival_W_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_W_to_M = Period(freq='M', year=2007, month=1)
if Period(freq='D', year=2007, month=12, day=31).weekday == 6:
ival_W_to_A_end_of_year = Period(freq='A', year=2007)
else:
ival_W_to_A_end_of_year = Period(freq='A', year=2008)
if Period(freq='D', year=2007, month=3, day=31).weekday == 6:
ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007,
quarter=1)
else:
ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007,
quarter=2)
if Period(freq='D', year=2007, month=1, day=31).weekday == 6:
ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=1)
else:
ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=2)
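        # A weekly (W-SUN) period is labelled by the year/quarter/month containing
        # its last day (a Sunday); when Dec 31 / Mar 31 / Jan 31 does not fall on a
        # Sunday the enclosing week spills into the following period, hence the
        # conditional expectations above.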
ival_W_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_W_to_B_end = Period(freq='B', year=2007, month=1, day=5)
ival_W_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_W_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_W_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_W_to_H_end = Period(freq='H', year=2007, month=1, day=7,
hour=23)
ival_W_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_W_to_T_end = Period(freq='Min', year=2007, month=1, day=7,
hour=23, minute=59)
ival_W_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_W_to_S_end = Period(freq='S', year=2007, month=1, day=7,
hour=23, minute=59, second=59)
assert_equal(ival_W.asfreq('A'), ival_W_to_A)
assert_equal(ival_W_end_of_year.asfreq('A'),
ival_W_to_A_end_of_year)
assert_equal(ival_W.asfreq('Q'), ival_W_to_Q)
assert_equal(ival_W_end_of_quarter.asfreq('Q'),
ival_W_to_Q_end_of_quarter)
assert_equal(ival_W.asfreq('M'), ival_W_to_M)
assert_equal(ival_W_end_of_month.asfreq('M'),
ival_W_to_M_end_of_month)
assert_equal(ival_W.asfreq('B', 'S'), ival_W_to_B_start)
assert_equal(ival_W.asfreq('B', 'E'), ival_W_to_B_end)
assert_equal(ival_W.asfreq('D', 'S'), ival_W_to_D_start)
assert_equal(ival_W.asfreq('D', 'E'), ival_W_to_D_end)
assert_equal(ival_WSUN.asfreq('D', 'S'), ival_WSUN_to_D_start)
assert_equal(ival_WSUN.asfreq('D', 'E'), ival_WSUN_to_D_end)
assert_equal(ival_WSAT.asfreq('D', 'S'), ival_WSAT_to_D_start)
assert_equal(ival_WSAT.asfreq('D', 'E'), ival_WSAT_to_D_end)
assert_equal(ival_WFRI.asfreq('D', 'S'), ival_WFRI_to_D_start)
assert_equal(ival_WFRI.asfreq('D', 'E'), ival_WFRI_to_D_end)
assert_equal(ival_WTHU.asfreq('D', 'S'), ival_WTHU_to_D_start)
assert_equal(ival_WTHU.asfreq('D', 'E'), ival_WTHU_to_D_end)
assert_equal(ival_WWED.asfreq('D', 'S'), ival_WWED_to_D_start)
assert_equal(ival_WWED.asfreq('D', 'E'), ival_WWED_to_D_end)
assert_equal(ival_WTUE.asfreq('D', 'S'), ival_WTUE_to_D_start)
assert_equal(ival_WTUE.asfreq('D', 'E'), ival_WTUE_to_D_end)
assert_equal(ival_WMON.asfreq('D', 'S'), ival_WMON_to_D_start)
assert_equal(ival_WMON.asfreq('D', 'E'), ival_WMON_to_D_end)
assert_equal(ival_W.asfreq('H', 'S'), ival_W_to_H_start)
assert_equal(ival_W.asfreq('H', 'E'), ival_W_to_H_end)
assert_equal(ival_W.asfreq('Min', 'S'), ival_W_to_T_start)
assert_equal(ival_W.asfreq('Min', 'E'), ival_W_to_T_end)
assert_equal(ival_W.asfreq('S', 'S'), ival_W_to_S_start)
assert_equal(ival_W.asfreq('S', 'E'), ival_W_to_S_end)
assert_equal(ival_W.asfreq('W'), ival_W)
def test_conv_weekly_legacy(self):
# frequency conversion tests: from Weekly Frequency
with tm.assert_produces_warning(FutureWarning):
ival_W = Period(freq='WK', year=2007, month=1, day=1)
with tm.assert_produces_warning(FutureWarning):
ival_WSUN = Period(freq='WK', year=2007, month=1, day=7)
with tm.assert_produces_warning(FutureWarning):
ival_WSAT = Period(freq='WK-SAT', year=2007, month=1, day=6)
with tm.assert_produces_warning(FutureWarning):
ival_WFRI = Period(freq='WK-FRI', year=2007, month=1, day=5)
with tm.assert_produces_warning(FutureWarning):
ival_WTHU = Period(freq='WK-THU', year=2007, month=1, day=4)
with tm.assert_produces_warning(FutureWarning):
ival_WWED = Period(freq='WK-WED', year=2007, month=1, day=3)
with tm.assert_produces_warning(FutureWarning):
ival_WTUE = Period(freq='WK-TUE', year=2007, month=1, day=2)
with tm.assert_produces_warning(FutureWarning):
ival_WMON = Period(freq='WK-MON', year=2007, month=1, day=1)
ival_WSUN_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_WSUN_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_WSAT_to_D_start = Period(freq='D', year=2006, month=12, day=31)
ival_WSAT_to_D_end = Period(freq='D', year=2007, month=1, day=6)
ival_WFRI_to_D_start = Period(freq='D', year=2006, month=12, day=30)
ival_WFRI_to_D_end = Period(freq='D', year=2007, month=1, day=5)
ival_WTHU_to_D_start = Period(freq='D', year=2006, month=12, day=29)
ival_WTHU_to_D_end = Period(freq='D', year=2007, month=1, day=4)
ival_WWED_to_D_start = Period(freq='D', year=2006, month=12, day=28)
ival_WWED_to_D_end = Period(freq='D', year=2007, month=1, day=3)
ival_WTUE_to_D_start = Period(freq='D', year=2006, month=12, day=27)
ival_WTUE_to_D_end = Period(freq='D', year=2007, month=1, day=2)
ival_WMON_to_D_start = Period(freq='D', year=2006, month=12, day=26)
ival_WMON_to_D_end = Period(freq='D', year=2007, month=1, day=1)
with tm.assert_produces_warning(FutureWarning):
ival_W_end_of_year = Period(freq='WK', year=2007, month=12, day=31)
with tm.assert_produces_warning(FutureWarning):
ival_W_end_of_quarter = Period(freq='WK', year=2007, month=3, day=31)
with tm.assert_produces_warning(FutureWarning):
ival_W_end_of_month = Period(freq='WK', year=2007, month=1, day=31)
ival_W_to_A = Period(freq='A', year=2007)
ival_W_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_W_to_M = Period(freq='M', year=2007, month=1)
if Period(freq='D', year=2007, month=12, day=31).weekday == 6:
ival_W_to_A_end_of_year = Period(freq='A', year=2007)
else:
ival_W_to_A_end_of_year = Period(freq='A', year=2008)
if Period(freq='D', year=2007, month=3, day=31).weekday == 6:
ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007,
quarter=1)
else:
ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007,
quarter=2)
if Period(freq='D', year=2007, month=1, day=31).weekday == 6:
ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=1)
else:
ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=2)
ival_W_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_W_to_B_end = Period(freq='B', year=2007, month=1, day=5)
ival_W_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_W_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_W_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_W_to_H_end = Period(freq='H', year=2007, month=1, day=7,
hour=23)
ival_W_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_W_to_T_end = Period(freq='Min', year=2007, month=1, day=7,
hour=23, minute=59)
ival_W_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_W_to_S_end = Period(freq='S', year=2007, month=1, day=7,
hour=23, minute=59, second=59)
assert_equal(ival_W.asfreq('A'), ival_W_to_A)
assert_equal(ival_W_end_of_year.asfreq('A'),
ival_W_to_A_end_of_year)
assert_equal(ival_W.asfreq('Q'), ival_W_to_Q)
assert_equal(ival_W_end_of_quarter.asfreq('Q'),
ival_W_to_Q_end_of_quarter)
assert_equal(ival_W.asfreq('M'), ival_W_to_M)
assert_equal(ival_W_end_of_month.asfreq('M'),
ival_W_to_M_end_of_month)
assert_equal(ival_W.asfreq('B', 'S'), ival_W_to_B_start)
assert_equal(ival_W.asfreq('B', 'E'), ival_W_to_B_end)
assert_equal(ival_W.asfreq('D', 'S'), ival_W_to_D_start)
assert_equal(ival_W.asfreq('D', 'E'), ival_W_to_D_end)
assert_equal(ival_WSUN.asfreq('D', 'S'), ival_WSUN_to_D_start)
assert_equal(ival_WSUN.asfreq('D', 'E'), ival_WSUN_to_D_end)
assert_equal(ival_WSAT.asfreq('D', 'S'), ival_WSAT_to_D_start)
assert_equal(ival_WSAT.asfreq('D', 'E'), ival_WSAT_to_D_end)
assert_equal(ival_WFRI.asfreq('D', 'S'), ival_WFRI_to_D_start)
assert_equal(ival_WFRI.asfreq('D', 'E'), ival_WFRI_to_D_end)
assert_equal(ival_WTHU.asfreq('D', 'S'), ival_WTHU_to_D_start)
assert_equal(ival_WTHU.asfreq('D', 'E'), ival_WTHU_to_D_end)
assert_equal(ival_WWED.asfreq('D', 'S'), ival_WWED_to_D_start)
assert_equal(ival_WWED.asfreq('D', 'E'), ival_WWED_to_D_end)
assert_equal(ival_WTUE.asfreq('D', 'S'), ival_WTUE_to_D_start)
assert_equal(ival_WTUE.asfreq('D', 'E'), ival_WTUE_to_D_end)
assert_equal(ival_WMON.asfreq('D', 'S'), ival_WMON_to_D_start)
assert_equal(ival_WMON.asfreq('D', 'E'), ival_WMON_to_D_end)
assert_equal(ival_W.asfreq('H', 'S'), ival_W_to_H_start)
assert_equal(ival_W.asfreq('H', 'E'), ival_W_to_H_end)
assert_equal(ival_W.asfreq('Min', 'S'), ival_W_to_T_start)
assert_equal(ival_W.asfreq('Min', 'E'), ival_W_to_T_end)
assert_equal(ival_W.asfreq('S', 'S'), ival_W_to_S_start)
assert_equal(ival_W.asfreq('S', 'E'), ival_W_to_S_end)
with tm.assert_produces_warning(FutureWarning):
assert_equal(ival_W.asfreq('WK'), ival_W)
def test_conv_business(self):
# frequency conversion tests: from Business Frequency"
ival_B = Period(freq='B', year=2007, month=1, day=1)
ival_B_end_of_year = Period(freq='B', year=2007, month=12, day=31)
ival_B_end_of_quarter = Period(freq='B', year=2007, month=3, day=30)
ival_B_end_of_month = Period(freq='B', year=2007, month=1, day=31)
ival_B_end_of_week = Period(freq='B', year=2007, month=1, day=5)
ival_B_to_A = Period(freq='A', year=2007)
ival_B_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_B_to_M = Period(freq='M', year=2007, month=1)
ival_B_to_W = Period(freq='W', year=2007, month=1, day=7)
ival_B_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_B_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_B_to_H_end = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_B_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_B_to_T_end = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_B_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_B_to_S_end = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
assert_equal(ival_B.asfreq('A'), ival_B_to_A)
assert_equal(ival_B_end_of_year.asfreq('A'), ival_B_to_A)
assert_equal(ival_B.asfreq('Q'), ival_B_to_Q)
assert_equal(ival_B_end_of_quarter.asfreq('Q'), ival_B_to_Q)
assert_equal(ival_B.asfreq('M'), ival_B_to_M)
assert_equal(ival_B_end_of_month.asfreq('M'), ival_B_to_M)
assert_equal(ival_B.asfreq('W'), ival_B_to_W)
assert_equal(ival_B_end_of_week.asfreq('W'), ival_B_to_W)
assert_equal(ival_B.asfreq('D'), ival_B_to_D)
assert_equal(ival_B.asfreq('H', 'S'), ival_B_to_H_start)
assert_equal(ival_B.asfreq('H', 'E'), ival_B_to_H_end)
assert_equal(ival_B.asfreq('Min', 'S'), ival_B_to_T_start)
assert_equal(ival_B.asfreq('Min', 'E'), ival_B_to_T_end)
assert_equal(ival_B.asfreq('S', 'S'), ival_B_to_S_start)
assert_equal(ival_B.asfreq('S', 'E'), ival_B_to_S_end)
assert_equal(ival_B.asfreq('B'), ival_B)
def test_conv_daily(self):
# frequency conversion tests: from Business Frequency"
ival_D = Period(freq='D', year=2007, month=1, day=1)
ival_D_end_of_year = Period(freq='D', year=2007, month=12, day=31)
ival_D_end_of_quarter = Period(freq='D', year=2007, month=3, day=31)
ival_D_end_of_month = Period(freq='D', year=2007, month=1, day=31)
ival_D_end_of_week = Period(freq='D', year=2007, month=1, day=7)
ival_D_friday = Period(freq='D', year=2007, month=1, day=5)
ival_D_saturday = Period(freq='D', year=2007, month=1, day=6)
ival_D_sunday = Period(freq='D', year=2007, month=1, day=7)
ival_D_monday = Period(freq='D', year=2007, month=1, day=8)
ival_B_friday = Period(freq='B', year=2007, month=1, day=5)
ival_B_monday = Period(freq='B', year=2007, month=1, day=8)
ival_D_to_A = Period(freq='A', year=2007)
ival_Deoq_to_AJAN = Period(freq='A-JAN', year=2008)
ival_Deoq_to_AJUN = Period(freq='A-JUN', year=2007)
ival_Deoq_to_ADEC = Period(freq='A-DEC', year=2007)
ival_D_to_QEJAN = Period(freq="Q-JAN", year=2007, quarter=4)
ival_D_to_QEJUN = Period(freq="Q-JUN", year=2007, quarter=3)
ival_D_to_QEDEC = Period(freq="Q-DEC", year=2007, quarter=1)
ival_D_to_M = Period(freq='M', year=2007, month=1)
ival_D_to_W = Period(freq='W', year=2007, month=1, day=7)
ival_D_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_D_to_H_end = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_D_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_D_to_T_end = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_D_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_D_to_S_end = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
assert_equal(ival_D.asfreq('A'), ival_D_to_A)
assert_equal(ival_D_end_of_quarter.asfreq('A-JAN'),
ival_Deoq_to_AJAN)
assert_equal(ival_D_end_of_quarter.asfreq('A-JUN'),
ival_Deoq_to_AJUN)
assert_equal(ival_D_end_of_quarter.asfreq('A-DEC'),
ival_Deoq_to_ADEC)
assert_equal(ival_D_end_of_year.asfreq('A'), ival_D_to_A)
assert_equal(ival_D_end_of_quarter.asfreq('Q'), ival_D_to_QEDEC)
assert_equal(ival_D.asfreq("Q-JAN"), ival_D_to_QEJAN)
assert_equal(ival_D.asfreq("Q-JUN"), ival_D_to_QEJUN)
assert_equal(ival_D.asfreq("Q-DEC"), ival_D_to_QEDEC)
assert_equal(ival_D.asfreq('M'), ival_D_to_M)
assert_equal(ival_D_end_of_month.asfreq('M'), ival_D_to_M)
assert_equal(ival_D.asfreq('W'), ival_D_to_W)
assert_equal(ival_D_end_of_week.asfreq('W'), ival_D_to_W)
assert_equal(ival_D_friday.asfreq('B'), ival_B_friday)
assert_equal(ival_D_saturday.asfreq('B', 'S'), ival_B_friday)
assert_equal(ival_D_saturday.asfreq('B', 'E'), ival_B_monday)
assert_equal(ival_D_sunday.asfreq('B', 'S'), ival_B_friday)
assert_equal(ival_D_sunday.asfreq('B', 'E'), ival_B_monday)
assert_equal(ival_D.asfreq('H', 'S'), ival_D_to_H_start)
assert_equal(ival_D.asfreq('H', 'E'), ival_D_to_H_end)
assert_equal(ival_D.asfreq('Min', 'S'), ival_D_to_T_start)
assert_equal(ival_D.asfreq('Min', 'E'), ival_D_to_T_end)
assert_equal(ival_D.asfreq('S', 'S'), ival_D_to_S_start)
assert_equal(ival_D.asfreq('S', 'E'), ival_D_to_S_end)
assert_equal(ival_D.asfreq('D'), ival_D)
def test_conv_hourly(self):
# frequency conversion tests: from Hourly Frequency"
ival_H = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_H_end_of_year = Period(freq='H', year=2007, month=12, day=31,
hour=23)
ival_H_end_of_quarter = Period(freq='H', year=2007, month=3, day=31,
hour=23)
ival_H_end_of_month = Period(freq='H', year=2007, month=1, day=31,
hour=23)
ival_H_end_of_week = Period(freq='H', year=2007, month=1, day=7,
hour=23)
ival_H_end_of_day = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_H_end_of_bus = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_H_to_A = Period(freq='A', year=2007)
ival_H_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_H_to_M = Period(freq='M', year=2007, month=1)
ival_H_to_W = Period(freq='W', year=2007, month=1, day=7)
ival_H_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_H_to_B = Period(freq='B', year=2007, month=1, day=1)
ival_H_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_H_to_T_end = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=59)
ival_H_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_H_to_S_end = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=59, second=59)
assert_equal(ival_H.asfreq('A'), ival_H_to_A)
assert_equal(ival_H_end_of_year.asfreq('A'), ival_H_to_A)
assert_equal(ival_H.asfreq('Q'), ival_H_to_Q)
assert_equal(ival_H_end_of_quarter.asfreq('Q'), ival_H_to_Q)
assert_equal(ival_H.asfreq('M'), ival_H_to_M)
assert_equal(ival_H_end_of_month.asfreq('M'), ival_H_to_M)
assert_equal(ival_H.asfreq('W'), ival_H_to_W)
assert_equal(ival_H_end_of_week.asfreq('W'), ival_H_to_W)
assert_equal(ival_H.asfreq('D'), ival_H_to_D)
assert_equal(ival_H_end_of_day.asfreq('D'), ival_H_to_D)
assert_equal(ival_H.asfreq('B'), ival_H_to_B)
assert_equal(ival_H_end_of_bus.asfreq('B'), ival_H_to_B)
assert_equal(ival_H.asfreq('Min', 'S'), ival_H_to_T_start)
assert_equal(ival_H.asfreq('Min', 'E'), ival_H_to_T_end)
assert_equal(ival_H.asfreq('S', 'S'), ival_H_to_S_start)
assert_equal(ival_H.asfreq('S', 'E'), ival_H_to_S_end)
assert_equal(ival_H.asfreq('H'), ival_H)
def test_conv_minutely(self):
# frequency conversion tests: from Minutely Frequency"
ival_T = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_T_end_of_year = Period(freq='Min', year=2007, month=12, day=31,
hour=23, minute=59)
ival_T_end_of_quarter = Period(freq='Min', year=2007, month=3, day=31,
hour=23, minute=59)
ival_T_end_of_month = Period(freq='Min', year=2007, month=1, day=31,
hour=23, minute=59)
ival_T_end_of_week = Period(freq='Min', year=2007, month=1, day=7,
hour=23, minute=59)
ival_T_end_of_day = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_T_end_of_bus = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_T_end_of_hour = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=59)
ival_T_to_A = Period(freq='A', year=2007)
ival_T_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_T_to_M = Period(freq='M', year=2007, month=1)
ival_T_to_W = Period(freq='W', year=2007, month=1, day=7)
ival_T_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_T_to_B = Period(freq='B', year=2007, month=1, day=1)
ival_T_to_H = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_T_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_T_to_S_end = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=59)
assert_equal(ival_T.asfreq('A'), ival_T_to_A)
assert_equal(ival_T_end_of_year.asfreq('A'), ival_T_to_A)
assert_equal(ival_T.asfreq('Q'), ival_T_to_Q)
assert_equal(ival_T_end_of_quarter.asfreq('Q'), ival_T_to_Q)
assert_equal(ival_T.asfreq('M'), ival_T_to_M)
assert_equal(ival_T_end_of_month.asfreq('M'), ival_T_to_M)
assert_equal(ival_T.asfreq('W'), ival_T_to_W)
assert_equal(ival_T_end_of_week.asfreq('W'), ival_T_to_W)
assert_equal(ival_T.asfreq('D'), ival_T_to_D)
assert_equal(ival_T_end_of_day.asfreq('D'), ival_T_to_D)
assert_equal(ival_T.asfreq('B'), ival_T_to_B)
assert_equal(ival_T_end_of_bus.asfreq('B'), ival_T_to_B)
assert_equal(ival_T.asfreq('H'), ival_T_to_H)
assert_equal(ival_T_end_of_hour.asfreq('H'), ival_T_to_H)
assert_equal(ival_T.asfreq('S', 'S'), ival_T_to_S_start)
assert_equal(ival_T.asfreq('S', 'E'), ival_T_to_S_end)
assert_equal(ival_T.asfreq('Min'), ival_T)
def test_conv_secondly(self):
# frequency conversion tests: from Secondly Frequency"
ival_S = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_S_end_of_year = Period(freq='S', year=2007, month=12, day=31,
hour=23, minute=59, second=59)
ival_S_end_of_quarter = Period(freq='S', year=2007, month=3, day=31,
hour=23, minute=59, second=59)
ival_S_end_of_month = Period(freq='S', year=2007, month=1, day=31,
hour=23, minute=59, second=59)
ival_S_end_of_week = Period(freq='S', year=2007, month=1, day=7,
hour=23, minute=59, second=59)
ival_S_end_of_day = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
ival_S_end_of_bus = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
ival_S_end_of_hour = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=59, second=59)
ival_S_end_of_minute = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=59)
ival_S_to_A = Period(freq='A', year=2007)
ival_S_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_S_to_M = Period(freq='M', year=2007, month=1)
ival_S_to_W = Period(freq='W', year=2007, month=1, day=7)
ival_S_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_S_to_B = Period(freq='B', year=2007, month=1, day=1)
ival_S_to_H = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_S_to_T = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
assert_equal(ival_S.asfreq('A'), ival_S_to_A)
assert_equal(ival_S_end_of_year.asfreq('A'), ival_S_to_A)
assert_equal(ival_S.asfreq('Q'), ival_S_to_Q)
assert_equal(ival_S_end_of_quarter.asfreq('Q'), ival_S_to_Q)
assert_equal(ival_S.asfreq('M'), ival_S_to_M)
assert_equal(ival_S_end_of_month.asfreq('M'), ival_S_to_M)
assert_equal(ival_S.asfreq('W'), ival_S_to_W)
assert_equal(ival_S_end_of_week.asfreq('W'), ival_S_to_W)
assert_equal(ival_S.asfreq('D'), ival_S_to_D)
assert_equal(ival_S_end_of_day.asfreq('D'), ival_S_to_D)
assert_equal(ival_S.asfreq('B'), ival_S_to_B)
assert_equal(ival_S_end_of_bus.asfreq('B'), ival_S_to_B)
assert_equal(ival_S.asfreq('H'), ival_S_to_H)
assert_equal(ival_S_end_of_hour.asfreq('H'), ival_S_to_H)
assert_equal(ival_S.asfreq('Min'), ival_S_to_T)
assert_equal(ival_S_end_of_minute.asfreq('Min'), ival_S_to_T)
assert_equal(ival_S.asfreq('S'), ival_S)
def test_asfreq_nat(self):
p = Period('NaT', freq='A')
result = p.asfreq('M')
self.assertEqual(result.ordinal, tslib.iNaT)
self.assertEqual(result.freq, 'M')
def test_asfreq_mult(self):
# normal freq to mult freq
p = Period(freq='A', year=2007)
# ordinal will not change
for freq in ['3A', offsets.YearEnd(3)]:
result = p.asfreq(freq)
expected = Period('2007', freq='3A')
self.assertEqual(result, expected)
self.assertEqual(result.ordinal, expected.ordinal)
self.assertEqual(result.freq, expected.freq)
# ordinal will not change
for freq in ['3A', offsets.YearEnd(3)]:
result = p.asfreq(freq, how='S')
expected = Period('2007', freq='3A')
self.assertEqual(result, expected)
self.assertEqual(result.ordinal, expected.ordinal)
self.assertEqual(result.freq, expected.freq)
# mult freq to normal freq
p = Period(freq='3A', year=2007)
# ordinal will change because how=E is the default
for freq in ['A', offsets.YearEnd()]:
result = p.asfreq(freq)
expected = Period('2009', freq='A')
self.assertEqual(result, expected)
self.assertEqual(result.ordinal, expected.ordinal)
self.assertEqual(result.freq, expected.freq)
# ordinal will not change
for freq in ['A', offsets.YearEnd()]:
result = p.asfreq(freq, how='S')
expected = Period('2007', freq='A')
self.assertEqual(result, expected)
self.assertEqual(result.ordinal, expected.ordinal)
self.assertEqual(result.freq, expected.freq)
p = Period(freq='A', year=2007)
for freq in ['2M', offsets.MonthEnd(2)]:
result = p.asfreq(freq)
expected = Period('2007-12', freq='2M')
self.assertEqual(result, expected)
self.assertEqual(result.ordinal, expected.ordinal)
self.assertEqual(result.freq, expected.freq)
for freq in ['2M', offsets.MonthEnd(2)]:
result = p.asfreq(freq, how='S')
expected = Period('2007-01', freq='2M')
self.assertEqual(result, expected)
self.assertEqual(result.ordinal, expected.ordinal)
self.assertEqual(result.freq, expected.freq)
p = Period(freq='3A', year=2007)
for freq in ['2M', offsets.MonthEnd(2)]:
result = p.asfreq(freq)
expected = Period('2009-12', freq='2M')
self.assertEqual(result, expected)
self.assertEqual(result.ordinal, expected.ordinal)
self.assertEqual(result.freq, expected.freq)
for freq in ['2M', offsets.MonthEnd(2)]:
result = p.asfreq(freq, how='S')
expected = Period('2007-01', freq='2M')
self.assertEqual(result, expected)
self.assertEqual(result.ordinal, expected.ordinal)
self.assertEqual(result.freq, expected.freq)
def test_asfreq_mult_nat(self):
# normal freq to mult freq
for p in [Period('NaT', freq='A'), Period('NaT', freq='3A'),
Period('NaT', freq='2M'), Period('NaT', freq='3D')]:
for freq in ['3A', offsets.YearEnd(3)]:
result = p.asfreq(freq)
expected = Period('NaT', freq='3A')
self.assertEqual(result.ordinal, pd.tslib.iNaT)
self.assertEqual(result.freq, expected.freq)
result = p.asfreq(freq, how='S')
expected = Period('NaT', freq='3A')
self.assertEqual(result.ordinal, pd.tslib.iNaT)
self.assertEqual(result.freq, expected.freq)
class TestPeriodIndex(tm.TestCase):
def setUp(self):
pass
def test_hash_error(self):
index = period_range('20010101', periods=10)
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(index).__name__):
hash(index)
def test_make_time_series(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
series = Series(1, index=index)
tm.assertIsInstance(series, Series)
def test_astype(self):
idx = period_range('1990', '2009', freq='A')
result = idx.astype('i8')
self.assert_numpy_array_equal(result, idx.values)
def test_constructor_use_start_freq(self):
# GH #1118
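        # the index should inherit its frequency from the start Period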
p = Period('4/2/2012', freq='B')
index = PeriodIndex(start=p, periods=10)
expected = PeriodIndex(start='4/2/2012', periods=10, freq='B')
self.assertTrue(index.equals(expected))
def test_constructor_field_arrays(self):
# GH #1264
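        # a PeriodIndex can be built from parallel year/quarter field arrays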
years = np.arange(1990, 2010).repeat(4)[2:-2]
quarters = np.tile(np.arange(1, 5), 20)[2:-2]
index = PeriodIndex(year=years, quarter=quarters, freq='Q-DEC')
expected = period_range('1990Q3', '2009Q2', freq='Q-DEC')
self.assertTrue(index.equals(expected))
index2 = PeriodIndex(year=years, quarter=quarters, freq='2Q-DEC')
tm.assert_numpy_array_equal(index.asi8, index2.asi8)
index = PeriodIndex(year=years, quarter=quarters)
self.assertTrue(index.equals(expected))
years = [2007, 2007, 2007]
months = [1, 2]
self.assertRaises(ValueError, PeriodIndex, year=years, month=months,
freq='M')
self.assertRaises(ValueError, PeriodIndex, year=years, month=months,
freq='2M')
self.assertRaises(ValueError, PeriodIndex, year=years, month=months,
freq='M', start=Period('2007-01', freq='M'))
years = [2007, 2007, 2007]
months = [1, 2, 3]
idx = PeriodIndex(year=years, month=months, freq='M')
exp = period_range('2007-01', periods=3, freq='M')
self.assertTrue(idx.equals(exp))
def test_constructor_U(self):
        # 'U' was once used as an undefined period; 'X' is undefined here and should raise
self.assertRaises(ValueError, period_range, '2007-1-1', periods=500,
freq='X')
def test_constructor_arrays_negative_year(self):
years = np.arange(1960, 2000).repeat(4)
quarters = np.tile(lrange(1, 5), 40)
pindex = PeriodIndex(year=years, quarter=quarters)
self.assert_numpy_array_equal(pindex.year, years)
self.assert_numpy_array_equal(pindex.quarter, quarters)
def test_constructor_invalid_quarters(self):
self.assertRaises(ValueError, PeriodIndex, year=lrange(2000, 2004),
quarter=lrange(4), freq='Q-DEC')
def test_constructor_corner(self):
self.assertRaises(ValueError, PeriodIndex, periods=10, freq='A')
start = Period('2007', freq='A-JUN')
end = Period('2010', freq='A-DEC')
self.assertRaises(ValueError, PeriodIndex, start=start, end=end)
self.assertRaises(ValueError, PeriodIndex, start=start)
self.assertRaises(ValueError, PeriodIndex, end=end)
result = period_range('2007-01', periods=10.5, freq='M')
exp = period_range('2007-01', periods=10, freq='M')
self.assertTrue(result.equals(exp))
def test_constructor_fromarraylike(self):
idx = period_range('2007-01', periods=20, freq='M')
self.assertRaises(ValueError, PeriodIndex, idx.values)
self.assertRaises(ValueError, PeriodIndex, list(idx.values))
self.assertRaises(ValueError, PeriodIndex,
data=Period('2007', freq='A'))
result = PeriodIndex(iter(idx))
self.assertTrue(result.equals(idx))
result = PeriodIndex(idx)
self.assertTrue(result.equals(idx))
result = PeriodIndex(idx, freq='M')
self.assertTrue(result.equals(idx))
result = PeriodIndex(idx, freq=offsets.MonthEnd())
self.assertTrue(result.equals(idx))
        self.assertEqual(result.freq, 'M')
result = PeriodIndex(idx, freq='2M')
self.assertTrue(result.equals(idx))
        self.assertEqual(result.freq, '2M')
result = PeriodIndex(idx, freq=offsets.MonthEnd(2))
self.assertTrue(result.equals(idx))
        self.assertEqual(result.freq, '2M')
result = PeriodIndex(idx, freq='D')
exp = idx.asfreq('D', 'e')
self.assertTrue(result.equals(exp))
def test_constructor_datetime64arr(self):
vals = np.arange(100000, 100000 + 10000, 100, dtype=np.int64)
vals = vals.view(np.dtype('M8[us]'))
self.assertRaises(ValueError, PeriodIndex, vals, freq='D')
def test_constructor_simple_new(self):
idx = period_range('2007-01', name='p', periods=20, freq='M')
result = idx._simple_new(idx, 'p', freq=idx.freq)
self.assertTrue(result.equals(idx))
result = idx._simple_new(idx.astype('i8'), 'p', freq=idx.freq)
self.assertTrue(result.equals(idx))
def test_constructor_nat(self):
self.assertRaises(
ValueError, period_range, start='NaT', end='2011-01-01', freq='M')
self.assertRaises(
ValueError, period_range, start='2011-01-01', end='NaT', freq='M')
def test_constructor_year_and_quarter(self):
year = pd.Series([2001, 2002, 2003])
quarter = year - 2000
idx = PeriodIndex(year=year, quarter=quarter)
strs = ['%dQ%d' % t for t in zip(quarter, year)]
lops = list(map(Period, strs))
p = PeriodIndex(lops)
tm.assert_index_equal(p, idx)
def test_constructor_freq_mult(self):
# GH #7811
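        # a multiplied frequency such as '2M', '3D' or '4H' should step by that multiple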
for func in [PeriodIndex, period_range]:
            # PeriodIndex and period_range must produce identical results
pidx = func(start='2014-01', freq='2M', periods=4)
expected = PeriodIndex(['2014-01', '2014-03', '2014-05', '2014-07'], freq='M')
tm.assert_index_equal(pidx, expected)
pidx = func(start='2014-01-02', end='2014-01-15', freq='3D')
expected = PeriodIndex(['2014-01-02', '2014-01-05', '2014-01-08', '2014-01-11',
'2014-01-14'], freq='D')
tm.assert_index_equal(pidx, expected)
pidx = func(end='2014-01-01 17:00', freq='4H', periods=3)
expected = PeriodIndex(['2014-01-01 09:00', '2014-01-01 13:00',
'2014-01-01 17:00'], freq='4H')
tm.assert_index_equal(pidx, expected)
msg = ('Frequency must be positive, because it'
' represents span: -1M')
with tm.assertRaisesRegexp(ValueError, msg):
PeriodIndex(['2011-01'], freq='-1M')
msg = ('Frequency must be positive, because it'
' represents span: 0M')
with tm.assertRaisesRegexp(ValueError, msg):
PeriodIndex(['2011-01'], freq='0M')
msg = ('Frequency must be positive, because it'
' represents span: 0M')
with tm.assertRaisesRegexp(ValueError, msg):
period_range('2011-01', periods=3, freq='0M')
def test_constructor_freq_mult_dti_compat(self):
import itertools
mults = [1, 2, 3, 4, 5]
freqs = ['A', 'M', 'D', 'T', 'S']
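        # a PeriodIndex built with a multiplied freq must match the
        # equivalent date_range(...).to_period(...)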
for mult, freq in itertools.product(mults, freqs):
freqstr = str(mult) + freq
pidx = PeriodIndex(start='2014-04-01', freq=freqstr, periods=10)
expected = date_range(start='2014-04-01', freq=freqstr, periods=10).to_period(freq)
tm.assert_index_equal(pidx, expected)
def test_is_(self):
create_index = lambda: PeriodIndex(freq='A', start='1/1/2001',
end='12/1/2009')
index = create_index()
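        # is_() is identity-based: views of the same data pass, while fresh
        # copies and derived indexes do not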
self.assertEqual(index.is_(index), True)
self.assertEqual(index.is_(create_index()), False)
self.assertEqual(index.is_(index.view()), True)
self.assertEqual(index.is_(index.view().view().view().view().view()), True)
self.assertEqual(index.view().is_(index), True)
ind2 = index.view()
index.name = "Apple"
self.assertEqual(ind2.is_(index), True)
self.assertEqual(index.is_(index[:]), False)
self.assertEqual(index.is_(index.asfreq('M')), False)
self.assertEqual(index.is_(index.asfreq('A')), False)
self.assertEqual(index.is_(index - 2), False)
self.assertEqual(index.is_(index - 0), False)
def test_comp_period(self):
idx = period_range('2007-01', periods=20, freq='M')
result = idx < idx[10]
exp = idx.values < idx.values[10]
self.assert_numpy_array_equal(result, exp)
def test_getitem_ndim2(self):
idx = period_range('2007-01', periods=3, freq='M')
result = idx[:, None]
# MPL kludge
tm.assertIsInstance(result, PeriodIndex)
def test_getitem_partial(self):
rng = period_range('2007-01', periods=50, freq='M')
ts = Series(np.random.randn(len(rng)), rng)
self.assertRaises(KeyError, ts.__getitem__, '2006')
result = ts['2008']
self.assertTrue((result.index.year == 2008).all())
result = ts['2008':'2009']
self.assertEqual(len(result), 24)
result = ts['2008-1':'2009-12']
self.assertEqual(len(result), 24)
result = ts['2008Q1':'2009Q4']
self.assertEqual(len(result), 24)
result = ts[:'2009']
self.assertEqual(len(result), 36)
result = ts['2009':]
self.assertEqual(len(result), 50 - 24)
exp = result
result = ts[24:]
assert_series_equal(exp, result)
ts = ts[10:].append(ts[10:])
self.assertRaisesRegexp(
KeyError, "left slice bound for non-unique label: '2008'",
ts.__getitem__, slice('2008', '2009'))
def test_getitem_datetime(self):
rng = period_range(start='2012-01-01', periods=10, freq='W-MON')
ts = Series(lrange(len(rng)), index=rng)
dt1 = datetime(2011, 10, 2)
dt4 = datetime(2012, 4, 20)
rs = ts[dt1:dt4]
assert_series_equal(rs, ts)
def test_slice_with_negative_step(self):
ts = Series(np.arange(20),
period_range('2014-01', periods=20, freq='M'))
SLC = pd.IndexSlice
def assert_slices_equivalent(l_slc, i_slc):
assert_series_equal(ts[l_slc], ts.iloc[i_slc])
assert_series_equal(ts.loc[l_slc], ts.iloc[i_slc])
assert_series_equal(ts.ix[l_slc], ts.iloc[i_slc])
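        # label-based reversed slices must match their positional equivalents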
assert_slices_equivalent(SLC[Period('2014-10')::-1], SLC[9::-1])
assert_slices_equivalent(SLC['2014-10'::-1], SLC[9::-1])
assert_slices_equivalent(SLC[:Period('2014-10'):-1], SLC[:8:-1])
assert_slices_equivalent(SLC[:'2014-10':-1], SLC[:8:-1])
assert_slices_equivalent(SLC['2015-02':'2014-10':-1], SLC[13:8:-1])
assert_slices_equivalent(SLC[Period('2015-02'):Period('2014-10'):-1], SLC[13:8:-1])
assert_slices_equivalent(SLC['2015-02':Period('2014-10'):-1], SLC[13:8:-1])
assert_slices_equivalent(SLC[Period('2015-02'):'2014-10':-1], SLC[13:8:-1])
assert_slices_equivalent(SLC['2014-10':'2015-02':-1], SLC[:0])
def test_slice_with_zero_step_raises(self):
ts = Series(np.arange(20),
period_range('2014-01', periods=20, freq='M'))
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: ts[::0])
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: ts.loc[::0])
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: ts.ix[::0])
def test_contains(self):
rng = period_range('2007-01', freq='M', periods=10)
self.assertTrue(Period('2007-01', freq='M') in rng)
self.assertFalse(Period('2007-01', freq='D') in rng)
self.assertFalse(Period('2007-01', freq='2M') in rng)
def test_sub(self):
rng = period_range('2007-01', periods=50)
result = rng - 5
exp = rng + (-5)
self.assertTrue(result.equals(exp))
def test_periods_number_check(self):
self.assertRaises(
ValueError, period_range, '2011-1-1', '2012-1-1', 'B')
def test_tolist(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
rs = index.tolist()
[tm.assertIsInstance(x, Period) for x in rs]
recon = PeriodIndex(rs)
self.assertTrue(index.equals(recon))
def test_to_timestamp(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
series = Series(1, index=index, name='foo')
exp_index = date_range('1/1/2001', end='12/31/2009', freq='A-DEC')
result = series.to_timestamp(how='end')
self.assertTrue(result.index.equals(exp_index))
self.assertEqual(result.name, 'foo')
exp_index = date_range('1/1/2001', end='1/1/2009', freq='AS-JAN')
result = series.to_timestamp(how='start')
self.assertTrue(result.index.equals(exp_index))
def _get_with_delta(delta, freq='A-DEC'):
return date_range(to_datetime('1/1/2001') + delta,
to_datetime('12/31/2009') + delta, freq=freq)
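        # with how='end', sub-daily conversions stamp the last sub-period of
        # each annual period, hence the 23:59:59-style deltas below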
delta = timedelta(hours=23)
result = series.to_timestamp('H', 'end')
exp_index = _get_with_delta(delta)
self.assertTrue(result.index.equals(exp_index))
delta = timedelta(hours=23, minutes=59)
result = series.to_timestamp('T', 'end')
exp_index = _get_with_delta(delta)
self.assertTrue(result.index.equals(exp_index))
result = series.to_timestamp('S', 'end')
delta = timedelta(hours=23, minutes=59, seconds=59)
exp_index = _get_with_delta(delta)
self.assertTrue(result.index.equals(exp_index))
index = PeriodIndex(freq='H', start='1/1/2001', end='1/2/2001')
series = Series(1, index=index, name='foo')
exp_index = date_range('1/1/2001 00:59:59', end='1/2/2001 00:59:59',
freq='H')
result = series.to_timestamp(how='end')
self.assertTrue(result.index.equals(exp_index))
self.assertEqual(result.name, 'foo')
def test_to_timestamp_quarterly_bug(self):
years = np.arange(1960, 2000).repeat(4)
quarters = np.tile(lrange(1, 5), 40)
pindex = PeriodIndex(year=years, quarter=quarters)
stamps = pindex.to_timestamp('D', 'end')
expected = DatetimeIndex([x.to_timestamp('D', 'end') for x in pindex])
self.assertTrue(stamps.equals(expected))
def test_to_timestamp_preserve_name(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009',
name='foo')
self.assertEqual(index.name, 'foo')
conv = index.to_timestamp('D')
self.assertEqual(conv.name, 'foo')
def test_to_timestamp_repr_is_code(self):
        zs = [Timestamp('99-04-17 00:00:00', tz='UTC'),
              Timestamp('2001-04-17 00:00:00', tz='UTC'),
              Timestamp('2001-04-17 00:00:00', tz='America/Los_Angeles'),
              Timestamp('2001-04-17 00:00:00', tz=None)]
        for z in zs:
            self.assertEqual(eval(repr(z)), z)
def test_to_timestamp_pi_nat(self):
# GH 7228
index = PeriodIndex(['NaT', '2011-01', '2011-02'], freq='M', name='idx')
result = index.to_timestamp('D')
expected = DatetimeIndex([pd.NaT, datetime(2011, 1, 1),
datetime(2011, 2, 1)], name='idx')
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, 'idx')
result2 = result.to_period(freq='M')
self.assertTrue(result2.equals(index))
self.assertEqual(result2.name, 'idx')
result3 = result.to_period(freq='3M')
exp = PeriodIndex(['NaT', '2011-01', '2011-02'], freq='3M', name='idx')
self.assert_index_equal(result3, exp)
self.assertEqual(result3.freqstr, '3M')
msg = ('Frequency must be positive, because it'
' represents span: -2A')
with tm.assertRaisesRegexp(ValueError, msg):
result.to_period(freq='-2A')
def test_to_timestamp_pi_mult(self):
idx = PeriodIndex(['2011-01', 'NaT', '2011-02'], freq='2M', name='idx')
result = idx.to_timestamp()
expected = DatetimeIndex(['2011-01-01', 'NaT', '2011-02-01'], name='idx')
self.assert_index_equal(result, expected)
result = idx.to_timestamp(how='E')
expected = DatetimeIndex(['2011-02-28', 'NaT', '2011-03-31'], name='idx')
self.assert_index_equal(result, expected)
def test_as_frame_columns(self):
rng = period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
ts = df[rng[0]]
assert_series_equal(ts, df.ix[:, 0])
        # GH #1211
repr(df)
ts = df['1/1/2000']
assert_series_equal(ts, df.ix[:, 0])
def test_indexing(self):
# GH 4390, iat incorrectly indexing
index = period_range('1/1/2001', periods=10)
s = Series(randn(10), index=index)
expected = s[index[0]]
result = s.iat[0]
self.assertEqual(expected, result)
def test_frame_setitem(self):
rng = period_range('1/1/2000', periods=5)
rng.name = 'index'
df = DataFrame(randn(5, 3), index=rng)
df['Index'] = rng
rs = Index(df['Index'])
self.assertTrue(rs.equals(rng))
rs = df.reset_index().set_index('index')
tm.assertIsInstance(rs.index, PeriodIndex)
self.assertTrue(rs.index.equals(rng))
def test_period_set_index_reindex(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = period_range('2011/01/01', periods=6, freq='M')
idx2 = period_range('2013', periods=6, freq='A')
df = df.set_index(idx1)
self.assertTrue(df.index.equals(idx1))
df = df.set_index(idx2)
self.assertTrue(df.index.equals(idx2))
def test_frame_to_time_stamp(self):
K = 5
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
df = DataFrame(randn(len(index), K), index=index)
df['mix'] = 'a'
exp_index = date_range('1/1/2001', end='12/31/2009', freq='A-DEC')
result = df.to_timestamp('D', 'end')
self.assertTrue(result.index.equals(exp_index))
assert_almost_equal(result.values, df.values)
exp_index = date_range('1/1/2001', end='1/1/2009', freq='AS-JAN')
result = df.to_timestamp('D', 'start')
self.assertTrue(result.index.equals(exp_index))
def _get_with_delta(delta, freq='A-DEC'):
return date_range(to_datetime('1/1/2001') + delta,
to_datetime('12/31/2009') + delta, freq=freq)
delta = timedelta(hours=23)
result = df.to_timestamp('H', 'end')
exp_index = _get_with_delta(delta)
self.assertTrue(result.index.equals(exp_index))
delta = timedelta(hours=23, minutes=59)
result = df.to_timestamp('T', 'end')
exp_index = _get_with_delta(delta)
self.assertTrue(result.index.equals(exp_index))
result = df.to_timestamp('S', 'end')
delta = timedelta(hours=23, minutes=59, seconds=59)
exp_index = _get_with_delta(delta)
self.assertTrue(result.index.equals(exp_index))
# columns
df = df.T
exp_index = date_range('1/1/2001', end='12/31/2009', freq='A-DEC')
result = df.to_timestamp('D', 'end', axis=1)
self.assertTrue(result.columns.equals(exp_index))
assert_almost_equal(result.values, df.values)
exp_index = date_range('1/1/2001', end='1/1/2009', freq='AS-JAN')
result = df.to_timestamp('D', 'start', axis=1)
self.assertTrue(result.columns.equals(exp_index))
delta = timedelta(hours=23)
result = df.to_timestamp('H', 'end', axis=1)
exp_index = _get_with_delta(delta)
self.assertTrue(result.columns.equals(exp_index))
delta = timedelta(hours=23, minutes=59)
result = df.to_timestamp('T', 'end', axis=1)
exp_index = _get_with_delta(delta)
self.assertTrue(result.columns.equals(exp_index))
result = df.to_timestamp('S', 'end', axis=1)
delta = timedelta(hours=23, minutes=59, seconds=59)
exp_index = _get_with_delta(delta)
self.assertTrue(result.columns.equals(exp_index))
# invalid axis
assertRaisesRegexp(ValueError, 'axis', df.to_timestamp, axis=2)
result1 = df.to_timestamp('5t', axis=1)
result2 = df.to_timestamp('t', axis=1)
expected = pd.date_range('2001-01-01', '2009-01-01', freq='AS')
self.assertTrue(isinstance(result1.columns, DatetimeIndex))
self.assertTrue(isinstance(result2.columns, DatetimeIndex))
self.assert_numpy_array_equal(result1.columns.asi8, expected.asi8)
self.assert_numpy_array_equal(result2.columns.asi8, expected.asi8)
        # PeriodIndex.to_timestamp always uses 'infer'
self.assertEqual(result1.columns.freqstr, 'AS-JAN')
self.assertEqual(result2.columns.freqstr, 'AS-JAN')
def test_index_duplicate_periods(self):
# monotonic
idx = PeriodIndex([2000, 2007, 2007, 2009, 2009], freq='A-JUN')
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts[2007]
expected = ts[1:3]
assert_series_equal(result, expected)
result[:] = 1
self.assertTrue((ts[1:3] == 1).all())
# not monotonic
idx = PeriodIndex([2000, 2007, 2007, 2009, 2007], freq='A-JUN')
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts[2007]
expected = ts[idx == 2007]
assert_series_equal(result, expected)
def test_index_unique(self):
idx = PeriodIndex([2000, 2007, 2007, 2009, 2009], freq='A-JUN')
expected = PeriodIndex([2000, 2007, 2009], freq='A-JUN')
self.assert_numpy_array_equal(idx.unique(), expected.values)
self.assertEqual(idx.nunique(), 3)
idx = PeriodIndex([2000, 2007, 2007, 2009, 2007], freq='A-JUN', tz='US/Eastern')
expected = PeriodIndex([2000, 2007, 2009], freq='A-JUN', tz='US/Eastern')
self.assert_numpy_array_equal(idx.unique(), expected.values)
self.assertEqual(idx.nunique(), 3)
def test_constructor(self):
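        # length sanity checks for each supported frequency over a fixed span,
        # followed by constructor edge cases (start/end/periods, mixed freqs)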
pi = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
assert_equal(len(pi), 9)
pi = PeriodIndex(freq='Q', start='1/1/2001', end='12/1/2009')
assert_equal(len(pi), 4 * 9)
pi = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
assert_equal(len(pi), 12 * 9)
pi = PeriodIndex(freq='D', start='1/1/2001', end='12/31/2009')
assert_equal(len(pi), 365 * 9 + 2)
pi = PeriodIndex(freq='B', start='1/1/2001', end='12/31/2009')
assert_equal(len(pi), 261 * 9)
pi = PeriodIndex(freq='H', start='1/1/2001', end='12/31/2001 23:00')
assert_equal(len(pi), 365 * 24)
pi = PeriodIndex(freq='Min', start='1/1/2001', end='1/1/2001 23:59')
assert_equal(len(pi), 24 * 60)
pi = PeriodIndex(freq='S', start='1/1/2001', end='1/1/2001 23:59:59')
assert_equal(len(pi), 24 * 60 * 60)
start = Period('02-Apr-2005', 'B')
i1 = PeriodIndex(start=start, periods=20)
assert_equal(len(i1), 20)
assert_equal(i1.freq, start.freq)
assert_equal(i1[0], start)
end_intv = Period('2006-12-31', 'W')
i1 = PeriodIndex(end=end_intv, periods=10)
assert_equal(len(i1), 10)
assert_equal(i1.freq, end_intv.freq)
assert_equal(i1[-1], end_intv)
end_intv = Period('2006-12-31', '1w')
i2 = PeriodIndex(end=end_intv, periods=10)
assert_equal(len(i1), len(i2))
self.assertTrue((i1 == i2).all())
assert_equal(i1.freq, i2.freq)
end_intv = Period('2006-12-31', ('w', 1))
i2 = PeriodIndex(end=end_intv, periods=10)
assert_equal(len(i1), len(i2))
self.assertTrue((i1 == i2).all())
assert_equal(i1.freq, i2.freq)
try:
PeriodIndex(start=start, end=end_intv)
raise AssertionError('Cannot allow mixed freq for start and end')
except ValueError:
pass
end_intv = Period('2005-05-01', 'B')
i1 = PeriodIndex(start=start, end=end_intv)
try:
PeriodIndex(start=start)
raise AssertionError(
'Must specify periods if missing start or end')
except ValueError:
pass
# infer freq from first element
i2 = PeriodIndex([end_intv, Period('2005-05-05', 'B')])
assert_equal(len(i2), 2)
assert_equal(i2[0], end_intv)
i2 = PeriodIndex(np.array([end_intv, Period('2005-05-05', 'B')]))
assert_equal(len(i2), 2)
assert_equal(i2[0], end_intv)
# Mixed freq should fail
vals = [end_intv, Period('2006-12-31', 'w')]
self.assertRaises(ValueError, PeriodIndex, vals)
vals = np.array(vals)
self.assertRaises(ValueError, PeriodIndex, vals)
def test_shift(self):
pi1 = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='A', start='1/1/2002', end='12/1/2010')
self.assertTrue(pi1.shift(0).equals(pi1))
assert_equal(len(pi1), len(pi2))
assert_equal(pi1.shift(1).values, pi2.values)
pi1 = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='A', start='1/1/2000', end='12/1/2008')
assert_equal(len(pi1), len(pi2))
assert_equal(pi1.shift(-1).values, pi2.values)
pi1 = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='M', start='2/1/2001', end='1/1/2010')
assert_equal(len(pi1), len(pi2))
assert_equal(pi1.shift(1).values, pi2.values)
pi1 = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='M', start='12/1/2000', end='11/1/2009')
assert_equal(len(pi1), len(pi2))
assert_equal(pi1.shift(-1).values, pi2.values)
pi1 = PeriodIndex(freq='D', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='D', start='1/2/2001', end='12/2/2009')
assert_equal(len(pi1), len(pi2))
assert_equal(pi1.shift(1).values, pi2.values)
pi1 = PeriodIndex(freq='D', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='D', start='12/31/2000', end='11/30/2009')
assert_equal(len(pi1), len(pi2))
assert_equal(pi1.shift(-1).values, pi2.values)
def test_shift_nat(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'], freq='M', name='idx')
result = idx.shift(1)
expected = PeriodIndex(['2011-02', '2011-03', 'NaT', '2011-05'], freq='M', name='idx')
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
def test_shift_ndarray(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'], freq='M', name='idx')
result = idx.shift(np.array([1, 2, 3, 4]))
expected = PeriodIndex(['2011-02', '2011-04', 'NaT', '2011-08'], freq='M', name='idx')
self.assertTrue(result.equals(expected))
idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'], freq='M', name='idx')
result = idx.shift(np.array([1, -2, 3, -4]))
expected = PeriodIndex(['2011-02', '2010-12', 'NaT', '2010-12'], freq='M', name='idx')
self.assertTrue(result.equals(expected))
def test_asfreq(self):
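        # every pairwise conversion anchored with how='S' should land on the start of 2001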
pi1 = PeriodIndex(freq='A', start='1/1/2001', end='1/1/2001')
pi2 = PeriodIndex(freq='Q', start='1/1/2001', end='1/1/2001')
pi3 = PeriodIndex(freq='M', start='1/1/2001', end='1/1/2001')
pi4 = PeriodIndex(freq='D', start='1/1/2001', end='1/1/2001')
pi5 = PeriodIndex(freq='H', start='1/1/2001', end='1/1/2001 00:00')
pi6 = PeriodIndex(freq='Min', start='1/1/2001', end='1/1/2001 00:00')
pi7 = PeriodIndex(freq='S', start='1/1/2001', end='1/1/2001 00:00:00')
self.assertEqual(pi1.asfreq('Q', 'S'), pi2)
self.assertEqual(pi1.asfreq('Q', 's'), pi2)
self.assertEqual(pi1.asfreq('M', 'start'), pi3)
self.assertEqual(pi1.asfreq('D', 'StarT'), pi4)
self.assertEqual(pi1.asfreq('H', 'beGIN'), pi5)
self.assertEqual(pi1.asfreq('Min', 'S'), pi6)
self.assertEqual(pi1.asfreq('S', 'S'), pi7)
self.assertEqual(pi2.asfreq('A', 'S'), pi1)
self.assertEqual(pi2.asfreq('M', 'S'), pi3)
self.assertEqual(pi2.asfreq('D', 'S'), pi4)
self.assertEqual(pi2.asfreq('H', 'S'), pi5)
self.assertEqual(pi2.asfreq('Min', 'S'), pi6)
self.assertEqual(pi2.asfreq('S', 'S'), pi7)
self.assertEqual(pi3.asfreq('A', 'S'), pi1)
self.assertEqual(pi3.asfreq('Q', 'S'), pi2)
self.assertEqual(pi3.asfreq('D', 'S'), pi4)
self.assertEqual(pi3.asfreq('H', 'S'), pi5)
self.assertEqual(pi3.asfreq('Min', 'S'), pi6)
self.assertEqual(pi3.asfreq('S', 'S'), pi7)
self.assertEqual(pi4.asfreq('A', 'S'), pi1)
self.assertEqual(pi4.asfreq('Q', 'S'), pi2)
self.assertEqual(pi4.asfreq('M', 'S'), pi3)
self.assertEqual(pi4.asfreq('H', 'S'), pi5)
self.assertEqual(pi4.asfreq('Min', 'S'), pi6)
self.assertEqual(pi4.asfreq('S', 'S'), pi7)
self.assertEqual(pi5.asfreq('A', 'S'), pi1)
self.assertEqual(pi5.asfreq('Q', 'S'), pi2)
self.assertEqual(pi5.asfreq('M', 'S'), pi3)
self.assertEqual(pi5.asfreq('D', 'S'), pi4)
self.assertEqual(pi5.asfreq('Min', 'S'), pi6)
self.assertEqual(pi5.asfreq('S', 'S'), pi7)
self.assertEqual(pi6.asfreq('A', 'S'), pi1)
self.assertEqual(pi6.asfreq('Q', 'S'), pi2)
self.assertEqual(pi6.asfreq('M', 'S'), pi3)
self.assertEqual(pi6.asfreq('D', 'S'), pi4)
self.assertEqual(pi6.asfreq('H', 'S'), pi5)
self.assertEqual(pi6.asfreq('S', 'S'), pi7)
self.assertEqual(pi7.asfreq('A', 'S'), pi1)
self.assertEqual(pi7.asfreq('Q', 'S'), pi2)
self.assertEqual(pi7.asfreq('M', 'S'), pi3)
self.assertEqual(pi7.asfreq('D', 'S'), pi4)
self.assertEqual(pi7.asfreq('H', 'S'), pi5)
self.assertEqual(pi7.asfreq('Min', 'S'), pi6)
self.assertRaises(ValueError, pi7.asfreq, 'T', 'foo')
result1 = pi1.asfreq('3M')
result2 = pi1.asfreq('M')
expected = PeriodIndex(freq='M', start='2001-12', end='2001-12')
self.assert_numpy_array_equal(result1.asi8, expected.asi8)
self.assertEqual(result1.freqstr, '3M')
self.assert_numpy_array_equal(result2.asi8, expected.asi8)
self.assertEqual(result2.freqstr, 'M')
def test_asfreq_nat(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'], freq='M')
result = idx.asfreq(freq='Q')
expected = PeriodIndex(['2011Q1', '2011Q1', 'NaT', '2011Q2'], freq='Q')
self.assertTrue(result.equals(expected))
def test_asfreq_mult_pi(self):
pi = PeriodIndex(['2001-01', '2001-02', 'NaT', '2001-03'], freq='2M')
for freq in ['D', '3D']:
result = pi.asfreq(freq)
exp = PeriodIndex(['2001-02-28', '2001-03-31', 'NaT',
'2001-04-30'], freq=freq)
self.assert_index_equal(result, exp)
self.assertEqual(result.freq, exp.freq)
result = pi.asfreq(freq, how='S')
exp = PeriodIndex(['2001-01-01', '2001-02-01', 'NaT',
'2001-03-01'], freq=freq)
self.assert_index_equal(result, exp)
self.assertEqual(result.freq, exp.freq)
def test_period_index_length(self):
pi = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
assert_equal(len(pi), 9)
pi = PeriodIndex(freq='Q', start='1/1/2001', end='12/1/2009')
assert_equal(len(pi), 4 * 9)
pi = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
assert_equal(len(pi), 12 * 9)
start = Period('02-Apr-2005', 'B')
i1 = PeriodIndex(start=start, periods=20)
assert_equal(len(i1), 20)
assert_equal(i1.freq, start.freq)
assert_equal(i1[0], start)
end_intv = Period('2006-12-31', 'W')
i1 = PeriodIndex(end=end_intv, periods=10)
assert_equal(len(i1), 10)
assert_equal(i1.freq, end_intv.freq)
assert_equal(i1[-1], end_intv)
end_intv = Period('2006-12-31', '1w')
i2 = PeriodIndex(end=end_intv, periods=10)
assert_equal(len(i1), len(i2))
self.assertTrue((i1 == i2).all())
assert_equal(i1.freq, i2.freq)
end_intv = Period('2006-12-31', ('w', 1))
i2 = PeriodIndex(end=end_intv, periods=10)
assert_equal(len(i1), len(i2))
self.assertTrue((i1 == i2).all())
assert_equal(i1.freq, i2.freq)
try:
PeriodIndex(start=start, end=end_intv)
raise AssertionError('Cannot allow mixed freq for start and end')
except ValueError:
pass
end_intv = Period('2005-05-01', 'B')
i1 = PeriodIndex(start=start, end=end_intv)
try:
PeriodIndex(start=start)
raise AssertionError(
'Must specify periods if missing start or end')
except ValueError:
pass
# infer freq from first element
i2 = PeriodIndex([end_intv, Period('2005-05-05', 'B')])
assert_equal(len(i2), 2)
assert_equal(i2[0], end_intv)
i2 = PeriodIndex(np.array([end_intv, Period('2005-05-05', 'B')]))
assert_equal(len(i2), 2)
assert_equal(i2[0], end_intv)
# Mixed freq should fail
vals = [end_intv, Period('2006-12-31', 'w')]
self.assertRaises(ValueError, PeriodIndex, vals)
vals = np.array(vals)
self.assertRaises(ValueError, PeriodIndex, vals)
def test_frame_index_to_string(self):
index = PeriodIndex(['2011-1', '2011-2', '2011-3'], freq='M')
frame = DataFrame(np.random.randn(3, 4), index=index)
# it works!
frame.to_string()
def test_asfreq_ts(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/31/2010')
ts = Series(np.random.randn(len(index)), index=index)
df = DataFrame(np.random.randn(len(index), 3), index=index)
result = ts.asfreq('D', how='end')
df_result = df.asfreq('D', how='end')
exp_index = index.asfreq('D', how='end')
self.assertEqual(len(result), len(ts))
self.assertTrue(result.index.equals(exp_index))
self.assertTrue(df_result.index.equals(exp_index))
result = ts.asfreq('D', how='start')
self.assertEqual(len(result), len(ts))
self.assertTrue(result.index.equals(index.asfreq('D', how='start')))
def test_badinput(self):
self.assertRaises(datetools.DateParseError, Period, '1/1/-2000', 'A')
# self.assertRaises(datetools.DateParseError, Period, '-2000', 'A')
# self.assertRaises(datetools.DateParseError, Period, '0', 'A')
def test_negative_ordinals(self):
p = Period(ordinal=-1000, freq='A')
p = Period(ordinal=0, freq='A')
idx1 = PeriodIndex(ordinal=[-1, 0, 1], freq='A')
idx2 = PeriodIndex(ordinal=np.array([-1, 0, 1]), freq='A')
        tm.assert_numpy_array_equal(idx1, idx2)
def test_dti_to_period(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
pi1 = dti.to_period()
pi2 = dti.to_period(freq='D')
pi3 = dti.to_period(freq='3D')
self.assertEqual(pi1[0], Period('Jan 2005', freq='M'))
self.assertEqual(pi2[0], Period('1/31/2005', freq='D'))
self.assertEqual(pi3[0], Period('1/31/2005', freq='3D'))
self.assertEqual(pi1[-1], Period('Nov 2005', freq='M'))
self.assertEqual(pi2[-1], Period('11/30/2005', freq='D'))
self.assertEqual(pi3[-1], Period('11/30/2005', freq='3D'))
tm.assert_index_equal(pi1, period_range('1/1/2005', '11/1/2005', freq='M'))
tm.assert_index_equal(pi2, period_range('1/1/2005', '11/1/2005', freq='M').asfreq('D'))
tm.assert_index_equal(pi3, period_range('1/1/2005', '11/1/2005', freq='M').asfreq('3D'))
def test_pindex_slice_index(self):
pi = PeriodIndex(start='1/1/10', end='12/31/12', freq='M')
s = Series(np.random.rand(len(pi)), index=pi)
res = s['2010']
exp = s[0:12]
assert_series_equal(res, exp)
res = s['2011']
exp = s[12:24]
assert_series_equal(res, exp)
def test_getitem_day(self):
# GH 6716
# Confirm DatetimeIndex and PeriodIndex works identically
didx = DatetimeIndex(start='2013/01/01', freq='D', periods=400)
pidx = PeriodIndex(start='2013/01/01', freq='D', periods=400)
for idx in [didx, pidx]:
# getitem against index should raise ValueError
values = ['2014', '2013/02', '2013/01/02',
'2013/02/01 9H', '2013/02/01 09:00']
for v in values:
if _np_version_under1p9:
with tm.assertRaises(ValueError):
idx[v]
else:
# GH7116
# these show deprecations as we are trying
# to slice with non-integer indexers
#with tm.assertRaises(IndexError):
# idx[v]
continue
s = Series(np.random.rand(len(idx)), index=idx)
assert_series_equal(s['2013/01'], s[0:31])
assert_series_equal(s['2013/02'], s[31:59])
assert_series_equal(s['2014'], s[365:])
invalid = ['2013/02/01 9H', '2013/02/01 09:00']
for v in invalid:
with tm.assertRaises(KeyError):
s[v]
def test_range_slice_day(self):
# GH 6716
didx = DatetimeIndex(start='2013/01/01', freq='D', periods=400)
pidx = PeriodIndex(start='2013/01/01', freq='D', periods=400)
for idx in [didx, pidx]:
# slices against index should raise IndexError
values = ['2014', '2013/02', '2013/01/02',
'2013/02/01 9H', '2013/02/01 09:00']
for v in values:
with tm.assertRaises(IndexError):
idx[v:]
s = Series(np.random.rand(len(idx)), index=idx)
assert_series_equal(s['2013/01/02':], s[1:])
assert_series_equal(s['2013/01/02':'2013/01/05'], s[1:5])
assert_series_equal(s['2013/02':], s[31:])
assert_series_equal(s['2014':], s[365:])
invalid = ['2013/02/01 9H', '2013/02/01 09:00']
for v in invalid:
with tm.assertRaises(IndexError):
idx[v:]
def test_getitem_seconds(self):
# GH 6716
didx = DatetimeIndex(start='2013/01/01 09:00:00', freq='S', periods=4000)
pidx = PeriodIndex(start='2013/01/01 09:00:00', freq='S', periods=4000)
for idx in [didx, pidx]:
# getitem against index should raise ValueError
values = ['2014', '2013/02', '2013/01/02',
'2013/02/01 9H', '2013/02/01 09:00']
for v in values:
if _np_version_under1p9:
with tm.assertRaises(ValueError):
idx[v]
else:
# GH7116
# these show deprecations as we are trying
# to slice with non-integer indexers
#with tm.assertRaises(IndexError):
# idx[v]
continue
s = Series(np.random.rand(len(idx)), index=idx)
assert_series_equal(s['2013/01/01 10:00'], s[3600:3660])
assert_series_equal(s['2013/01/01 9H'], s[:3600])
for d in ['2013/01/01', '2013/01', '2013']:
assert_series_equal(s[d], s)
def test_range_slice_seconds(self):
# GH 6716
didx = DatetimeIndex(start='2013/01/01 09:00:00', freq='S', periods=4000)
pidx = PeriodIndex(start='2013/01/01 09:00:00', freq='S', periods=4000)
for idx in [didx, pidx]:
# slices against index should raise IndexError
values = ['2014', '2013/02', '2013/01/02',
'2013/02/01 9H', '2013/02/01 09:00']
for v in values:
with tm.assertRaises(IndexError):
idx[v:]
s = Series(np.random.rand(len(idx)), index=idx)
assert_series_equal(s['2013/01/01 09:05':'2013/01/01 09:10'], s[300:660])
assert_series_equal(s['2013/01/01 10:00':'2013/01/01 10:05'], s[3600:3960])
assert_series_equal(s['2013/01/01 10H':], s[3600:])
assert_series_equal(s[:'2013/01/01 09:30'], s[:1860])
for d in ['2013/01/01', '2013/01', '2013']:
assert_series_equal(s[d:], s)
def test_range_slice_outofbounds(self):
# GH 5407
didx = DatetimeIndex(start='2013/10/01', freq='D', periods=10)
pidx = PeriodIndex(start='2013/10/01', freq='D', periods=10)
for idx in [didx, pidx]:
df = DataFrame(dict(units=[100 + i for i in range(10)]), index=idx)
empty = DataFrame(index=idx.__class__([], freq='D'), columns=['units'])
empty['units'] = empty['units'].astype('int64')
tm.assert_frame_equal(df['2013/09/01':'2013/09/30'], empty)
tm.assert_frame_equal(df['2013/09/30':'2013/10/02'], df.iloc[:2])
tm.assert_frame_equal(df['2013/10/01':'2013/10/02'], df.iloc[:2])
tm.assert_frame_equal(df['2013/10/02':'2013/09/30'], empty)
tm.assert_frame_equal(df['2013/10/15':'2013/10/17'], empty)
tm.assert_frame_equal(df['2013-06':'2013-09'], empty)
tm.assert_frame_equal(df['2013-11':'2013-12'], empty)
def test_pindex_fieldaccessor_nat(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2012-03', '2012-04'], freq='D')
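        # NaT entries surface as -1 in the integer field accessors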
self.assert_numpy_array_equal(idx.year, np.array([2011, 2011, -1, 2012, 2012]))
self.assert_numpy_array_equal(idx.month, np.array([1, 2, -1, 3, 4]))
def test_pindex_qaccess(self):
pi = PeriodIndex(['2Q05', '3Q05', '4Q05', '1Q06', '2Q06'], freq='Q')
s = Series(np.random.rand(len(pi)), index=pi).cumsum()
        # TODO: fix these accessors!
self.assertEqual(s['05Q4'], s[2])
def test_period_dt64_round_trip(self):
dti = date_range('1/1/2000', '1/7/2002', freq='B')
pi = dti.to_period()
self.assertTrue(pi.to_timestamp().equals(dti))
dti = date_range('1/1/2000', '1/7/2002', freq='B')
pi = dti.to_period(freq='H')
self.assertTrue(pi.to_timestamp().equals(dti))
def test_to_period_quarterly(self):
# make sure we can make the round trip
for month in MONTHS:
freq = 'Q-%s' % month
rng = period_range('1989Q3', '1991Q3', freq=freq)
stamps = rng.to_timestamp()
result = stamps.to_period(freq)
self.assertTrue(rng.equals(result))
def test_to_period_quarterlyish(self):
offsets = ['BQ', 'QS', 'BQS']
for off in offsets:
rng = date_range('01-Jan-2012', periods=8, freq=off)
prng = rng.to_period()
self.assertEqual(prng.freq, 'Q-DEC')
def test_to_period_annualish(self):
offsets = ['BA', 'AS', 'BAS']
for off in offsets:
rng = date_range('01-Jan-2012', periods=8, freq=off)
prng = rng.to_period()
self.assertEqual(prng.freq, 'A-DEC')
def test_to_period_monthish(self):
offsets = ['MS', 'BM']
for off in offsets:
rng = date_range('01-Jan-2012', periods=8, freq=off)
prng = rng.to_period()
self.assertEqual(prng.freq, 'M')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
rng = date_range('01-Jan-2012', periods=8, freq='EOM')
prng = rng.to_period()
self.assertEqual(prng.freq, 'M')
def test_multiples(self):
result1 = Period('1989', freq='2A')
result2 = Period('1989', freq='A')
self.assertEqual(result1.ordinal, result2.ordinal)
self.assertEqual(result1.freqstr, '2A-DEC')
self.assertEqual(result2.freqstr, 'A-DEC')
self.assertEqual(result1.freq, offsets.YearEnd(2))
self.assertEqual(result2.freq, offsets.YearEnd())
self.assertEqual((result1 + 1).ordinal, result1.ordinal + 2)
self.assertEqual((result1 - 1).ordinal, result2.ordinal - 2)
def test_pindex_multiples(self):
pi = PeriodIndex(start='1/1/11', end='12/31/11', freq='2M')
expected = PeriodIndex(['2011-01', '2011-03', '2011-05', '2011-07',
'2011-09', '2011-11'], freq='M')
tm.assert_index_equal(pi, expected)
self.assertEqual(pi.freq, offsets.MonthEnd(2))
self.assertEqual(pi.freqstr, '2M')
pi = period_range(start='1/1/11', end='12/31/11', freq='2M')
tm.assert_index_equal(pi, expected)
self.assertEqual(pi.freq, offsets.MonthEnd(2))
self.assertEqual(pi.freqstr, '2M')
pi = period_range(start='1/1/11', periods=6, freq='2M')
tm.assert_index_equal(pi, expected)
self.assertEqual(pi.freq, offsets.MonthEnd(2))
self.assertEqual(pi.freqstr, '2M')
def test_iteration(self):
index = PeriodIndex(start='1/1/10', periods=4, freq='B')
result = list(index)
tm.assertIsInstance(result[0], Period)
self.assertEqual(result[0].freq, index.freq)
def test_take(self):
index = PeriodIndex(start='1/1/10', end='12/31/12', freq='D', name='idx')
expected = PeriodIndex([datetime(2010, 1, 6), datetime(2010, 1, 7),
datetime(2010, 1, 9), datetime(2010, 1, 13)],
freq='D', name='idx')
taken1 = index.take([5, 6, 8, 12])
taken2 = index[[5, 6, 8, 12]]
for taken in [taken1, taken2]:
self.assertTrue(taken.equals(expected))
tm.assertIsInstance(taken, PeriodIndex)
self.assertEqual(taken.freq, index.freq)
self.assertEqual(taken.name, expected.name)
def test_joins(self):
index = period_range('1/1/2000', '1/20/2000', freq='D')
for kind in ['inner', 'outer', 'left', 'right']:
joined = index.join(index[:-5], how=kind)
tm.assertIsInstance(joined, PeriodIndex)
self.assertEqual(joined.freq, index.freq)
def test_join_self(self):
index = period_range('1/1/2000', '1/20/2000', freq='D')
for kind in ['inner', 'outer', 'left', 'right']:
res = index.join(index, how=kind)
self.assertIs(index, res)
def test_join_does_not_recur(self):
df = tm.makeCustomDataframe(3, 2, data_gen_f=lambda *args:
np.random.randint(2), c_idx_type='p',
r_idx_type='dt')
s = df.iloc[:2, 0]
res = s.index.join(df.columns, how='outer')
expected = Index([s.index[0], s.index[1],
df.columns[0], df.columns[1]], object)
tm.assert_index_equal(res, expected)
def test_align_series(self):
rng = period_range('1/1/2000', '1/1/2010', freq='A')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts + ts[::2]
expected = ts + ts
expected[1::2] = np.nan
assert_series_equal(result, expected)
result = ts + _permute(ts[::2])
assert_series_equal(result, expected)
# it works!
for kind in ['inner', 'outer', 'left', 'right']:
ts.align(ts[::2], join=kind)
msg = "Input has different freq=D from PeriodIndex\\(freq=A-DEC\\)"
with assertRaisesRegexp(ValueError, msg):
ts + ts.asfreq('D', how="end")
def test_align_frame(self):
rng = period_range('1/1/2000', '1/1/2010', freq='A')
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts + ts[::2]
expected = ts + ts
expected.values[1::2] = np.nan
tm.assert_frame_equal(result, expected)
result = ts + _permute(ts[::2])
tm.assert_frame_equal(result, expected)
def test_union(self):
index = period_range('1/1/2000', '1/20/2000', freq='D')
result = index[:-5].union(index[10:])
self.assertTrue(result.equals(index))
# not in order
result = _permute(index[:-5]).union(_permute(index[10:]))
self.assertTrue(result.equals(index))
# raise if different frequencies
index = period_range('1/1/2000', '1/20/2000', freq='D')
index2 = period_range('1/1/2000', '1/20/2000', freq='W-WED')
self.assertRaises(ValueError, index.union, index2)
self.assertRaises(ValueError, index.join, index.to_timestamp())
index3 = period_range('1/1/2000', '1/20/2000', freq='2D')
self.assertRaises(ValueError, index.join, index3)
def test_intersection(self):
index = period_range('1/1/2000', '1/20/2000', freq='D')
result = index[:-5].intersection(index[10:])
self.assertTrue(result.equals(index[10:-5]))
# not in order
left = _permute(index[:-5])
right = _permute(index[10:])
result = left.intersection(right).sort_values()
self.assertTrue(result.equals(index[10:-5]))
# raise if different frequencies
index = period_range('1/1/2000', '1/20/2000', freq='D')
index2 = period_range('1/1/2000', '1/20/2000', freq='W-WED')
self.assertRaises(ValueError, index.intersection, index2)
index3 = period_range('1/1/2000', '1/20/2000', freq='2D')
self.assertRaises(ValueError, index.intersection, index3)
def test_fields(self):
# year, month, day, hour, minute
# second, weekofyear, week, dayofweek, weekday, dayofyear, quarter
# qyear
pi = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2005')
self._check_all_fields(pi)
pi = PeriodIndex(freq='Q', start='1/1/2001', end='12/1/2002')
self._check_all_fields(pi)
pi = PeriodIndex(freq='M', start='1/1/2001', end='1/1/2002')
self._check_all_fields(pi)
pi = PeriodIndex(freq='D', start='12/1/2001', end='6/1/2001')
self._check_all_fields(pi)
pi = PeriodIndex(freq='B', start='12/1/2001', end='6/1/2001')
self._check_all_fields(pi)
pi = PeriodIndex(freq='H', start='12/31/2001', end='1/1/2002 23:00')
self._check_all_fields(pi)
pi = PeriodIndex(freq='Min', start='12/31/2001', end='1/1/2002 00:20')
self._check_all_fields(pi)
pi = PeriodIndex(freq='S', start='12/31/2001 00:00:00',
end='12/31/2001 00:05:00')
self._check_all_fields(pi)
end_intv = Period('2006-12-31', 'W')
i1 = PeriodIndex(end=end_intv, periods=10)
self._check_all_fields(i1)
def _check_all_fields(self, periodindex):
fields = ['year', 'month', 'day', 'hour', 'minute',
'second', 'weekofyear', 'week', 'dayofweek',
'weekday', 'dayofyear', 'quarter', 'qyear', 'days_in_month']
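        # each vectorized field accessor on the index must agree element-wise
        # with the corresponding scalar Period attribute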
periods = list(periodindex)
for field in fields:
field_idx = getattr(periodindex, field)
assert_equal(len(periodindex), len(field_idx))
for x, val in zip(periods, field_idx):
assert_equal(getattr(x, field), val)
def test_is_full(self):
index = PeriodIndex([2005, 2007, 2009], freq='A')
self.assertFalse(index.is_full)
index = PeriodIndex([2005, 2006, 2007], freq='A')
self.assertTrue(index.is_full)
index = PeriodIndex([2005, 2005, 2007], freq='A')
self.assertFalse(index.is_full)
index = PeriodIndex([2005, 2005, 2006], freq='A')
self.assertTrue(index.is_full)
index = PeriodIndex([2006, 2005, 2005], freq='A')
self.assertRaises(ValueError, getattr, index, 'is_full')
self.assertTrue(index[:0].is_full)
def test_map(self):
index = PeriodIndex([2005, 2007, 2009], freq='A')
result = index.map(lambda x: x + 1)
expected = index + 1
self.assertTrue(result.equals(expected))
result = index.map(lambda x: x.ordinal)
exp = [x.ordinal for x in index]
tm.assert_numpy_array_equal(result, exp)
def test_map_with_string_constructor(self):
raw = [2005, 2007, 2009]
index = PeriodIndex(raw, freq='A')
types = str,
if compat.PY3:
# unicode
types += compat.text_type,
for t in types:
expected = np.array(lmap(t, raw), dtype=object)
res = index.map(t)
# should return an array
tm.assertIsInstance(res, np.ndarray)
# preserve element types
self.assertTrue(all(isinstance(resi, t) for resi in res))
# dtype should be object
self.assertEqual(res.dtype, np.dtype('object').type)
# lastly, values should compare equal
tm.assert_numpy_array_equal(res, expected)
def test_convert_array_of_periods(self):
rng = period_range('1/1/2000', periods=20, freq='D')
periods = list(rng)
result = pd.Index(periods)
tm.assertIsInstance(result, PeriodIndex)
def test_with_multi_index(self):
# #1705
index = date_range('1/1/2012', periods=4, freq='12H')
index_as_arrays = [index.to_period(freq='D'), index.hour]
s = Series([0, 1, 2, 3], index_as_arrays)
tm.assertIsInstance(s.index.levels[0], PeriodIndex)
tm.assertIsInstance(s.index.values[0][0], Period)
def test_to_datetime_1703(self):
index = period_range('1/1/2012', periods=4, freq='D')
result = index.to_datetime()
self.assertEqual(result[0], Timestamp('1/1/2012'))
def test_get_loc_msg(self):
idx = period_range('2000-1-1', freq='A', periods=10)
bad_period = Period('2012', 'A')
self.assertRaises(KeyError, idx.get_loc, bad_period)
try:
idx.get_loc(bad_period)
except KeyError as inst:
self.assertEqual(inst.args[0], bad_period)
def test_append_concat(self):
# #1815
d1 = date_range('12/31/1990', '12/31/1999', freq='A-DEC')
d2 = date_range('12/31/2000', '12/31/2009', freq='A-DEC')
s1 = Series(np.random.randn(10), d1)
s2 = Series(np.random.randn(10), d2)
s1 = s1.to_period()
s2 = s2.to_period()
# drops index
result = pd.concat([s1, s2])
tm.assertIsInstance(result.index, PeriodIndex)
self.assertEqual(result.index[0], s1.index[0])
def test_pickle_freq(self):
# GH2891
prng = period_range('1/1/2011', '1/1/2012', freq='M')
new_prng = self.round_trip_pickle(prng)
self.assertEqual(new_prng.freq, offsets.MonthEnd())
self.assertEqual(new_prng.freqstr, 'M')
def test_slice_keep_name(self):
idx = period_range('20010101', periods=10, freq='D', name='bob')
self.assertEqual(idx.name, idx[1:].name)
def test_factorize(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
exp_arr = np.array([0, 0, 1, 1, 2, 2])
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
arr, idx = idx1.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
idx2 = pd.PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
exp_arr = np.array([2, 2, 1, 0, 2, 0])
arr, idx = idx2.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
exp_arr = np.array([0, 0, 1, 2, 0, 2])
exp_idx = PeriodIndex(['2014-03', '2014-02', '2014-01'], freq='M')
arr, idx = idx2.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
def test_recreate_from_data(self):
for o in ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'N', 'H']:
org = PeriodIndex(start='2001/04/01', freq=o, periods=1)
idx = PeriodIndex(org.values, freq=o)
self.assertTrue(idx.equals(org))
def test_combine_first(self):
# GH 3367
didx = pd.DatetimeIndex(start='1950-01-31', end='1950-07-31', freq='M')
pidx = pd.PeriodIndex(start=pd.Period('1950-1'), end=pd.Period('1950-7'), freq='M')
# check to be consistent with DatetimeIndex
for idx in [didx, pidx]:
a = pd.Series([1, np.nan, np.nan, 4, 5, np.nan, 7], index=idx)
b = pd.Series([9, 9, 9, 9, 9, 9, 9], index=idx)
result = a.combine_first(b)
expected = pd.Series([1, 9, 9, 4, 5, 9, 7], index=idx, dtype=np.float64)
tm.assert_series_equal(result, expected)
def test_searchsorted(self):
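        # searchsorted should locate Periods of matching freq and reject mismatched freqs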
for freq in ['D', '2D']:
pidx = pd.PeriodIndex(['2014-01-01', '2014-01-02', '2014-01-03',
'2014-01-04', '2014-01-05'], freq=freq)
p1 = pd.Period('2014-01-01', freq=freq)
self.assertEqual(pidx.searchsorted(p1), 0)
p2 = pd.Period('2014-01-04', freq=freq)
self.assertEqual(pidx.searchsorted(p2), 3)
msg = "Input has different freq=H from PeriodIndex"
with self.assertRaisesRegexp(ValueError, msg):
pidx.searchsorted(pd.Period('2014-01-01', freq='H'))
msg = "Input has different freq=5D from PeriodIndex"
with self.assertRaisesRegexp(ValueError, msg):
pidx.searchsorted(pd.Period('2014-01-01', freq='5D'))
def test_round_trip(self):
p = Period('2000Q1')
new_p = self.round_trip_pickle(p)
self.assertEqual(new_p, p)
def _permute(obj):
return obj.take(np.random.permutation(len(obj)))
class TestMethods(tm.TestCase):
"Base test class for MaskedArrays."
def test_add(self):
dt1 = Period(freq='D', year=2008, month=1, day=1)
dt2 = Period(freq='D', year=2008, month=1, day=2)
assert_equal(dt1 + 1, dt2)
#
# GH 4731
msg = "unsupported operand type\(s\)"
with tm.assertRaisesRegexp(TypeError, msg):
dt1 + "str"
with tm.assertRaisesRegexp(TypeError, msg):
dt1 + dt2
def test_add_offset(self):
# freq is DateOffset
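        # offsets incompatible with the Period's own frequency must raise ValueError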
for freq in ['A', '2A', '3A']:
p = Period('2011', freq=freq)
self.assertEqual(p + offsets.YearEnd(2), Period('2013', freq=freq))
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
with tm.assertRaises(ValueError):
p + o
for freq in ['M', '2M', '3M']:
p = Period('2011-03', freq=freq)
self.assertEqual(p + offsets.MonthEnd(2), Period('2011-05', freq=freq))
self.assertEqual(p + offsets.MonthEnd(12), Period('2012-03', freq=freq))
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
with tm.assertRaises(ValueError):
p + o
# freq is Tick
for freq in ['D', '2D', '3D']:
p = Period('2011-04-01', freq=freq)
self.assertEqual(p + offsets.Day(5), Period('2011-04-06', freq=freq))
self.assertEqual(p + offsets.Hour(24), Period('2011-04-02', freq=freq))
self.assertEqual(p + np.timedelta64(2, 'D'), Period('2011-04-03', freq=freq))
self.assertEqual(p + np.timedelta64(3600 * 24, 's'), Period('2011-04-02', freq=freq))
self.assertEqual(p + timedelta(-2), Period('2011-03-30', freq=freq))
self.assertEqual(p + timedelta(hours=48), Period('2011-04-03', freq=freq))
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(4, 'h'), timedelta(hours=23)]:
with tm.assertRaises(ValueError):
p + o
for freq in ['H', '2H', '3H']:
p = Period('2011-04-01 09:00', freq=freq)
self.assertEqual(p + offsets.Day(2), Period('2011-04-03 09:00', freq=freq))
self.assertEqual(p + offsets.Hour(3), Period('2011-04-01 12:00', freq=freq))
self.assertEqual(p + np.timedelta64(3, 'h'), Period('2011-04-01 12:00', freq=freq))
self.assertEqual(p + np.timedelta64(3600, 's'), Period('2011-04-01 10:00', freq=freq))
self.assertEqual(p + timedelta(minutes=120), Period('2011-04-01 11:00', freq=freq))
self.assertEqual(p + timedelta(days=4, minutes=180), Period('2011-04-05 12:00', freq=freq))
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(3200, 's'), timedelta(hours=23, minutes=30)]:
with tm.assertRaises(ValueError):
p + o
def test_add_offset_nat(self):
# freq is DateOffset
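        # NaT periods absorb compatible offsets (the ordinal stays iNaT);
        # incompatible offsets still raise ValueError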
for freq in ['A', '2A', '3A']:
p = Period('NaT', freq=freq)
for o in [offsets.YearEnd(2)]:
self.assertEqual((p + o).ordinal, tslib.iNaT)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
with tm.assertRaises(ValueError):
p + o
for freq in ['M', '2M', '3M']:
p = Period('NaT', freq=freq)
for o in [offsets.MonthEnd(2), offsets.MonthEnd(12)]:
self.assertEqual((p + o).ordinal, tslib.iNaT)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
with tm.assertRaises(ValueError):
p + o
# freq is Tick
for freq in ['D', '2D', '3D']:
p = Period('NaT', freq=freq)
for o in [offsets.Day(5), offsets.Hour(24), np.timedelta64(2, 'D'),
np.timedelta64(3600 * 24, 's'), timedelta(-2), timedelta(hours=48)]:
self.assertEqual((p + o).ordinal, tslib.iNaT)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(4, 'h'), timedelta(hours=23)]:
with tm.assertRaises(ValueError):
p + o
for freq in ['H', '2H', '3H']:
p = Period('NaT', freq=freq)
for o in [offsets.Day(2), offsets.Hour(3), np.timedelta64(3, 'h'),
np.timedelta64(3600, 's'), timedelta(minutes=120),
timedelta(days=4, minutes=180)]:
self.assertEqual((p + o).ordinal, tslib.iNaT)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(3200, 's'), timedelta(hours=23, minutes=30)]:
with tm.assertRaises(ValueError):
p + o
def test_sub_offset(self):
# freq is DateOffset
for freq in ['A', '2A', '3A']:
p = Period('2011', freq=freq)
self.assertEqual(p - offsets.YearEnd(2), Period('2009', freq=freq))
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
with tm.assertRaises(ValueError):
p - o
for freq in ['M', '2M', '3M']:
p = Period('2011-03', freq=freq)
self.assertEqual(p - offsets.MonthEnd(2), Period('2011-01', freq=freq))
self.assertEqual(p - offsets.MonthEnd(12), Period('2010-03', freq=freq))
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
with tm.assertRaises(ValueError):
p - o
# freq is Tick
for freq in ['D', '2D', '3D']:
p = Period('2011-04-01', freq=freq)
self.assertEqual(p - offsets.Day(5), Period('2011-03-27', freq=freq))
self.assertEqual(p - offsets.Hour(24), Period('2011-03-31', freq=freq))
self.assertEqual(p - np.timedelta64(2, 'D'), Period('2011-03-30', freq=freq))
self.assertEqual(p - np.timedelta64(3600 * 24, 's'), Period('2011-03-31', freq=freq))
self.assertEqual(p - timedelta(-2), Period('2011-04-03', freq=freq))
self.assertEqual(p - timedelta(hours=48), Period('2011-03-30', freq=freq))
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(4, 'h'), timedelta(hours=23)]:
with tm.assertRaises(ValueError):
p - o
for freq in ['H', '2H', '3H']:
p = Period('2011-04-01 09:00', freq=freq)
self.assertEqual(p - offsets.Day(2), Period('2011-03-30 09:00', freq=freq))
self.assertEqual(p - offsets.Hour(3), Period('2011-04-01 06:00', freq=freq))
self.assertEqual(p - np.timedelta64(3, 'h'), Period('2011-04-01 06:00', freq=freq))
self.assertEqual(p - np.timedelta64(3600, 's'), Period('2011-04-01 08:00', freq=freq))
self.assertEqual(p - timedelta(minutes=120), Period('2011-04-01 07:00', freq=freq))
self.assertEqual(p - timedelta(days=4, minutes=180), Period('2011-03-28 06:00', freq=freq))
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(3200, 's'), timedelta(hours=23, minutes=30)]:
with tm.assertRaises(ValueError):
p - o
def test_sub_offset_nat(self):
# freq is DateOffset
for freq in ['A', '2A', '3A']:
p = Period('NaT', freq=freq)
for o in [offsets.YearEnd(2)]:
self.assertEqual((p - o).ordinal, tslib.iNaT)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
with tm.assertRaises(ValueError):
p - o
for freq in ['M', '2M', '3M']:
p = Period('NaT', freq=freq)
for o in [offsets.MonthEnd(2), offsets.MonthEnd(12)]:
self.assertEqual((p - o).ordinal, tslib.iNaT)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
with tm.assertRaises(ValueError):
p - o
# freq is Tick
for freq in ['D', '2D', '3D']:
p = Period('NaT', freq=freq)
for o in [offsets.Day(5), offsets.Hour(24), np.timedelta64(2, 'D'),
np.timedelta64(3600 * 24, 's'), timedelta(-2), timedelta(hours=48)]:
self.assertEqual((p - o).ordinal, tslib.iNaT)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(4, 'h'), timedelta(hours=23)]:
with tm.assertRaises(ValueError):
p - o
for freq in ['H', '2H', '3H']:
p = Period('NaT', freq=freq)
for o in [offsets.Day(2), offsets.Hour(3), np.timedelta64(3, 'h'),
np.timedelta64(3600, 's'), timedelta(minutes=120),
timedelta(days=4, minutes=180)]:
self.assertEqual((p - o).ordinal, tslib.iNaT)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(3200, 's'), timedelta(hours=23, minutes=30)]:
with tm.assertRaises(ValueError):
p - o
def test_nat_ops(self):
for freq in ['M', '2M', '3M']:
p = Period('NaT', freq=freq)
self.assertEqual((p + 1).ordinal, tslib.iNaT)
self.assertEqual((p - 1).ordinal, tslib.iNaT)
self.assertEqual((p - Period('2011-01', freq=freq)).ordinal, tslib.iNaT)
self.assertEqual((Period('2011-01', freq=freq) - p).ordinal, tslib.iNaT)
def test_pi_ops_nat(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'], freq='M', name='idx')
result = idx + 2
expected = PeriodIndex(['2011-03', '2011-04', 'NaT', '2011-06'], freq='M', name='idx')
self.assertTrue(result.equals(expected))
result2 = result - 2
self.assertTrue(result2.equals(idx))
msg = "unsupported operand type\(s\)"
with tm.assertRaisesRegexp(TypeError, msg):
idx + "str"
def test_pi_ops_array(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'], freq='M', name='idx')
result = idx + np.array([1, 2, 3, 4])
exp = PeriodIndex(['2011-02', '2011-04', 'NaT', '2011-08'], freq='M', name='idx')
self.assert_index_equal(result, exp)
result = np.add(idx, np.array([4, -1, 1, 2]))
exp = PeriodIndex(['2011-05', '2011-01', 'NaT', '2011-06'], freq='M', name='idx')
self.assert_index_equal(result, exp)
result = idx - np.array([1, 2, 3, 4])
exp = PeriodIndex(['2010-12', '2010-12', 'NaT', '2010-12'], freq='M', name='idx')
self.assert_index_equal(result, exp)
result = np.subtract(idx, np.array([3, 2, 3, -2]))
exp = PeriodIndex(['2010-10', '2010-12', 'NaT', '2011-06'], freq='M', name='idx')
self.assert_index_equal(result, exp)
# incompatible freq
msg = "Input has different freq from PeriodIndex\(freq=M\)"
with tm.assertRaisesRegexp(ValueError, msg):
idx + np.array([np.timedelta64(1, 'D')] * 4)
idx = PeriodIndex(['2011-01-01 09:00', '2011-01-01 10:00', 'NaT',
'2011-01-01 12:00'], freq='H', name='idx')
result = idx + np.array([np.timedelta64(1, 'D')] * 4)
exp = PeriodIndex(['2011-01-02 09:00', '2011-01-02 10:00', 'NaT',
'2011-01-02 12:00'], freq='H', name='idx')
self.assert_index_equal(result, exp)
result = idx - np.array([np.timedelta64(1, 'h')] * 4)
exp = PeriodIndex(['2011-01-01 08:00', '2011-01-01 09:00', 'NaT',
'2011-01-01 11:00'], freq='H', name='idx')
self.assert_index_equal(result, exp)
msg = "Input has different freq from PeriodIndex\(freq=H\)"
with tm.assertRaisesRegexp(ValueError, msg):
idx + np.array([np.timedelta64(1, 's')] * 4)
idx = PeriodIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00', 'NaT',
'2011-01-01 12:00:00'], freq='S', name='idx')
result = idx + np.array([np.timedelta64(1, 'h'), np.timedelta64(30, 's'),
np.timedelta64(2, 'h'), np.timedelta64(15, 'm')])
exp = PeriodIndex(['2011-01-01 10:00:00', '2011-01-01 10:00:30', 'NaT',
'2011-01-01 12:15:00'], freq='S', name='idx')
self.assert_index_equal(result, exp)
class TestPeriodRepresentation(tm.TestCase):
"""
Period ordinals are expected to match NumPy's epoch-based unit conventions (ordinal 0 corresponds to 1970-01-01).
"""
def test_annual(self):
self._check_freq('A', 1970)
def test_monthly(self):
self._check_freq('M', '1970-01')
def test_weekly(self):
self._check_freq('W-THU', '1970-01-01')
def test_daily(self):
self._check_freq('D', '1970-01-01')
def test_business_daily(self):
self._check_freq('B', '1970-01-01')
def test_hourly(self):
self._check_freq('H', '1970-01-01')
def test_minutely(self):
self._check_freq('T', '1970-01-01')
def test_secondly(self):
self._check_freq('S', '1970-01-01')
def test_millisecondly(self):
self._check_freq('L', '1970-01-01')
def test_microsecondly(self):
self._check_freq('U', '1970-01-01')
def test_nanosecondly(self):
self._check_freq('N', '1970-01-01')
def _check_freq(self, freq, base_date):
rng = PeriodIndex(start=base_date, periods=10, freq=freq)
exp = np.arange(10, dtype=np.int64)
self.assert_numpy_array_equal(rng.values, exp)
def test_negone_ordinals(self):
freqs = ['A', 'M', 'Q', 'D', 'H', 'T', 'S']
period = Period(ordinal=-1, freq='D')
for freq in freqs:
repr(period.asfreq(freq))
for freq in freqs:
period = Period(ordinal=-1, freq=freq)
repr(period)
self.assertEqual(period.year, 1969)
period = Period(ordinal=-1, freq='B')
repr(period)
period = Period(ordinal=-1, freq='W')
repr(period)
class TestComparisons(tm.TestCase):
def setUp(self):
self.january1 = Period('2000-01', 'M')
self.january2 = Period('2000-01', 'M')
self.february = Period('2000-02', 'M')
self.march = Period('2000-03', 'M')
self.day = Period('2012-01-01', 'D')
def test_equal(self):
self.assertEqual(self.january1, self.january2)
def test_equal_Raises_Value(self):
with tm.assertRaises(ValueError):
self.january1 == self.day
def test_notEqual(self):
self.assertNotEqual(self.january1, 1)
self.assertNotEqual(self.january1, self.february)
def test_greater(self):
self.assertTrue(self.february > self.january1)
def test_greater_Raises_Value(self):
with tm.assertRaises(ValueError):
self.january1 > self.day
def test_greater_Raises_Type(self):
with tm.assertRaises(TypeError):
self.january1 > 1
def test_greaterEqual(self):
self.assertTrue(self.january1 >= self.january2)
def test_greaterEqual_Raises_Value(self):
with tm.assertRaises(ValueError):
self.january1 >= self.day
with tm.assertRaises(TypeError):
print(self.january1 >= 1)
def test_smallerEqual(self):
self.assertTrue(self.january1 <= self.january2)
def test_smallerEqual_Raises_Value(self):
with tm.assertRaises(ValueError):
self.january1 <= self.day
def test_smallerEqual_Raises_Type(self):
with tm.assertRaises(TypeError):
self.january1 <= 1
def test_smaller(self):
self.assertTrue(self.january1 < self.february)
def test_smaller_Raises_Value(self):
with tm.assertRaises(ValueError):
self.january1 < self.day
def test_smaller_Raises_Type(self):
with tm.assertRaises(TypeError):
self.january1 < 1
def test_sort(self):
periods = [self.march, self.january1, self.february]
correctPeriods = [self.january1, self.february, self.march]
self.assertEqual(sorted(periods), correctPeriods)
def test_period_nat_comp(self):
p_nat = Period('NaT', freq='D')
p = Period('2011-01-01', freq='D')
nat = pd.Timestamp('NaT')
t = pd.Timestamp('2011-01-01')
# confirm Period('NaT') behaves identically to Timestamp('NaT')
for left, right in [(p_nat, p), (p, p_nat), (p_nat, p_nat),
(nat, t), (t, nat), (nat, nat)]:
self.assertEqual(left < right, False)
self.assertEqual(left > right, False)
self.assertEqual(left == right, False)
self.assertEqual(left != right, True)
self.assertEqual(left <= right, False)
self.assertEqual(left >= right, False)
def test_pi_pi_comp(self):
for freq in ['M', '2M', '3M']:
base = PeriodIndex(['2011-01', '2011-02',
'2011-03', '2011-04'], freq=freq)
p = Period('2011-02', freq=freq)
exp = np.array([False, True, False, False])
self.assert_numpy_array_equal(base == p, exp)
exp = np.array([True, False, True, True])
self.assert_numpy_array_equal(base != p, exp)
exp = np.array([False, False, True, True])
self.assert_numpy_array_equal(base > p, exp)
exp = np.array([True, False, False, False])
self.assert_numpy_array_equal(base < p, exp)
exp = np.array([False, True, True, True])
self.assert_numpy_array_equal(base >= p, exp)
exp = np.array([True, True, False, False])
self.assert_numpy_array_equal(base <= p, exp)
idx = PeriodIndex(['2011-02', '2011-01', '2011-03', '2011-05'], freq=freq)
exp = np.array([False, False, True, False])
self.assert_numpy_array_equal(base == idx, exp)
exp = np.array([True, True, False, True])
self.assert_numpy_array_equal(base != idx, exp)
exp = np.array([False, True, False, False])
self.assert_numpy_array_equal(base > idx, exp)
exp = np.array([True, False, False, True])
self.assert_numpy_array_equal(base < idx, exp)
exp = np.array([False, True, True, False])
self.assert_numpy_array_equal(base >= idx, exp)
exp = np.array([True, False, True, True])
self.assert_numpy_array_equal(base <= idx, exp)
# different base freq
msg = "Input has different freq=A-DEC from PeriodIndex"
with tm.assertRaisesRegexp(ValueError, msg):
base <= Period('2011', freq='A')
with tm.assertRaisesRegexp(ValueError, msg):
idx = PeriodIndex(['2011', '2012', '2013', '2014'], freq='A')
base <= idx
# different mult
msg = "Input has different freq=4M from PeriodIndex"
with tm.assertRaisesRegexp(ValueError, msg):
base <= Period('2011', freq='4M')
with tm.assertRaisesRegexp(ValueError, msg):
idx = PeriodIndex(['2011', '2012', '2013', '2014'], freq='4M')
base <= idx
def test_pi_nat_comp(self):
for freq in ['M', '2M', '3M']:
idx1 = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-05'], freq=freq)
result = idx1 > Period('2011-02', freq=freq)
exp = np.array([False, False, False, True])
self.assert_numpy_array_equal(result, exp)
result = idx1 == Period('NaT', freq=freq)
exp = np.array([False, False, False, False])
self.assert_numpy_array_equal(result, exp)
result = idx1 != Period('NaT', freq=freq)
exp = np.array([True, True, True, True])
self.assert_numpy_array_equal(result, exp)
idx2 = PeriodIndex(['2011-02', '2011-01', '2011-04', 'NaT'], freq=freq)
result = idx1 < idx2
exp = np.array([True, False, False, False])
self.assert_numpy_array_equal(result, exp)
result = idx1 == idx2
exp = np.array([False, False, False, False])
self.assert_numpy_array_equal(result, exp)
result = idx1 != idx2
exp = np.array([True, True, True, True])
self.assert_numpy_array_equal(result, exp)
result = idx1 == idx1
exp = np.array([True, True, False, True])
self.assert_numpy_array_equal(result, exp)
result = idx1 != idx1
exp = np.array([False, False, True, False])
self.assert_numpy_array_equal(result, exp)
diff = PeriodIndex(['2011-02', '2011-01', '2011-04', 'NaT'], freq='4M')
msg = "Input has different freq=4M from PeriodIndex"
with tm.assertRaisesRegexp(ValueError, msg):
idx1 > diff
with tm.assertRaisesRegexp(ValueError, msg):
idx1 == diff
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| mit |
akionakamura/scikit-learn | examples/cluster/plot_lena_ward_segmentation.py | 271 | 1998 | """
===============================================================
A demo of structured Ward hierarchical clustering on Lena image
===============================================================
Compute the segmentation of a 2D image with Ward hierarchical
clustering. The clustering is spatially constrained in order
for each segmented region to be in one piece.
"""
# Author : Vincent Michel, 2010
# Alexandre Gramfort, 2011
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.cluster import AgglomerativeClustering
###############################################################################
# Generate data
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
X = np.reshape(lena, (-1, 1))
###############################################################################
# Define the structure A of the data. Pixels connected to their neighbors.
connectivity = grid_to_graph(*lena.shape)
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
n_clusters = 15 # number of regions
ward = AgglomerativeClustering(n_clusters=n_clusters,
linkage='ward', connectivity=connectivity).fit(X)
label = np.reshape(ward.labels_, lena.shape)
print("Elapsed time: ", time.time() - st)
print("Number of pixels: ", label.size)
print("Number of clusters: ", np.unique(label).size)
###############################################################################
# Plot the results on an image
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(n_clusters):
plt.contour(label == l,
colors=[plt.cm.spectral(l / float(n_clusters)), ])
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
deepmind/bsuite | setup.py | 1 | 3066 | # python3
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Install script for setuptools."""
import imp
import setuptools
# Additional requirements for TensorFlow baselines, excluding OpenAI & Dopamine.
# See baselines/README.md for more information.
baselines_require = [
'dm-sonnet',
'dm-tree',
'tensorflow',
'tensorflow_probability',
'trfl',
'tqdm',
]
# Additional requirements for JAX baselines.
# See baselines/README.md for more information.
baselines_jax_require = [
'dataclasses',
'dm-haiku',
'dm-tree',
'jax',
'jaxlib',
'optax',
'rlax',
'tqdm',
]
baselines_third_party_require = [
'tensorflow == 1.15',
'dopamine-rl',
'baselines',
]
testing_require = [
'mock',
'pytest-xdist',
'pytype',
]
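# A sketch of how the optional extras below are typically installed from a local
# checkout (the extra names match the extras_require mapping passed to setup()):
#   pip install .                   # core bsuite only
#   pip install .[baselines_jax]    # core plus the JAX baseline dependencies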
setuptools.setup(
name='bsuite',
description=('Core RL Behaviour Suite. '
'A collection of reinforcement learning experiments.'),
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
url='https://github.com/deepmind/bsuite',
author='DeepMind',
author_email='[email protected]',
license='Apache License, Version 2.0',
version=imp.load_source('_metadata', 'bsuite/_metadata.py').__version__,
keywords='reinforcement-learning python machine-learning',
packages=setuptools.find_packages(),
install_requires=[
'absl-py',
'dm_env',
'frozendict',
'gym',
'matplotlib',
'numpy',
'pandas',
'plotnine',
'scipy',
'scikit-image',
'six',
'termcolor',
],
extras_require={
'baselines': baselines_require,
'baselines_jax': baselines_jax_require,
'baselines_third_party': baselines_third_party_require,
'testing': testing_require,
},
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Operating System :: Microsoft :: Windows',
'Operating System :: MacOS :: MacOS X',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
)
| apache-2.0 |
landmanbester/Copernicus | My2Ddist.py | 1 | 7177 | import pylab
import numpy
from numpy import argwhere, zeros, hstack, append
import matplotlib.patches
from mpl_toolkits.axes_grid1 import make_axes_locatable
import scipy.stats
import matplotlib as mpl
def fracs_inside_contours(x, y, contours):
"""
Calculate the fraction of points x,y inside each contour level.
"""
fracs = []
xy = numpy.vstack([x,y]).transpose()
for (icollection, collection) in enumerate(contours.collections):
path = collection.get_paths()[0]
frac = float(sum(path.contains_points(xy)))/len(x)
fracs.append(frac)
return fracs
def frac_label_contours(x, y, contours, format='%.2f'):
"""
Label contours according to the fraction of points x,y inside.
"""
fracs = fracs_inside_contours(x,y,contours)
levels = contours.levels
labels = {}
for (level, frac) in zip(levels, fracs):
labels[level] = format % frac
contours.clabel(fmt=labels)
def contour_enclosing(x, y, fractions, xgrid, ygrid, zvals,
axes, nstart = 200,
*args, **kwargs):
"""
Plot contours encompassing specified fractions of points x,y.
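For example, contour_enclosing(x, y, [0.68, 0.95], xgrid, ygrid, zvals, ax1)
(with ax1 an existing matplotlib Axes) draws the two contours that enclose
roughly 68% and 95% of the points.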
"""
# Generate a large set of contours initially.
contours = axes.contour(xgrid, ygrid, zvals, nstart,extend='both')
# Set up fracs and levs for interpolation.
levs = contours.levels
fracs = numpy.array(fracs_inside_contours(x,y,contours))
sortinds = numpy.argsort(fracs)
levs = levs[sortinds]
fracs = fracs[sortinds]
# Find the levels that give the specified fractions.
levels = numpy.interp(fractions, fracs, levs)
# Remove the old contours from the graph.
for coll in contours.collections:
coll.remove()
# Reset the contours
contours.__init__(axes, xgrid, ygrid, zvals, levels, *args, **kwargs)
return contours
def invert_boxcox(z,lam):
return (1+z*lam)**(1.0/lam)
def plot2Ddist(variables,axeslist=None,maxvalues=None,histbinslist=[100, 100],
labels=[r'$l$',r'$\sigma_f$'],scaleview=True,plotscatter=True,
plothists=True,plotcontours=True,contourNGrid=200,bcx=True,
contourFractions=[0.68, 0.95],labelcontours=True):
"""
Plot contours of 2D distribution with marginal histograms:
Input:
variables = 2d array with samples
axeslist = optional tuple of axes (ax1, ax2, ax3) to add the plots to
maxvalues = values of hypers that maximise marginal posterior
histbinslist = number of bins to use for the histograms
labels = optional x and y axis labels
scaleview = optional argument determines whether to set the axes limits according to the plotted data
plotscatter, plothists, plotcontours = optional bool whether to plot the scatter, marginal histograms, and contours
contourNGrid = int number of grid points to evaluate kde on
contourFractions = optional % levels for contours
labelcontours = bool whether to label the contours with the fraction of points enclosed
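Example sketch (samples is a hypothetical pair of 1-D arrays of positive draws;
positive because the default bcx=True applies a Box-Cox transform):
fig = plot2Ddist([samples[0], samples[1]], labels=[r'$l$', r'$\sigma_f$'])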
"""
### Set up figures and axes. ###
if axeslist is None:
fig1 = pylab.figure(figsize=(8,8))
fig1.set_label('traces')
ax1 = pylab.gca()
divider = make_axes_locatable(ax1)
ax2 = divider.append_axes("top", 1.5, pad=0.0, sharex=ax1)
ax3 = divider.append_axes("right", 1.5, pad=0.0, sharey=ax1)
for tl in (ax2.get_xticklabels() + ax2.get_yticklabels() + ax3.get_xticklabels() + ax3.get_yticklabels()):
tl.set_visible(False)
axeslist = (ax1, ax2, ax3)
else:
ax1, ax2, ax3 = axeslist
#Do box-cox transform on data
if bcx:
x, mx = scipy.stats.boxcox(variables[0])
y, my = scipy.stats.boxcox(variables[1])
else:
x = variables[0]
y = variables[1]
### Plot the variables. ###
# Plot 2D scatter of variables.
if plotscatter:
ax1.plot(x, y,ls='',marker=',',color='r',alpha=0.15)
#Here we use kde to plot contours, might be better to use smoothing splines
if plotcontours:
style = {'linewidths':2.0, 'alpha':0.75,'zorder':10,'color':'k'}
gkde = scipy.stats.gaussian_kde([x,y])
xgrid, ygrid = numpy.mgrid[min(x):max(x):contourNGrid * 1j,min(y):max(y):contourNGrid * 1j]
zvals = numpy.array(gkde.evaluate([xgrid.flatten(),ygrid.flatten()])).reshape(xgrid.shape)
contours = contour_enclosing(x, y, contourFractions,xgrid, ygrid, zvals,ax1, **style)
# Plot marginal histograms.
if plothists:
style = {'histtype':'step', 'normed':True, 'color':'k'}
ax2.hist(x, histbinslist[0], **style)
ax3.hist(y, histbinslist[1], orientation='horizontal', **style)
# Plot lines to indicate max values.
if maxvalues is not None:
ax1.axvline(x=maxvalues[0], ls=':', c='k')
ax1.axhline(y=maxvalues[1], ls=':', c='k')
ax2.axvline(x=maxvalues[0], ls=':', c='k')
ax3.axhline(y=maxvalues[1], ls=':', c='k')
if scaleview:
ax2.relim()
ax3.relim()
ax1.relim()
ax2.autoscale_view(tight=True)
ax3.autoscale_view(tight=True)
ax1.autoscale_view(tight=True)
ax2.set_ylim(bottom=0)
ax3.set_xlim(left=0)
#Set labels
ax1.set_xlabel(labels[0],fontsize=35)
ax1.set_ylabel(labels[1],fontsize=35)
if plotcontours and labelcontours:
frac_label_contours(x, y, contours)
return fig1
def plot2Ddist2(x,y,ax1,contourNGrid=200,contourFractions=[0.68, 0.95],mode1='x',mode2='y',colour='blue',alp=0.5):
"""
Plot contours of 2D distribution
"""
# Plot 2D scatter of variables.
#ax1.plot(x, y,ls='',marker=',',color='r',alpha=0.15)
#Here we use kde to plot contours, might be better to use smoothing splines
style = {'linewidths':1.0, 'alpha':0.0,'zorder':10,'color':'k'}
gkde = scipy.stats.gaussian_kde([x,y])
# bw = gkde.covariance_factor()
# gkde = scipy.stats.gaussian_kde([x,y],bw_method=bw/10.0)
xgrid, ygrid = numpy.mgrid[min(x):max(x):contourNGrid * 1j,min(y):max(y):contourNGrid * 1j]
zvals = numpy.array(gkde.evaluate([xgrid.flatten(),ygrid.flatten()])).reshape(xgrid.shape)
contours = contour_enclosing(x, y, contourFractions,xgrid, ygrid, zvals,ax1, **style)
p1 = contours.collections[1].get_paths()[0]
v1 = p1.vertices
p2 = contours.collections[0].get_paths()[0]
v2 = p2.vertices
if (mode1 == 'fill'):
t1 = v1[-1,0]
v10 = v1[:,0]
q1 = argwhere(v10 < t1)
l1 = q1.size
v10app = v10[q1]
z1 = zeros([l1,1])
v11app = hstack((z1,v10app))
v1 = append(v1,v11app,axis=0)
if (mode2=='fill'):
t2 = v2[-1,0]
v20 = v2[:,0]
q2 = argwhere(v20 < t2)
l2 = q2.size
v20app = v20[q2]
z2 = zeros([l2,1])
v21app = hstack((z2,v20app))
v2 = append(v2,v21app,axis=0)
ax1.fill(v1[:,0],v1[:,1],colour,alpha=alp)
#ax1.fill(v2[:,0],v2[:,1],colour,alpha=0.5)
#Label the contours
#frac_label_contours(x, y, contours)
#ax1.fill()
# ax1.relim()
#ax1.autoscale_view(True,True,True)
return contours | gpl-3.0 |
rustychris/stompy | stompy/model/suntans/sun_driver.py | 1 | 113146 | import os
import glob
import copy
import subprocess
import six
from collections import defaultdict
import re
import xarray as xr
import numpy as np
import datetime
from matplotlib.dates import date2num, num2date
from ... import utils, memoize
#from ..delft import dflow_model as dfm
from .. import hydro_model as hm
from ..delft import dfm_grid
from ...grid import unstructured_grid
from ...spatial import linestring_utils
from . import store_file
import logging as log
try:
import pytz
utc = pytz.timezone('utc')
except ImportError:
log.warning("Couldn't load utc timezone")
utc = None
datenum_precision_per_s = 100 # 10ms - should be evenly divisible into 1e6
def dt_round(dt):
""" Given a datetime or timedelta object, round it to datenum_precision
"""
if isinstance(dt,datetime.timedelta):
td = dt
# days are probably fine
dec_seconds = td.seconds + 1e-6 * td.microseconds
# the correct number of time quanta
quanta = int(round(dec_seconds * datenum_precision_per_s))
# how to get that back to an exact number of seconds?
new_seconds = quanta // datenum_precision_per_s
# careful to keep it integer arithmetic
us_per_quanta = 1000000 // datenum_precision_per_s
new_microseconds = (quanta % datenum_precision_per_s) * us_per_quanta
return datetime.timedelta( days=td.days,
seconds = new_seconds,
microseconds = new_microseconds )
else:
# same deal, but the fields have slightly different names
# And the integer arithmetic cannot be used to count absolute seconds -
# that will overflow 32-bit ints (okay with 64, but better not
# to assume 64-bit ints are available)
dec_seconds = dt.second + 1e-6 * dt.microsecond
quanta = int(round(dec_seconds * datenum_precision_per_s))
# how to get that back to an exact number of seconds?
new_seconds = quanta // datenum_precision_per_s
# careful to keep it integer arithmetic
us_per_quanta = 1000000// datenum_precision_per_s
new_microseconds = (quanta % datenum_precision_per_s) * us_per_quanta
# to handle the carries between microseconds, seconds, days,
# construct an exact timedelta object - also avoids having to do
# int arithmetic with seconds over many days, which could overflow.
td = datetime.timedelta(seconds = new_seconds - dt.second,
microseconds = new_microseconds - dt.microsecond)
return dt + td
# certainly there is a better way to do this...
MultiBC=hm.MultiBC
StageBC=hm.StageBC
FlowBC=hm.FlowBC
VelocityBC=hm.VelocityBC
ScalarBC=hm.ScalarBC
SourceSinkBC=hm.SourceSinkBC
OTPSStageBC=hm.OTPSStageBC
OTPSFlowBC=hm.OTPSFlowBC
OTPSVelocityBC=hm.OTPSVelocityBC
HycomMultiVelocityBC=hm.HycomMultiVelocityBC
HycomMultiScalarBC=hm.HycomMultiScalarBC
NOAAStageBC=hm.NOAAStageBC
NwisFlowBC=hm.NwisFlowBC
NwisStageBC=hm.NwisStageBC
CdecFlowBC=hm.CdecFlowBC
CdecStageBC=hm.CdecStageBC
class GenericConfig(object):
""" Handles reading and writing of suntans.dat formatted files.
Older code I think was case-insensitive, but seems that it is
now case-sensitive.
"""
keys_are_case_sensitive=True
def __init__(self,filename=None,text=None):
""" filename: path to file to open and parse
text: a string containing the entire file to parse
"""
self.filename = filename
if filename:
fp = open(filename,'rt')
else:
fp = [s+"\n" for s in text.split("\n")]
self.entries = {}
self.originals = []
for line in fp:
# save original text so we can write out a new suntans.dat with
# only minor changes
self.originals.append(line)
i = len(self.originals)-1
m = re.match(r"^\s*((\S+)\s+(\S+))?\s*.*",line)
if m and m.group(1):
key = m.group(2)
if not self.keys_are_case_sensitive:
key=key.lower()
val = m.group(3)
self.entries[key] = [val,i]
if filename:
fp.close()
def copy(self):
# punt copy semantics and handling to copy module
return copy.deepcopy(self)
def conf_float(self,key):
return self.conf_str(key,float)
def conf_int(self,key,default=None):
x=self.conf_str(key,int)
if x is None:
return default
return x
def conf_str(self,key,caster=lambda x:x):
if not self.keys_are_case_sensitive:
key = key.lower()
if key in self.entries:
return caster(self.entries[key][0])
else:
return None
def __setitem__(self,key,value):
self.set_value(key,value)
def __getitem__(self,key):
return self.conf_str(key)
def __delitem__(self,key):
# if the line already exists, it will be written out commented, otherwise
# it won't be written at all.
self.set_value(key,None)
def __contains__(self,key):
return self[key] is not None
def get(self,key,default=None):
if key in self:
return self[key]
else:
return default
def __eq__(self,other):
return self.is_equal(other)
def is_equal(self,other,limit_to_keys=None):
# key by key equality comparison:
log.debug("Comparing two configs")
for k in self.entries.keys():
if limit_to_keys and k not in limit_to_keys:
continue
if k not in other.entries:
log.debug("Other is missing key %s"%k)
return False
elif self.val_to_str(other.entries[k][0]) != self.val_to_str(self.entries[k][0]):
log.debug("Different values key %s => %s, %s"%(k,self.entries[k][0],other.entries[k][0]))
return False
for k in other.entries.keys():
if limit_to_keys and k not in limit_to_keys:
continue
if k not in self.entries:
log.debug("other has extra key %s"%k)
return False
return True
def disable_value(self,key):
if not self.keys_are_case_sensitive:
key = key.lower()
if key not in self.entries:
return
old_val,i = self.entries[key]
self.originals[i] = "# %s"%(self.originals[i])
self.entries[key][0] = None
def val_to_str(self,value):
# make sure that floats are formatted with plenty of digits:
# and handle annoyance of standard Python types vs. numpy types
# But None stays None, as it gets handled specially elsewhere
if value is None:
return None
if isinstance(value,float) or isinstance(value,np.floating):
value = "%.12g"%value
else:
value = str(value)
return value
def set_value(self,key,value):
""" Update a value in the configuration. Setting an item to None will
comment out the line if it already exists, and omit the line if it does
not yet exist.
"""
if not self.keys_are_case_sensitive:
key = key.lower()
else:
if (key not in self.entries):
for other in self.entries:
if key.lower()==other.lower():
raise Exception("Probably a case-sensitive error: %s vs %s"%(key,other))
if key not in self.entries:
if value is None:
return
self.originals.append("# blank #")
i = len(self.originals) - 1
self.entries[key] = [None,i]
old_val,i = self.entries[key]
value = self.val_to_str(value)
if value is not None:
self.originals[i] = "%s %s # from sunreader code\n"%(key,value)
else:
self.originals[i] = "# " + self.originals[i]
self.entries[key][0] = value
def write_config(self,filename=None,check_changed=True,backup=True):
"""
Write this config out to a text file
filename: defaults to self.filename
check_changed: if True, and the file already exists and is not materially different,
then do nothing. Good for avoiding unnecessary changes to mtimes.
backup: if true, copy any existing file to <filename>.bak
"""
filename = filename or self.filename
if filename is None:
raise Exception("No clue about the filename for writing config file")
if check_changed:
if os.path.exists(filename):
existing_conf = self.__class__(filename)
if existing_conf == self:
log.debug("No change in config")
return
if os.path.exists(filename) and backup:
filename_bak = filename + ".bak"
os.rename(filename,filename_bak)
fp = open(filename,'wt')
for line in self.originals:
fp.write(line)
fp.close()
class SunConfig(GenericConfig):
def time_zero(self):
""" return python datetime for the when t=0 is"""
# try the old way, where these are separate fields:
start_year = self.conf_int('start_year')
start_day = self.conf_float('start_day')
if start_year is not None:
# Note: we're dealing with 0-based start days here.
start_datetime = datetime.datetime(start_year,1,1,tzinfo=utc) + dt_round(datetime.timedelta(start_day))
return start_datetime
# That failed, so try the other way
log.debug("Trying the new way of specifying t0")
s = self.conf_str('TimeZero') # 1999-01-01-00:00
start_datetime = datetime.datetime.strptime(s,'%Y-%m-%d-%H:%M')
start_datetime = start_datetime.replace(tzinfo=utc)
return start_datetime
def simulation_seconds(self):
return self.conf_float('dt') * self.conf_int('nsteps')
def timestep(self):
""" Return a timedelta object for the timestep - should be safe from roundoff.
"""
return dt_round( datetime.timedelta(seconds=self.conf_float('dt')) )
def simulation_period(self):
""" This is more naive than the SunReader simulation_period(), in that
it does *not* look at any restart information, just start_year, start_day,
dt, and nsteps
WARNING: this used to add an extra dt to start_date - maybe trying to make it
the time of the first profile output?? this seems like a bad idea. As of
Nov 18, 2012, it does not do that (and at the same time, moves to datetime
arithmetic)
return a pair of python datetime objects for the start and end of the simulation.
"""
t0 = self.time_zero()
# why did it add dt here???
# start_date = t0 + datetime.timedelta( self.conf_float('dt') / (24.*3600) )
# simulation_days = self.simulation_seconds() / (24.*3600)
# end_date = start_date + datetime.timedelta(simulation_days)
start_date = t0
end_date = start_date + self.conf_int('nsteps')*self.timestep()
return start_date,end_date
def copy_t0(self,other):
self.set_value('start_year',other.conf_int('start_year'))
self.set_value('start_day',other.conf_float('start_day'))
# def set_simulation_period(self,start_date,end_date):
# """ Based on the two python datetime instances given, sets
# start_day, start_year and nsteps
# """
# self.set_value('start_year',start_date.year)
# t0 = datetime.datetime( start_date.year,1,1,tzinfo=utc )
# self.set_value('start_day',date2num(start_date) - date2num(t0))
#
# # roundoff dangers here -
# # self.set_simulation_duration_days( date2num(end_date) - date2num(start_date))
# self.set_simulation_duration(delta=(end_date - start_date))
#
# def set_simulation_duration_days(self,days):
# self.set_simulation_duration(days=days)
# def set_simulation_duration(self,
# days=None,
# delta=None,
# seconds = None):
# """ Set the number of steps for the simulation - exactly one of the parameters should
# be specified:
# days: decimal number of days - DANGER - it's very easy to get some round-off issues here
# delta: a datetime.timedelta object.
# hopefully safe, as long as any differencing between dates was done with UTC dates
# (or local dates with no daylight savings transitions)
# seconds: total number of seconds - this should be safe, though there are some possibilities for
# roundoff.
#
# """
# print("Setting simulation duration:")
# print(" days=",days)
# print(" delta=",delta)
# print(" seconds=",seconds)
#
# # convert everything to a timedelta -
# if (days is not None) + (delta is not None) + (seconds is not None) != 1:
# raise Exception("Exactly one of days, delta, or seconds must be specified")
# if days is not None:
# delta = datetime.timedelta(days=days)
# elif seconds is not None:
# delta = datetime.timedelta(seconds=seconds)
#
# # assuming that dt is also a multiple of the precision (currently 10ms), this is
# # safe
# delta = dt_round(delta)
# print(" rounded delta = ",delta)
# timestep = dt_round(datetime.timedelta(seconds=self.conf_float('dt')))
# print(" rounded timestep =",timestep)
#
# # now we have a hopefully exact simulation duration in integer days, seconds, microseconds
# # and a similarly exact timestep
# # would like to do this:
# # nsteps = delta / timestep
# # but that's not supported until python 3.3 or so
# def to_quanta(td):
# """ return integer number of time quanta in the time delta object
# """
# us_per_quanta = 1000000 // datenum_precision_per_s
# return (td.days*86400 + td.seconds)*datenum_precision_per_s + \
# int( round( td.microseconds/us_per_quanta) )
# quanta_timestep = to_quanta(timestep)
# quanta_delta = to_quanta(delta)
#
# print(" quanta_timestep=",quanta_timestep)
# print(" quanta_delta=",quanta_delta)
# nsteps = quanta_delta // quanta_timestep
#
# print(" nsteps = ",nsteps)
# # double-check, going back to timedelta objects:
# err = nsteps * timestep - delta
# self.set_value('nsteps',int(nsteps))
# print("Simulation duration requires %i steps (rounding error=%s)"%(self.conf_int('nsteps'),err))
def is_grid_compatible(self,other):
""" Compare two config's, and return False if any parameters which would
affect grid partitioning/celldata/edgedata/etc. are different.
Note that differences in other input files can also cause two grids to be different,
esp. vertspace.dat
"""
# keep all lowercase
keys = ['Nkmax',
'stairstep',
'rstretch',
'CorrectVoronoi',
'VoronoiRatio',
'vertgridcorrect',
'IntDepth',
'pslg',
'points',
'edges',
'cells',
'depth',
# 'vertspace.dat.in' if rstretch==0
'topology.dat',
'edgedata',
'celldata',
'vertspace.dat']
return self.is_equal(other,limit_to_keys=keys)
class SuntansModel(hm.HydroModel):
# Annoying, but suntans doesn't like signed elevations
# this offset will be applied to grid depths and freesurface boundary conditions.
# This is error prone, though, and makes it difficult to "round-trip"
# grid information. In particular, if a new run is created by loading an old
# run, there will be an issue where the grid may get z_offset applied twice.
# This should be reimplemented as a z_datum. So no behind-the-scenes offsets,
# just have a standardized place for saying that my model's z=0 is z_offset
# from z_datum, e.g. z_datum='NAVD88' and z_offset.
# maybe the appropriate thing is a dictionary, mapping datum names to offsets.
# like z_datum['NAVD88']=-5.
z_offset=0.0
ic_ds=None
met_ds=None
# None: not a restart, or
# path to suntans.dat for the run being restarted, or True if this is
# a restart but we don't we have a separate directory for the restart,
# just StartFiles
restart=None
restart_model=None # model instance being restarted
restart_symlink=True # default to symlinking restarts
# for partition, run, etc.
sun_bin_dir=None
mpi_bin_dir=None
# 'auto': the grid and projection information will be used to
# update the coriolis parameter.
# None: leave whatever value is in the template
# <float>: use that as the coriolis parameter
coriolis_f='auto'
# experimental -- not yet working.
# the suntans code does not yet remap edge data from the original
# order to the -g ordering (which is different, even when running
# single-core).
use_edge_depths=False # write depth data per-edge in a separate file.
def __init__(self):
super(SuntansModel,self).__init__()
self.load_template(os.path.join(os.path.dirname(__file__),"data","suntans.dat"))
@property
def time0(self):
self.config['starttime']
dt=datetime.datetime.strptime(self.config['starttime'],
"%Y%m%d.%H%M%S")
return utils.to_dt64(dt)
def create_restart(self,symlink=True):
new_model=self.__class__() # in case of subclassing
# SuntansModel()
new_model.config=self.config.copy()
# things that have to match up, but are not part of the config:
new_model.num_procs=self.num_procs
new_model.restart=self.config_filename
new_model.restart_model=self
new_model.restart_symlink=symlink
# There is some extra machinery in load_grid(...) to get the right cell and
# edge depths -- this call would lose those
# new_model.set_grid(unstructured_grid.UnstructuredGrid.read_suntans(self.run_dir))
# So copy the grid we already have.
# UnstructuredGrid.copy() is naive and doesn't get all the depth fields, so
# here just pass self.grid, even though it may get mutated.
new_model.set_grid(self.grid)
new_model.run_start=self.restartable_time()
return new_model
@classmethod
def run_completed(cls,fn):
"""
fn: path to either folder containing suntans.dat, or path
to suntans.dat itself.
returns: True if the file exists and the folder contains a run which
ran to completion. Otherwise False.
"""
if not os.path.exists(fn):
return False
if os.path.isdir(fn):
fn=os.path.join(fn,"suntans.dat")
if not os.path.exists(fn):
return False
model=cls.load(fn)
if model is None:
return False
return model.is_completed()
def is_completed(self):
step_fn=os.path.join(self.run_dir,self.config['ProgressFile'])
if not os.path.exists(step_fn):
return False
with open(step_fn,'rt') as fp:
progress=fp.read()
return "100% Complete" in progress
def set_grid(self,grid):
"""
read/load grid, check for depth data and edge marks.
This does not apply the z_offset -- that is only
applied during writing out the rundata.
"""
if isinstance(grid,six.string_types):
# step in and load as suntans, rather than generic
grid=unstructured_grid.SuntansGrid(grid)
# depending on the source of the grid, it may need edges flipped
# to be consistent with suntans expectations that nc1 is always into
# the domain, and nc2 may be external
grid.orient_edges()
super(SuntansModel,self).set_grid(grid)
# 2019-05-29: trying to transition to using z for elevation, since
# 'depth' has a positive-down connotation
# make sure we have the fields expected by suntans
if 'z_bed' not in grid.cells.dtype.names:
if 'depth' in grid.cells.dtype.names:
self.log.warning("For now, assuming that cells['depth'] is positive up")
cell_z_bed=grid.cells['depth']
elif 'z_bed' in grid.nodes.dtype.names:
cell_z_bed=grid.interp_node_to_cell(grid.nodes['z_bed'])
# and avoid overlapping names
grid.delete_node_field('z_bed')
elif 'depth' in grid.nodes.dtype.names:
cell_z_bed=grid.interp_node_to_cell(grid.nodes['depth'])
self.log.warning("For now, assuming that nodes['depth'] is positive up")
else:
self.log.warning("No depth information in grid nodes or cells. Creating zero-depth")
cell_z_bed=np.zeros(grid.Ncells(),np.float64)
grid.add_cell_field('z_bed',cell_z_bed)
# with the current suntans version, depths are on cells, but model driver
# code in places wants an edge depth. so copy those here.
e2c=grid.edge_to_cells() # this is assumed in other parts of the code that do not recalculate it.
nc1=e2c[:,0].copy() ; nc2=e2c[:,1].copy()
nc1[nc1<0]=nc2[nc1<0] ; nc2[nc2<0]=nc1[nc2<0]
# edge depth is shallower of neighboring cells
# these depths are still positive up, though.
edge_z_bed=np.maximum(grid.cells['z_bed'][nc1],grid.cells['z_bed'][nc2])
if 'edge_z_bed' in grid.edges.dtype.names:
deep_edges=(grid.edges['edge_z_bed']<edge_z_bed)
if np.any(deep_edges):
self.log.info("%d edges had a specified depth deeper than neighboring cells. Replaced them"%
deep_edges.sum())
grid.edges['edge_z_bed'][deep_edges]=edge_z_bed[deep_edges]
else:
grid.add_edge_field('edge_z_bed',edge_z_bed)
if 'mark' not in grid.edges.dtype.names:
mark=np.zeros( grid.Nedges(), np.int32)
grid.add_edge_field('mark',mark)
self.grid=grid
self.set_default_edge_marks()
def set_default_edge_marks(self):
# update marks to a reasonable starting point
e2c=self.grid.edge_to_cells()
bc_edge=e2c.min(axis=1)<0
mark=self.grid.edges['mark']
mark[mark<0] = 0
mark[ (mark==0) & bc_edge ] = 1
# allow other marks to stay
self.grid.edges['mark'][:]=mark
def edge_depth(self,j,datum=None):
"""
Return the bed elevation for edge j, in meters, positive=up.
Suntans implementation relies on set_grid() having set edge depths
to be the min. of neighboring cells
"""
z=self.grid.edges['edge_z_bed'][j]
if datum is not None:
if datum=='eta0':
z+=self.initial_water_level()
return z
@classmethod
def load(cls,fn,load_grid=True,load_met=False,load_ic=False,load_bc=False):
"""
Open an existing model setup, from path to its suntans.dat
return None if run could not be loaded.
load_met: if true, load an existing Met netcdf file to self.met_ds
load_ic: likewise for initial conditions
load_bc: likewise for boundary conditions
"""
model=cls()
if os.path.isdir(fn):
fn=os.path.join(fn,'suntans.dat')
if not os.path.exists(fn):
return None
model.load_template(fn)
model.set_run_dir(os.path.dirname(fn),mode='existing')
# infer number of processors based on celldata files
# for restarts, this is overridden in infer_restart() by looking
# at the number of restart files, since in some scripts those
# are created earlier, while the celldata files aren't created until
# partition is called.
sub_cells=glob.glob( os.path.join(model.run_dir,'celldata.dat.*') )
if len(sub_cells)>0:
model.num_procs=len(sub_cells)
else:
# probably better to test whether it has even been processed
model.num_procs=1
model.infer_restart()
model.set_times_from_config()
# This will need some tweaking to fail gracefully
if load_grid:
try:
model.load_grid()
except OSError:
# this may be too strict -- a multiproc run could be fine but not
# necessarily have the global grid.
return None
if load_met:
model.load_met_ds()
if load_ic:
model.load_ic_ds()
if load_bc:
model.load_bc_ds()
return model
def load_grid(self):
"""
Set self.grid from existing suntans-format grid in self.run_dir.
"""
g=unstructured_grid.UnstructuredGrid.read_suntans(self.run_dir)
# hacked in support to read cell depths
cell_depth_fn=self.file_path('depth')+"-voro"
if ( ('z_bed' not in g.cells.dtype.names)
and
(os.path.exists(cell_depth_fn)) ):
self.log.debug("Will read cell depths, too")
cell_xyz=np.loadtxt(cell_depth_fn)
assert cell_xyz.shape[0]==g.Ncells(),"%s didn't have the right number of cells (%d vs %d)"%(cell_depth_fn,
cell_xyz.shape[0],
g.Ncells())
# cell centers can be a bit lenient in case there are centroid vs. circumcenter vs nudged
# differences.
if not np.allclose(cell_xyz[:,:2], g.cells_center()):
self.log.warning("%s cell locations don't match grid"%cell_depth_fn)
self.log.warning("Will forge ahead nevertheless")
# on disk these are positive down, but model driver convention is positive up
# (despite being called depth...)
g.add_cell_field('z_bed',-cell_xyz[:,2])
g.add_cell_field('depth',-cell_xyz[:,2]) # will be phased out
# hacked in support to read depth on edges
edge_depth_fn=self.file_path('depth')+"-edge"
if ( ('edge_z_bed' not in g.edges.dtype.names)
and
(os.path.exists(edge_depth_fn)) ):
self.log.debug("Will read edge depths, too")
edge_xyz=np.loadtxt(edge_depth_fn)
assert edge_xyz.shape[0]==g.Nedges(),"%s didn't have the right number of edges (%d vs %d)"%(edge_depth_fn,
edge_xyz.shape[0],
g.Nedges())
assert np.allclose(edge_xyz[:,:2], g.edges_center()),"%s edge locations don't match"%edge_depth_fn
# on disk these are positive down, but model driver convention is positive up
# (despite being called depth...) -- in the process of using edge_z_bed in the driver
# script to make the sign convention more apparent.
# g.add_edge_field('edge_depth',-edge_xyz[:,2]) # being phased out
g.add_edge_field('edge_z_bed',-edge_xyz[:,2])
self.set_grid(g)
return g
def infer_restart(self):
"""
See if this run is a restart.
Sets self.restart to:
None: not a restart
True: is a restart, but insufficient information to find the parent run
string: path to suntans.dat for the parent run
"""
if self.config['StartFile'] is None:
# Possibly not a valid config file
self.restart=None
return
start_path=os.path.join(self.run_dir,self.config['StartFile']+".0")
if os.path.exists(start_path):
log.debug("Looks like a restart")
self.restart=True
# Get num_procs from the number of restart files.
for proc in range(1024):
fn=os.path.join(self.run_dir,self.config['StartFile']+".%d"%proc)
if not os.path.exists(fn):
break
self.num_procs=proc
log.debug("Restart appears to have %d subdomains"%self.num_procs)
if os.path.islink(start_path):
start_path=os.path.realpath(start_path)
parent_dir=os.path.dirname(start_path)
assert not os.path.samefile(parent_dir,self.run_dir)
parent_sun=os.path.join(parent_dir,"suntans.dat")
if os.path.exists(parent_sun):
log.debug("And the previous suntans.dat: %s"%parent_sun)
self.restart=parent_sun
else:
log.info("Checked for %s but no luck"%parent_sun)
else:
log.info("Restart file %s is not a link"%start_path)
else:
log.debug("Does not look like a restart based on %s"%start_path)
self.restart=None
def chain_restarts(self,count=None,load_grid=False):
"""
return a list of up to count (None: unlimited) Model instances
in forward chronological order of consecutive restarts.
load_grid: defaults to *not* loading the grid of the earlier runs.
The last item is always self.
count: either the count of how many runs to return, or a np.datetime64
such that we'll go back to a run covering that date if possible.
if this is a tuple of datetimes, only return the runs covering that time
range.
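Example sketch (date is illustrative):
runs = model.chain_restarts(count=np.datetime64("2016-06-01"))
which returns the consecutive runs reaching back to the run covering that
date (when available), ending with self.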
"""
runs=[self]
run=self
while 1:
if isinstance(count,np.datetime64):
if runs[0].run_start <=count:
break
elif isinstance(count,tuple):
if runs[0].run_start < count[0]:
break
elif count and len(runs)>=count:
break
run.infer_restart()
if run.restart and run.restart is not True:
run=SuntansModel.load(run.restart,load_grid=load_grid)
runs.insert(0,run)
else:
break
if isinstance(count,tuple):
# Trim runs coming after the requested period
runs=[run for run in runs if run.run_start<count[1]]
if len(runs)==0:
log.warning("chain_restarts wound up with zero runs for count=%s"%str(count))
return runs
def chain_start(self,count=None):
"""
Analog of run_start, but across chained restarts.
count is passed to chain_restarts().
"""
runs=self.chain_restarts(count=count)
return runs[0].run_start
def chain_stop(self,count=None):
"""
Analog of run_stop, but across chained restarts.
Included for completeness, but this is always the same
as self.run_stop (since we don't chain forward in time).
"""
return self.run_stop
def load_template(self,fn):
self.template_fn=fn
self.config=SunConfig(fn)
def set_run_dir(self,path,mode='create'):
assert mode!='clean',"Suntans driver doesn't know what clean is"
return super(SuntansModel,self).set_run_dir(path,mode)
def file_path(self,key,proc=None):
fn=os.path.join(self.run_dir,self.config[key])
if proc is not None:
fn+=".%d"%proc
return fn
@property
def config_filename(self):
return os.path.join(self.run_dir,"suntans.dat")
def write_config(self):
log.info("Writing config to %s"%self.config_filename)
self.config.write_config(self.config_filename)
def write_monitor(self):
if not self.mon_points: return
xys=[ np.array(feat['geom']) for feat in self.mon_points]
valid_xys=[xy
for xy in xys
if self.grid.select_cells_nearest(xy,inside=True) is not None]
np.savetxt( os.path.join(self.run_dir,self.config['DataLocations']),
np.array(valid_xys) )
def write(self):
self.update_config()
self.write_config()
self.write_monitor()
self.write_extra_files()
self.write_forcing()
# Must come after write_forcing() to allow BCs to modify grid
self.write_grid()
# Must come after write_forcing(), to get proper grid and to
# have access to freesurface BCs
if self.restart:
self.log.info("Even though this is a restart, write IC")
# There are times that it is useful to be able to read the IC
# back in, e.g. to set a boundary condition equal to its initial
# condition. For a restart, this would ideally be the same state
# as in the StartFiles. That's going to take some work for
# relatively little gain. So just create the IC as if this was
# not a restart.
self.write_ic()
if self.restart:
self.write_startfiles()
def initialize_initial_condition(self):
"""
Populate self.ic_ds with a baseline initial condition.
This should be called after all boundary conditions are in place.
"""
self.ic_ds=self.zero_initial_condition()
self.set_initial_h_from_bc()
def write_ic(self):
"""
Will have to think about how best to order this -- really need
to set this as a zero earlier on, and then have some known time
for the script to modify it, before finally writing it out here.
"""
# Creating an initial condition netcdf file:
if self.ic_ds is None:
self.initialize_initial_condition()
self.write_ic_ds()
def write_startfiles(self):
src_base=os.path.join(os.path.dirname(self.restart),
self.restart_model.config['StoreFile'])
dst_base=os.path.join(self.run_dir,self.config['StartFile'])
for proc in range(self.num_procs):
src=src_base+".%d"%proc
dst=dst_base+".%d"%proc
self.restart_copier(src,dst)
def copy_ic_to_bc(self,ic_var,bc_var):
"""
Copy IC values to the boundary conditions
Copies data for the given IC variable (e.g. 'salt'), to
open and flow boundaries for bc_var (e.g. 'S').
for flow boundaries, 'boundary_' is prepended to bc_var.
The initial condition is copied into bc_ds for all time steps,
and all layers.
"""
# Open boundaries
for ci,c in enumerate(utils.progress(self.bc_ds.cellp.values,msg="IC=>Open BCs")):
ic_values = self.ic_ds[ic_var].values[0,:,c]
self.bc_ds[bc_var].isel(Ntype3=ci).values[:,:]=ic_values[None,:]
# Flow boundaries
for ei,e in enumerate(utils.progress(self.bc_ds.edgep.values,msg="IC=>Flow BCs")):
c=self.grid.edges['cells'][e,0]
assert c>=0,"Is this edge flipped"
ic_values=self.ic_ds[ic_var].values[0,:,c]
self.bc_ds["boundary_"+bc_var].isel(Ntype2=ei).values[:,:]=ic_values[None,:]
def write_ic_ds(self):
self.ic_ds.to_netcdf( os.path.join(self.run_dir,self.config['initialNCfile']) )
def load_ic_ds(self):
fn=os.path.join(self.run_dir,self.config['initialNCfile'])
if not os.path.exists(fn): return False
self.ic_ds=xr.open_dataset(fn)
def set_initial_h_from_bc(self):
"""
prereq: self.bc_ds has been set.
"""
if len(self.bc_ds.Ntype3)==0:
log.warning("Cannot set initial h from BC because there are no type 3 edges")
return
time_i=np.searchsorted(self.bc_ds.time.values,self.run_start)
# both bc_ds and ic_ds should already incorporate the depth offset, so
# no further adjustment here.
h=self.bc_ds.h.isel(Nt=time_i).mean().values
# this is positive down, already shifted, clipped.
#cell_depths=self.ic_ds['dv'].values
# This led to drying issues in 3D, and ultimately was not the fix
# for issues in 2D
#self.ic_ds.eta.values[:]=np.maximum(h,-cell_depths)
self.ic_ds.eta.values[...]=h
log.info("Setting initial eta from BCs, value=max(z_bed,%.4f) (including z_offset of %.2f)"%(h,self.z_offset))
def write_forcing(self,overwrite=True):
# these map to lists of BCs, in case there are BC with mode='add'
# map edge to BC data
self.bc_type2=defaultdict(lambda: defaultdict(list)) # [<edge index>][<variable>]=>[DataArray,...]
# map cell to BC data
self.bc_type3=defaultdict(lambda: defaultdict(list)) # [<cell index>][<variable>]=>[DataArray,...]
# Flow BCs are handled specially since they apply across a group of edges
# Each participating edge should have an entry in bc_type2,
# [<edge index>]["Q"]=>"segment_name"
# and a corresponding entry in here:
self.bc_type2_segments=defaultdict(lambda: defaultdict(list)) # [<segment name>][<variable>]=>[DataArray,...]
# point sources.
# indexed by a tuple of (cell,k)
# [(cell,k][<variable>] => [DataArray]
self.bc_point_sources=defaultdict(lambda: defaultdict(list))
super(SuntansModel,self).write_forcing()
# Get a time series that's the superset of all given timeseries
all_times=[]
# edge, cells, groups of edges
for bc_typ in [self.bc_type2,self.bc_type3,self.bc_type2_segments]:
for bc in bc_typ.values(): # each edge idx/cell idx/segment name
for vlist in bc.values(): # each variable on that edge/cell/segment
for v in vlist: #list of BCs for this variable on this element
if isinstance(v,six.string_types):
# type2 edges which reference a segment have no
# time series of their own.
continue
if 'time' in v.dims:
all_times.append( v['time'].values )
if all_times:
common_time=np.unique(np.concatenate(all_times))
else:
# no boundary conditions have times, so fabricate.
common_time=np.array( [self.run_start,self.run_stop] )
# Make sure that brackets the run:
pad=np.timedelta64(1,'D')
if common_time[0]>=self.run_start:
common_time=np.concatenate(( [self.run_start-pad],
common_time ))
# make sure there are *two* times beyond the end for quadratic
# interpolation
while len(common_time)<3 or common_time[-2]<=self.run_stop:
if common_time[-1]<self.run_stop+pad:
new_time=self.run_stop+pad
else:
new_time=common_time[-1]+pad
common_time=np.concatenate((common_time,[new_time]))
# SUNTANS applies quadratic interpolation in time, so it requires at least
# 3 time values - seems that it wants one time before and two times after
# the current time.
assert len(common_time)>2
self.bc_time=common_time
self.bc_ds=self.compile_bcs()
self.write_bc_ds()
if self.met_ds is None:
self.met_ds=self.zero_met()
self.write_met_ds()
def ds_time_units(self):
"""
setting for how to write time to netcdf
specifically as suntans expects. pass as
...
encoding=dict(time={'units':self.ds_time_units()}),
...
in xarray dataset to_netcdf(..)
"""
basetime=self.config['basetime']
assert len(basetime)==15 # YYYYMMDD.hhmmss
time_units="seconds since %s-%s-%s %s:%s:%s"%(basetime[0:4],
basetime[4:6],
basetime[6:8],
basetime[9:11],
basetime[11:13],
basetime[13:15])
return time_units
def write_bc_ds(self):
self.bc_ds.to_netcdf( os.path.join(self.run_dir,
self.config['netcdfBdyFile']),
encoding=dict(time={'units':self.ds_time_units()}))
def load_bc_ds(self):
fn=os.path.join(self.run_dir,
self.config['netcdfBdyFile'])
if not os.path.exists(fn): return False
self.bc_ds=xr.open_dataset(fn)
return self.bc_ds
def write_met_ds(self):
fn=os.path.join(self.run_dir,
self.config['metfile'])
if os.path.exists(fn):
log.info("Will replace %s"%fn)
os.unlink(fn)
else:
log.debug("Writing met ds to %s"%fn)
log.debug(str(self.met_ds))
self.met_ds.to_netcdf( fn,
encoding=dict(nt={'units':self.ds_time_units()},
Time={'units':self.ds_time_units()}) )
def load_met_ds(self):
fn=os.path.join(self.run_dir,
self.config['metfile'])
if not os.path.exists(fn): return False
self.met_ds=xr.open_dataset(fn)
def layer_data(self,with_offset=False,edge_index=None,cell_index=None,z_bed=None):
"""
Returns layer data without z_offset applied, and
positive up.
with no additional arguments, returns global information. edge_index or
cell_index will use a z_bed based on that element. z_bed is used to clip
z layers. z_bed should be a positive-up quantity. A specified z_bed
        takes precedence over edge_index or cell_index.
Returns a xr.Dataset
with z_min, z_max, Nk, z_interface, z_mid.
z_interface and z_mid are ordered surface to bed.
if with_offset is True, the z_offset is included, which yields
more accurate (i.e. similar to suntans) layers when there is stretching
"""
if z_bed is None:
if edge_index is not None:
z_bed=self.grid.edge_depths()[edge_index]
elif cell_index is not None:
z_bed=self.grid.cell_depths()[cell_index]
Nk=int(self.config['Nkmax'])
z_min=self.grid.cells['z_bed'].min() # bed
z_max=self.grid.cells['z_bed'].max() # surface
r=float(self.config['rstretch'])
if with_offset:
z_min-=self.z_offset
z_max=0
depth=-z_min # positive:down
dzs=np.zeros(Nk, np.float64)
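        # With rstretch r>1 the thicknesses form a geometric series
        # dz_k = dz_0 * r**k, with dz_0 = depth*(r-1)/(r**Nk - 1) so that
        # sum_{k=0..Nk-1} dz_k = depth exactly.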
if r>1.0:
dzs[0]=depth*(r-1)/(r**Nk-1)
for k in range(1,Nk):
dzs[k]=r*dzs[k-1]
else:
dzs[:]=depth/float(Nk)
z_interface=np.concatenate( ( [z_max],
z_max-np.cumsum(dzs) ) )
z_mid=0.5*(z_interface[:-1]+z_interface[1:])
ds=xr.Dataset()
ds['z_min']=(),z_min
ds['z_max']=(),z_max
ds['z_interface']=('Nkp1',),z_interface
ds['z_mid']=('Nk',),z_mid
for v in ['z_min','z_max','z_interface','z_mid']:
ds[v].attrs['positive']='up'
return ds
def compile_bcs(self):
"""
Postprocess the information from write_forcing()
to create the BC netcdf dataset.
Note that bc_ds includes the z_offset.
"""
ds=xr.Dataset()
layers=self.layer_data()
Nk=layers.dims['Nk']
ds['z']=('Nk',),-(layers.z_mid.values + self.z_offset)
# suntans assumes that this dimension is Nt, not time
Nt=len(self.bc_time)
ds['time']=('Nt',),self.bc_time
# Scalars will introduce type3 and type2 because they may not know
# what type of flow forcing is there. Here we skim out scalars that
# do not have an associated h (type3) or flow (type2) boundary
# the list(...keys()) part is to make a copy, so the del's
# don't upset the iteration
for cell in list(self.bc_type3.keys()):
if 'h' not in self.bc_type3[cell]:
del self.bc_type3[cell]
# 'u' 'v' and 'Q' for type2
for edge in list(self.bc_type2.keys()):
if not ( ('u' in self.bc_type2[edge]) or
('v' in self.bc_type2[edge]) or
('Q' in self.bc_type2[edge])):
del self.bc_type2[edge]
Ntype3=len(self.bc_type3)
ds['cellp']=('Ntype3',),np.zeros(Ntype3,np.int32)-1
ds['xv']=('Ntype3',),np.zeros(Ntype3,np.float64)
ds['yv']=('Ntype3',),np.zeros(Ntype3,np.float64)
# the actual data variables for type 3:
ds['uc']=('Nt','Nk','Ntype3',),np.zeros((Nt,Nk,Ntype3),np.float64)
ds['vc']=('Nt','Nk','Ntype3',),np.zeros((Nt,Nk,Ntype3),np.float64)
ds['wc']=('Nt','Nk','Ntype3',),np.zeros((Nt,Nk,Ntype3),np.float64)
ds['T']=('Nt','Nk','Ntype3',),20*np.ones((Nt,Nk,Ntype3),np.float64)
ds['S']=('Nt','Nk','Ntype3',),np.zeros((Nt,Nk,Ntype3),np.float64)
ds['h']=('Nt','Ntype3'),np.zeros( (Nt, Ntype3), np.float64 )
def interp_time(da):
if 'time' not in da.dims: # constant value
# this should do the right thing for both scalar and vector
# values
return da.values * np.ones( (Nt,)+da.values.shape )
if da.ndim==2:
assert da.dims[0]=='time'
# recursively call per-layer, which is assumed to be the second
# dimension
profiles=[ interp_time(da[:,i]) for i in range(da.shape[1]) ]
return np.vstack(profiles).T
return np.interp( utils.to_dnum(ds.time.values),
utils.to_dnum(da.time.values), da.values )
import time
elapsed=[0.0]
def combine_items(values,bc_items,offset=0.0):
base_item=None
# include the last mode='overwrite' bc, and sum the mode='add'
# bcs.
values[:]=offset
# aside from Q and h, other variables are 3D, which means
# that if the data comes back 2D, pad out the layer dimension
def pad_dims(data):
if values.ndim==2 and data.ndim==1:
return data[:,None] # broadcastable vertical dimension
else:
return data
for bc_item in bc_items:
if bc_item.mode=='add':
t0=time.time()
values[:] += pad_dims(interp_time(bc_item))
elapsed[0]+=time.time()-t0
else:
base_item=bc_item
if base_item is None:
self.log.warning("BC for cell %d has no overwrite items"%type3_cell)
else:
t0=time.time()
values[:] += pad_dims(interp_time(base_item))
elapsed[0]+=time.time()-t0
cc=self.grid.cells_center()
for type3_i,type3_cell in enumerate(self.bc_type3): # each edge/cell
ds['cellp'].values[type3_i]=type3_cell
ds['xv'].values[type3_i]=cc[type3_cell,0]
ds['yv'].values[type3_i]=cc[type3_cell,1]
bc=self.bc_type3[type3_cell]
for v in bc.keys(): # each variable on that edge/cell
if v=='h':
offset=self.z_offset
else:
offset=0
# will set bc values in place
combine_items(ds[v].isel(Ntype3=type3_i).values,
bc[v],
offset=offset)
Ntype2=len(self.bc_type2)
Nseg=len(self.bc_type2_segments)
ds['edgep']=('Ntype2',),np.zeros(Ntype2,np.int32)-1
ds['xe']=('Ntype2',),np.zeros(Ntype2,np.float64)
ds['ye']=('Ntype2',),np.zeros(Ntype2,np.float64)
ds['boundary_h']=('Nt','Ntype2'),np.zeros( (Nt, Ntype2), np.float64) + self.z_offset
ds['boundary_u']=('Nt','Nk','Ntype2'),np.zeros( (Nt, Nk, Ntype2), np.float64)
ds['boundary_v']=('Nt','Nk','Ntype2'),np.zeros( (Nt, Nk, Ntype2), np.float64)
ds['boundary_w']=('Nt','Nk','Ntype2'),np.zeros( (Nt, Nk, Ntype2), np.float64)
ds['boundary_T']=('Nt','Nk','Ntype2'),np.zeros( (Nt, Nk, Ntype2), np.float64)
ds['boundary_S']=('Nt','Nk','Ntype2'),np.zeros( (Nt, Nk, Ntype2), np.float64)
ds['boundary_Q']=('Nt','Nseg'),np.zeros( (Nt, Nseg), np.float64)
# Iterate over segments first, so that edges below can grab the correct
# index.
segment_names=list(self.bc_type2_segments.keys()) # this establishes the order of the segments
# make this distinct from 0 or 1 to aid debugging
segment_ids=100 + np.arange(len(segment_names))
ds['seg_name']=('Nseg',),segment_names # not read by suntans, but maybe helps debugging
ds['segedgep']=('Ntype2',),np.zeros(Ntype2,np.int32)-1
ds['segp']=('Nseg',),segment_ids # np.arange(Nseg,dtype=np.int32)
for seg_i,seg_name in enumerate(segment_names):
bc=self.bc_type2_segments[seg_name]
for v in bc.keys(): # only Q, but stick to the same pattern
combine_items(ds['boundary_'+v].isel(Nseg=seg_i).values,
bc[v])
ec=self.grid.edges_center()
for type2_i,type2_edge in enumerate(self.bc_type2): # each edge
ds['edgep'].values[type2_i]=type2_edge
ds['xe'].values[type2_i]=ec[type2_edge,0]
ds['ye'].values[type2_i]=ec[type2_edge,1]
bc=self.bc_type2[type2_edge]
for v in bc.keys(): # each variable on that edge/cell
if v=='h':
offset=self.z_offset
else:
offset=0.0
if v!='Q':
combine_items(ds['boundary_'+v].isel(Ntype2=type2_i).values,
bc[v],offset)
else:
seg_name=bc[v]
# too lazy to work through the right way to deal with combined
# bcs for Q right now, so just warn the user that it may be
# a problem.
if len(seg_name)!=1:
log.warning("Only tested with a single value, but got %s"%str(seg_name))
seg_name=seg_name[0]
seg_idx=segment_ids[segment_names.index(seg_name)]
ds['segedgep'].values[type2_i] = seg_idx
# -- Set grid marks --
for c in ds.cellp.values:
assert c>=0
for j in self.grid.cell_to_edges(c):
j_cells=self.grid.edge_to_cells(j)
if j_cells.min()<0:# boundary
self.grid.edges['mark'][j]=3 # set to type 3
for j in ds.edgep.values:
assert j>=0,"Some edge pointers did not get set"
self.grid.edges['mark'][j]=2
# --- Point source code ---
Npoint=len(self.bc_point_sources)
ds['point_cell']=('Npoint',), np.zeros(Npoint,np.int32) # point_cell
ds['point_layer']=('Npoint',), np.zeros(Npoint,np.int32) # point_layer
ds['point_Q']=('Nt','Npoint'), np.zeros( (Nt,Npoint), np.float64) # np.stack(point_Q,axis=-1)
ds['point_S']=('Nt','Npoint'), np.zeros( (Nt,Npoint), np.float64) # np.stack(point_S,axis=-1)
ds['point_T']=('Nt','Npoint'), np.zeros( (Nt,Npoint), np.float64) # np.stack(point_T,axis=-1)
for pnt_idx,key in enumerate(self.bc_point_sources.keys()):
(c,k)=key
log.info("Point source for cell=%d, k=%d"%(c,k))
assert 'Q' in self.bc_point_sources[key]
combine_items(ds['point_Q'].isel(Npoint=pnt_idx).values,
self.bc_point_sources[key]['Q'])
ds['point_cell'].values[pnt_idx]=c
ds['point_layer'].values[pnt_idx]=k
# really shaky ground here..
if 'T' in self.bc_point_sources[key]:
combine_items( ds['point_T'].isel(Npoint=pnt_idx).values,
self.bc_point_sources[key]['T'] )
if 'S' in self.bc_point_sources[key]:
combine_items( ds['point_S'].isel(Npoint=pnt_idx).values,
self.bc_point_sources[key]['S'] )
# End new point source code
log.info("Total time in interp_time: %.3fs"%elapsed[0])
return ds
def write_bc(self,bc):
if isinstance(bc,hm.StageBC):
self.write_stage_bc(bc)
elif isinstance(bc,hm.SourceSinkBC):
self.write_source_sink_bc(bc)
elif isinstance(bc,hm.FlowBC):
self.write_flow_bc(bc)
elif isinstance(bc,hm.VelocityBC):
self.write_velocity_bc(bc)
elif isinstance(bc,hm.ScalarBC):
self.write_scalar_bc(bc)
else:
super(SuntansModel,self).write_bc(bc)
def write_stage_bc(self,bc):
water_level=bc.data()
assert len(water_level.dims)<=1,"Water level must have dims either time, or none"
cells=bc.grid_cells or self.bc_geom_to_cells(bc.geom)
for cell in cells:
self.bc_type3[cell]['h'].append(water_level)
def write_velocity_bc(self,bc):
# interface isn't exactly nailed down with the BC
# classes. whether the model wants vector velocity
# or normal velocity varies by model. could
# standardize on vector velocity, and project to normal
# here?
ds=bc.dataset()
edges=bc.grid_edges or self.bc_geom_to_edges(bc.geom)
for j in edges:
for comp in ['u','v']:
da=ds[comp]
da.attrs['mode']=bc.mode
self.bc_type2[j][comp].append(da)
def write_scalar_bc(self,bc):
da=bc.data()
scalar_name=bc.scalar
# canonicalize scalar names for suntans BC files
if scalar_name.lower() in ['salinity','salt','s']:
scalar_name='S'
elif scalar_name.lower() in ['temp','t','temperature']:
scalar_name='T'
else:
self.log.warning("Scalar %s is not S or T or similar"%scalar_name)
# scalars could be set on edges or cells, or points in cells
# this should be expanded to make more use of the information in bc.parent
# if that is set
if bc.geom.type=='Point':
self.log.info("Assuming that Point geometry on a scalar bc implies point source")
ck=self.bc_to_interior_cell_layer(bc) # (cell,layer) tuple
self.bc_point_sources[ck][scalar_name].append(da)
else:
# info is duplicated on type2 (flow) and type3 (stage) BCs, which
# is sorted out later.
for j in (bc.grid_edges or self.bc_geom_to_edges(bc.geom)):
self.bc_type2[j][scalar_name].append(da)
for cell in (bc.grid_cells or self.bc_geom_to_cells(bc.geom)):
self.bc_type3[cell][scalar_name].append(da)
def dredge_boundary(self,linestring,dredge_depth):
# Restarts appear to be making dredge calls. Not sure why.
print("Call to dredge_boundary, restart is",self.restart)
return super(SuntansModel,self).dredge_boundary(linestring,dredge_depth,
edge_field='edge_z_bed',
cell_field='z_bed')
def dredge_discharge(self,point,dredge_depth):
print("Call to dredge discharge, restart is",self.restart)
return super(SuntansModel,self).dredge_discharge(point,dredge_depth,
edge_field='edge_z_bed',
cell_field='z_bed')
def write_flow_bc(self,bc):
da=bc.data()
self.bc_type2_segments[bc.name]['Q'].append(da)
assert len(da.dims)<=1,"Flow must have dims either time, or none"
if (bc.dredge_depth is not None) and (self.restart is None):
log.info("Dredging grid for flow boundary %s"%bc.name)
self.dredge_boundary(np.array(bc.geom.coords),
bc.dredge_depth)
edges=(bc.grid_edges or self.bc_geom_to_edges(bc.geom))
for j in edges:
self.bc_type2[j]['Q'].append(bc.name)
def write_source_sink_bc(self,bc):
da=bc.data()
assert bc.geom.type=='Point',"Suntans driver does not support src/sink pair"
if (bc.dredge_depth is not None) and (self.restart is None):
# Additionally modify the grid to make sure there is a place for inflow to
# come in.
log.info("Dredging grid for source/sink BC %s"%bc.name)
self.dredge_discharge(np.array(bc.geom.coords),
bc.dredge_depth)
ck=self.bc_to_interior_cell_layer(bc) # (cell,layer) tuple
self.bc_point_sources[ck]['Q'].append(da)
assert len(da.dims)<=1,"Flow must have dims either time, or none"
def bc_geom_to_cells(self,geom):
""" geom: a LineString geometry. Return the list of cells interior
to the linestring
"""
cells=[]
for j in self.bc_geom_to_edges(geom):
j_cells=self.grid.edge_to_cells(j)
assert j_cells.min()<0
assert j_cells.max()>=0
cells.append(j_cells.max())
return cells
def bc_to_interior_cell_layer(self,bc):
"""
Determine the cell and layer for a source/sink BC.
"""
# TODO: use bc.z, which is either an elevation or a 'bed'
# to choose the layer
c=self.bc_geom_to_interior_cell(bc.geom)
self.log.warning("Assuming source/sink is at bed")
k=int(self.config['Nkmax'])-1
return (c,k)
def bc_geom_to_interior_cell(self,geom):
""" geom: a Point or LineString geometry. In the case of a LineString,
only the first point is used.
        return the index of the cell that the point or linestring node falls in
"""
coords=np.array(geom)
if coords.ndim==2:
coords=coords[0]
c=self.grid.select_cells_nearest(coords,inside=True)
assert c is not None,"%s did not match any cells. LineString may be reversed?"%str(coords)
return c
def bc_geom_to_edges(self,geom):
"""
geom: LineString geometry
return list of boundary edges adjacent to geom.
"""
return self.grid.select_edges_by_polyline(geom,update_e2c=False)
def set_times_from_config(self):
"""
Pull run_start,run_stop from a loaded config file.
"""
if self.restart:
start_files=self.start_inputs()
if start_files:
start=store_file.StoreFile(model=self,proc=0,filename=start_files[0])
self.run_start=start.time()
else:
# maybe we're constructing a restart? sequencing of this stuff,
# and the exact state of the model is quirky and under-designed
self.run_start=self.restart_model.restartable_time()
log.debug("Inferred start time of restart to be %s"%self.run_start)
else:
start_dt=datetime.datetime.strptime(self.config['starttime'],'%Y%m%d.%H%M%S')
self.run_start=utils.to_dt64(start_dt)
nsteps=int(self.config['nsteps'])
dt=np.timedelta64(1,'us') * int(1e6*float(self.config['dt']))
self.run_stop=self.run_start + nsteps*dt
def update_config(self):
assert self.config is not None,"Only support starting from template"
# Have to be careful about the difference between starttime,
# which reflects time0, and the start of the initial run,
# vs. run_start, which is when this simulation will begin
# (possibly restarting a prior simulation)
start_dt=utils.to_datetime(self.run_start)
end_dt=utils.to_datetime(self.run_stop)
# In the case of restarts, this needs to reflect the
# start of the first simulation, not a later restart.
if not self.restart:
self.config['starttime']=start_dt.strftime('%Y%m%d.%H%M%S')
else:
log.info("starttime pulled from previous run: %s"%self.config['starttime'])
restart_time=self.restart_model.restartable_time()
assert self.run_start==restart_time,"Configured sim start and restart timestamp don't match"
dt=np.timedelta64(1,'us') * int(1e6*float(self.config['dt']))
nsteps=(self.run_stop-self.run_start)/dt
log.info("Number of steps in this simulation: %d"%nsteps)
self.config['nsteps']=nsteps
max_faces=self.grid.max_sides
if int(self.config['maxFaces']) < max_faces:
log.debug("Increasing maxFaces to %d"%max_faces)
self.config['maxFaces']=max_faces
if self.coriolis_f=='auto':
if self.projection is None:
log.warning("No projection and coriolis_f is 'auto'. No coriolis!")
self.config['Coriolis_f']=0.0
else:
xy_ctr=self.grid.nodes['x'].mean(axis=0)
ll_ctr=self.native_to_ll(xy_ctr)
lat=ll_ctr[1]
# f=2*Omega*sin(phi)
Omega=7.2921e-5 # rad/s
f=2*Omega*np.sin(lat*np.pi/180.)
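                # e.g. at lat=38 deg N, f = 2*7.2921e-5*sin(38 deg) ~ 8.98e-5 s^-1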
self.config['Coriolis_f']="%.5e"%f
log.debug("Using %.2f as latitude for Coriolis => f=%s"%(lat,self.config['Coriolis_f']))
elif self.coriolis_f is not None:
self.config['Coriolis_f']=self.coriolis_f
if len(self.mon_points):
self.config['numInterpPoints']=1
self.config['DataLocations']='profile_locs.dat'
self.config['NkmaxProfs']=0 # all layers
self.config['ProfileDataFile']="profdata.dat"
# could also make sure that config['ProfileVariables'] has a default like 'hu'
# and ntoutProfs has a reasonable value.
def restart_copier(self,src,dst,on_exists='replace_link'):
"""
src: source file for copy, relative to present working dir
dst: destination.
will either symlink or copy src to dst, based on self.restart_symlink
setting
In order to avoid a limit on chained symlinks, symlinks will point to
the original file.
"""
if os.path.lexists(dst):
# Allow replacing symlinks, but if dst is a real file, bail out
# to avoid anything sketchy
if on_exists=='replace_link' and os.path.islink(dst):
os.unlink(dst)
elif on_exists=='replace':
os.unlink(dst)
elif on_exists=='fail':
raise Exception("Restart copier %s=>%s found destination already exists. on_exists='fail'"%(src,dst))
else:
raise Exception("Unknown option for on_exists: %s"%on_exists)
if self.restart_symlink:
# this ensures that we don't build up long chains of
# symlinks
src=os.path.realpath(src)
src_rel=os.path.relpath(src,self.run_dir)
os.symlink(src_rel,dst)
else:
shutil.copyfile(src,dst)
def write_grid(self):
if not self.restart:
# Write a grid that suntans will read:
self.grid.write_suntans_hybrid(self.run_dir,overwrite=True,z_offset=self.z_offset)
self.write_grid_bathy()
else:
parent_base=os.path.dirname(self.restart)
for fn in ['cells.dat','edges.dat',
'points.dat','depths.dat-voro']:
self.restart_copier(os.path.join(parent_base,fn),
os.path.join(self.run_dir,fn))
def write_grid_bathy(self):
# And write cell bathymetry separately
# This filename is hardcoded into suntans, not part of
# the settings in suntans.dat (maybe it can be overridden?)
cell_depth_fn=os.path.join(self.run_dir,"depths.dat-voro")
cell_xy=self.grid.cells_center()
# make depth positive down
z=-(self.grid.cells['z_bed'] + self.z_offset)
min_depth=0+float(self.config['minimum_depth'])
shallow=z<min_depth
if np.any(shallow):
log.warning("%d of %d cell depths extend above z=0 even with offset of %.2f"%(np.sum(shallow),
len(shallow),
self.z_offset))
z=z.clip(min_depth,np.inf)
cell_xyz=np.c_[cell_xy,z]
np.savetxt(cell_depth_fn,cell_xyz) # space separated
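        # each row of depths.dat-voro is "x y depth" with depth positive down,
        # already clipped to minimum_depth above.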
if self.use_edge_depths:
# And edges, preparing for edge-based bathy
edge_depth_fn=os.path.join(self.run_dir,"depths.dat-edge")
edge_xy=self.grid.edges_center()
# make depth positive down
edge_depth=-(self.grid.edges['edge_z_bed'] + self.z_offset)
edge_xyz=np.c_[edge_xy,edge_depth]
np.savetxt(edge_depth_fn,edge_xyz) # space separated
def grid_as_dataset(self):
"""
Return the grid and vertical geometry in a xr.Dataset
following the naming of suntans/ugrid.
Note that this does not yet set all attributes -- TODO!
This method does apply z_offset to the grid.
"""
ds=self.grid.write_to_xarray()
ds=ds.rename({'face':'Nc',
'edge':'Ne',
'node':'Np',
'node_per_edge':'two',
'maxnode_per_face':'numsides'})
layers=self.layer_data()
z_min=layers.z_min.values
z_max=layers.z_max.values
Nk=layers.dims['Nk']
cc=self.grid.cells_center()
ds['xv']=('Nc',),cc[:,0]
ds['yv']=('Nc',),cc[:,1]
ds['z_r']=('Nk',),layers.z_mid.values + self.z_offset
ds['z_r'].attrs['positive']='down'
# not right for 3D..
ds['Nk']=('Nc',),Nk*np.ones(self.grid.Ncells(),np.int32)
# don't take any chances on ugrid assumptions -- exactly mimic
# the example:
ds['suntans_mesh']=(),0
ds.suntans_mesh.attrs.update( dict(cf_role='mesh_topology',
long_name='Topology data of 2D unstructured mesh',
topology_dimension=2,
node_coordinates="xp yp",
face_node_connectivity="cells",
edge_node_connectivity="edges",
face_coordinates="xv yv",
edge_coordinates="xe ye",
face_edge_connectivity="face",
edge_face_connectivity="grad") )
ds['cells']=('Nc','numsides'),self.grid.cells['nodes']
ds['nfaces']=('Nc',), [self.grid.cell_Nsides(c) for c in range(self.grid.Ncells())]
ds['edges']=('Ne','two'),self.grid.edges['nodes']
ds['neigh']=('Nc','numsides'), [self.grid.cell_to_cells(c,pad=True)
for c in range(self.grid.Ncells())]
ds['grad']=('Ne','two'),self.grid.edge_to_cells()
ds['xp']=('Np',),self.grid.nodes['x'][:,0]
ds['yp']=('Np',),self.grid.nodes['x'][:,1]
depth=-(self.grid.cells['z_bed'] + self.z_offset)
ds['dv']=('Nc',),depth.clip(float(self.config['minimum_depth']),np.inf)
# really ought to set attrs for everybody, but sign of depth is
# particular, so go ahead and do it here.
ds.dv.attrs.update( dict( standard_name='sea_floor_depth_below_geoid',
long_name='seafloor depth',
comment='Has offset of %.3f applied'%(-self.z_offset),
units='m',
mesh='suntans_mesh',
location='face',
positive='down') )
ds['dz']=('Nk',),-np.diff(layers.z_interface.values)
ds['mark']=('Ne',),self.grid.edges['mark']
return ds
def zero_initial_condition(self):
"""
Return a xr.Dataset for initial conditions, with all values
initialized to nominal zero values.
This dataset has z_offset applied.
"""
ds_ic=self.grid_as_dataset()
ds_ic['time']=('time',),[self.run_start]
for name,dims in [ ('eta',('time','Nc')),
('uc', ('time','Nk','Nc')),
('vc', ('time','Nk','Nc')),
('salt',('time','Nk','Nc')),
('temp',('time','Nk','Nc')),
('agec',('time','Nk','Nc')),
('agesource',('Nk','Nc')) ]:
shape=tuple( [ds_ic.dims[d] for d in dims] )
if name=='agealpha':
dtype=np.timedelta64
else:
dtype=np.float64
vals=np.zeros(shape,dtype)
if name=='eta':
vals += self.z_offset
ds_ic[name]=dims,vals
return ds_ic
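    # Sketch of typical use (names here are just an illustration):
    #   model.ic_ds = model.zero_initial_condition()
    #   model.ic_ds['salt'].values[:] = 32.0   # hypothetical uniform salinity
    # before the initial condition file is written.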
met_pad=np.timedelta64(1,'D')
def zero_met(self,times=None):
"""
Create an empty (zero valued, and T=20degC) dataset for met
forcing.
times: defaults to 4 time steps bracketing the run, pass in
other ndarray(datetime64) to override
"""
ds_met=xr.Dataset()
# this is nt in the sample, but maybe time is okay??
# nope -- needs to be nt.
# quadratic interpolation is used, so we need to pad out before/after
# the simulation
if times is None:
times=[self.run_start-self.met_pad,
self.run_start,
self.run_stop,
self.run_stop+self.met_pad]
ds_met['nt']=('nt',),times
ds_met['Time']=('nt',),ds_met.nt.values
xxyy=self.grid.bounds()
xy0=[ 0.5*(xxyy[0]+xxyy[1]), 0.5*(xxyy[2]+xxyy[3])]
ll0=self.native_to_ll(xy0)
for name in ['Uwind','Vwind','Tair','Pair','RH','rain','cloud']:
ds_met["x_"+name]=("N"+name,),[ll0[0]]
ds_met["y_"+name]=("N"+name,),[ll0[1]]
ds_met["z_"+name]=("N"+name,),[10]
def const(dims,val):
shape=tuple( [ds_met.dims[d] for d in dims] )
return dims,val*np.ones(shape)
ds_met['Uwind']=const(('nt','NUwind'), 0.0)
ds_met['Vwind']=const(('nt','NVwind'), 0.0)
ds_met['Tair'] =const(('nt','NTair'), 20.0)
ds_met['Pair'] =const(('nt','NPair'), 1000.) # units?
ds_met['RH']=const(('nt','NRH'), 80.)
ds_met['rain']=const(('nt','Nrain'), 0.)
ds_met['cloud']=const(('nt','Ncloud'), 0.5)
return ds_met
def partition(self):
if self.restart:
            # per-processor files, listed without the .<proc> suffix, to be
            # symlinked or copied
parent_base=os.path.dirname(self.restart)
multi_proc_files=['celldata.dat','cells.dat',
'edgedata.dat','edges.dat',
'nodes.dat','topology.dat']
if os.path.exists(os.path.join(parent_base,'depths.dat-edge.0')):
multi_proc_files.append('depths.dat-edge')
for fn_base in multi_proc_files:
for proc in range(self.num_procs):
fn=fn_base+".%d"%proc
self.restart_copier(os.path.join(parent_base,fn),
os.path.join(self.run_dir,fn))
# single files
single_files=['vertspace.dat']
if 'DataLocations' in self.config:
# UNTESTED
single_files.append(self.config['DataLocations'])
if 'ProfileDataFile' in self.config:
# UNTESTED
single_files.append(self.config['ProfileDataFile'])
for fn in single_files:
self.restart_copier(os.path.join(parent_base,fn),
os.path.join(self.run_dir,fn))
else:
self.run_mpi(["-g",self.sun_verbose_flag,"--datadir=%s"%self.run_dir])
sun_verbose_flag="-vv"
def run_simulation(self):
args=['-s']
if self.restart:
args.append("-r")
args+=[self.sun_verbose_flag,"--datadir=%s"%self.run_dir]
self.run_mpi(args)
def run_mpi(self,sun_args):
sun="sun"
if self.sun_bin_dir is not None:
sun=os.path.join(self.sun_bin_dir,sun)
cmd=[sun] + sun_args
if self.num_procs>1:
mpiexec="mpiexec"
if self.mpi_bin_dir is not None:
mpiexec=os.path.join(self.mpi_bin_dir,mpiexec)
cmd=[mpiexec,"-n","%d"%self.num_procs] + cmd
subprocess.call(cmd)
# Methods related to using model output
def restartable_time(self):
"""
If store output is enabled, and this run has already been
executed, return the datetime64 of the restart files.
Otherwise None
"""
store_files=self.store_outputs()
if not store_files:
return None
store=store_file.StoreFile(model=self,proc=0,filename=store_files[0])
return store.time()
def store_outputs(self):
store_fn=os.path.join(self.run_dir,self.config['StoreFile'])
fns=glob.glob( store_fn+"*" )
fns.sort()
return fns
def start_inputs(self):
start_fn=os.path.join(self.run_dir,self.config['StartFile'])
fns=glob.glob( start_fn+"*" )
fns.sort()
return fns
def avg_outputs(self):
"""
with mergeArrays=1, these get sequenced with nstepsperncfile
with mergeArrays=0, each processor gets a file.
currently this function does not expose the difference
"""
if int(self.config['calcaverage']):
fns=glob.glob(os.path.join(self.run_dir,self.config['averageNetcdfFile']+"*"))
fns.sort()
return fns
else:
return []
def map_outputs(self):
"""
return a list of map output files -- if netcdf output is enabled,
that is what will be returned.
Guaranteed to be in the order of subdomain numbering if mergeArrays=0,
and in chronological order if mergeArrays=1.
Currently you can't distinguish which is which just from the output
of this method.
"""
if int(self.config['outputNetcdf']):
if self.config['mergeArrays'] is None or int(self.config['mergeArrays']):
# in this case the outputs are chunked in time
# with names like Estuary_SUNTANS.nc_0000.nc
# i.e. <outputNetcdfFile>_<seqN>.nc
fns=glob.glob(os.path.join(self.run_dir,self.config['outputNetcdfFile']+"_*.nc"))
fns.sort()
return fns
else:
# convoluted, but allow for some of the odd name construction for
# per-domain files, relying only on the assumption that the
# suffix is the processor number.
fns=glob.glob(os.path.join(self.run_dir,self.config['outputNetcdfFile']+"*"))
procs=[int(fn.split('.')[-1]) for fn in fns]
order=np.argsort(procs)
fns=[fns[i] for i in order]
return fns
else:
raise Exception("Need to implement map output filenames for non-netcdf")
@classmethod
def parse_profdata(cls,fn):
"""
Parse the profdata.dat file associated with a run.
fn: path to file to parse.
This is a classmethod to allow external usage but keep it bundled with the
SunDriver class.
Returns an xarray dataset
NOTE: if this uses caching at some point in the future, monitor_output should
be adapted to make a copy since it mutates the dataset.
data format:
(4 byte int)numTotalDataPoints: Number of data points found on all processors. Note that
        this could be different from the number specified since some may lie outside the domain.
(4 byte int)numInterpPoints: Number of nearest neighbors to each point used for interpolation.
(4 byte int)NkmaxProfs: Number of vertical levels output in the profiles.
(4 byte int)nsteps: Total number of time steps in the simulation.
(4 byte int)ntoutProfs: Frequency of profile output. This implies a total of nsteps/ntoutProfs are output.
(8 byte double)dt: Time step size
(8 byte double array X NkmaxProfs)dz: Contains the vertical grid spacings.
(4 byte int array X numTotalDataPoints)allIndices: Contains the indices of each point that determines its
original location in the data file. This is mostly for debugging since the output data is resorted
so that it is in the same order as it appeared in the data file.
(4 byte int array X 2*numTotalDataPoints)dataXY: Contains the original data points at (or near) which profiles
are output.
(8 byte double array X numTotalDataPoints*numInterpPoints)xv: Array containing the x-locations of the nearest
neighbors to the dataXY points. If numInterpPoints=3, then the 3 closest neighbors to the point
(dataXY[2*i],dataXY[2*i+1]) are (xv[3*i],yv[3*i]), (xv[3*i+1],yv[3*i+1]), (xv[3*i+2],yv[3*i+2]).
(8 byte double array X numTotalDataPoints*numInterpPoints)yv: Array containing the y-locations of the nearest
neighbors to the dataXY points (see xv above).
"""
pdata=xr.Dataset()
with open(fn,'rb') as fp:
hdr_ints = np.fromfile(fp,np.int32,count=5)
pdata['num_total_data_points']=(),hdr_ints[0]
pdata['num_interp_points'] =(), hdr_ints[1]
pdata['nkmax_profs'] =(), hdr_ints[2]
pdata['nsteps'] =(), hdr_ints[3]
pdata['ntout_profs'] =(), hdr_ints[4]
pdata['dt'] =(), np.fromfile(fp,np.float64,1)[0]
pdata['dzz'] = ('layer',),np.fromfile(fp,np.float64,pdata['nkmax_profs'].item() )
pdata['all_indices'] = np.fromfile(fp,np.int32,pdata['num_total_data_points'].item())
dataxy = np.fromfile(fp,np.float64,2*pdata['num_total_data_points'].item())
pdata['request_xy'] =('request','xy'), dataxy.reshape( (-1,2) )
pdata['request_xy'].attrs['description']="Coordinates of the requested profiles"
xvyv = np.fromfile(fp,np.float64,2*(pdata['num_total_data_points']*pdata['num_interp_points']).item())
pdata['prof_xy'] =('profile','xy'), xvyv.reshape( (2,-1) ).transpose()
pdata['prof_xy'].attrs['description']="Coordinates of the output profiles"
return pdata
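    # Sketch of standalone use (hypothetical path):
    #   pdata = SuntansModel.parse_profdata(os.path.join(run_dir, 'profdata.dat'))
    #   pdata.prof_xy.values   # [N,2] locations of the output profiles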
def read_profile_data_raw(self,scalar,pdata=None,memmap=True):
"""
scalar is one of HorizontalVelocityFile,
FreeSurfaceFile, etc
pdata: a previously parsed ProfData file, from parse_profdata. Can be passed
in to avoid re-parsing this file.
memmap: by default the file is memory mapped, which can be a huge performance
savings for large files. In some cases and platforms it is less stable,
though.
"""
if pdata is None:
pdata=self.parse_profdata(self.file_path('ProfileDataFile'))
prof_pnts = pdata.prof_xy
prof_len = prof_pnts.shape[0]
prof_fname = self.file_path(scalar) + ".prof"
if not os.path.exists(prof_fname):
log.debug("Request for profile for %s, but %s does not exist"%(scalar,prof_fname))
return None
# Figure out the shape of the output:
# I'm assuming that profile data gets spat out in the same
# ordering of dimensions as regular grid-based data
shape_per_step = []
# profiles.c writes u first then v, then w, each with a
# separate call to Write3DData()
if scalar == 'HorizontalVelocityFile':
shape_per_step.append(3)
# the outer loop is over profile points
shape_per_step.append(prof_len)
# And does it have z-levels? if so, that is the inner-most
# loop, so the last dimension of the array
if scalar != 'FreeSurfaceFile':
nkmax_profs = pdata['nkmax_profs'].item()
shape_per_step.append(nkmax_profs)
# better to use the size of the specific file we're opening:
prof_dat_size=os.stat(prof_fname).st_size
REALSIZE=8
bytes_per_step = REALSIZE * np.prod( np.array(shape_per_step) )
n_steps_in_file=int(prof_dat_size//bytes_per_step )
final_shape = tuple([n_steps_in_file] + shape_per_step)
if memmap: # BRAVE!
# print "Trying to memory map the data.."
data = np.memmap(prof_fname, dtype=np.float64, mode='r', shape=final_shape)
else:
            data = np.fromfile(prof_fname, np.float64)
data = data.reshape(*final_shape)
# no caching at this point..
return data
monitor_nodata=999999
monitor_dv=None # caches dv_from_map results
def monitor_output(self,nan_nodata=False,dv_from_map=False):
"""
Return xarray Dataset including the monitor output
"""
if 'DataLocations' not in self.config: return None
pdata=self.parse_profdata(self.file_path('ProfileDataFile'))
file_to_var={'FreeSurfaceFile':'eta',
'HorizontalVelocityFile':'u',
'TemperatureFile':'temp',
'SalinityFile':'salt',
'EddyViscosityFile':'nut',
'VerticalVelocityFile':'w',
'ScalarDiffusivityFile':'kappa'}
# Try to figure out which variables have been output in profiles
# Just scan what's there, to avoid trying to figure out defaults.
for scalar in list(file_to_var.keys()):
raw_data=self.read_profile_data_raw(scalar,pdata=pdata)
if raw_data is not None:
if scalar=='FreeSurfaceFile':
dims=('time','profile')
elif scalar=='HorizontalVelocityFile':
dims=('time','xyz','profile','layer')
else:
dims=('time','profile','layer')
# May need to use a different layer dimension for w...
# print("%s: raw data shape: %s dims: %s"%(scalar,str(raw_data.shape),dims))
if nan_nodata and np.any(raw_data==self.monitor_nodata):
# this can significantly slow down the process if ultimately we're
# only going to use a small slice of the data
raw_data=np.where(raw_data==self.monitor_nodata,
np.nan,raw_data)
pdata[file_to_var[scalar]]=dims,raw_data
# This may need some tweaking, but it's a start.
# use microseconds to get some reasonable precision for fraction dt
# but note that this isn't necessarily exact.
dt_prof=np.timedelta64( int( pdata['ntout_profs']*pdata['dt']*1e6),'us')
pdata['time']=('time',),(self.run_start + dt_prof*np.arange(pdata.dims['time']))
if dv_from_map:
if self.monitor_dv is None:
if 0: # read from map file, but that may not be valid until end of run
print("Loading dv for monitor data - should happen once!")
self.monitor_dv=self.extract_station_map(xy=pdata.prof_xy.values[:,:],data_vars='dv')
else: # read from subdomain grids.
mon_dv=np.zeros(pdata.dims['profile'],np.float64)
mon_dv[:]=np.nan
for proc in range(self.num_procs):
gsub=self.subdomain_grid(proc)
for i,xy in enumerate(pdata.prof_xy.values):
c=gsub.select_cells_nearest(xy,inside=True)
if c is not None:
mon_dv[i]=gsub.cells[c]['dv']
                    assert np.all(np.isfinite(mon_dv)),"Failed to get depths for all profile locations"
self.monitor_dv=xr.Dataset()
self.monitor_dv['dv']=('profile',),mon_dv
pdata['dv']=('profile',),self.monitor_dv['dv'].values
# Total hack for convenience -- add a closest_to([x,y]) method to extract a single
# profile.
@utils.add_to(pdata)
def closest_to(self,target):
dists=utils.dist(target,self['prof_xy'].values)
idx=np.argmin(dists)
return self.isel(profile=idx)
return pdata
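    # Sketch: mon = model.monitor_output(); mon.closest_to([x0, y0]) pulls the
    # profile nearest the (hypothetical) point (x0, y0).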
_subdomain_grids=None
def subdomain_grid(self,p):
if self._subdomain_grids is None:
self._subdomain_grids={}
if p not in self._subdomain_grids:
sub_g=unstructured_grid.UnstructuredGrid.read_suntans_hybrid(path=self.run_dir,
points='points.dat',
edges='edges.dat.%d'%p,
cells='cells.dat.%d'%p)
# edge depth is an ad-hoc extension, not "standard" enough to be in
# read_suntans_hybrid, so add it in here:
edge_depth_fn=self.file_path('depth')+"-edge.%d"%p
if os.path.exists(edge_depth_fn):
edge_xyz=np.loadtxt(edge_depth_fn)
# 2019-05-29: this did not have a negation. probably that was wrong.
# transition away from edge_depth, anyway.
# sub_g.add_edge_field('edge_depth',edge_xyz[:,2])
sub_g.add_edge_field('edge_z_bed',-edge_xyz[:,2])
if ('dv' in sub_g.cells.dtype.names) and ('z_bed' not in sub_g.cells.dtype.names):
sub_g.add_cell_field('z_bed',-sub_g.cells['dv'])
self._subdomain_grids[p]=sub_g
return self._subdomain_grids[p]
@memoize.imemoize(lru=64)
def extract_transect_monitor(self,xy=None,ll=None,time=None,
time_mode='inner',dv_from_map=False,
dzmin_surface=None):
"""
In progress alternate approach for transects.
xy: [N,2] location of vertical profiles making up the transect
ll: like xy, but lon/lat to be converted via self.ll_to_native
time: can be used to pull a specific time for each xy (with time_mode='inner').
time_mode: for now, only 'inner'. May be expanded to control whether
time is used orthogonal to xy, or parallel (i.e. for each xy, do we pull
one corresponding time from time, or pull all of the time for each).
if time is not covered by the output, or the run has no monitor output,
will return None.
"""
if xy is None:
xy=self.ll_to_native(ll)
if time_mode=='outer':
assert time.ndim==0,"Not ready for array-valued time with time_mode='outer'"
def xyt():
if time_mode=='inner':
for loc,t in zip(xy,time):
yield loc,t
else:
for loc in xy:
yield loc,time
stns=[]
for loc,t in xyt():
if time_mode=='inner':
# then each location has a single time associated with it
# we can narrow extract_station in that case.
t_slice=(t,t)
else:
# potentially a range of times
                # this should also work when time is a scalar datetime64.
t_slice=(t.min(),t.max())
stn=self.extract_station_monitor(xy=loc,chain_count=t_slice,
dv_from_map=dv_from_map)
if stn is None:
log.warning('Found no monitor data for %s. Skip transect'%str(t_slice))
return None
if np.isscalar(t):
if (t<stn.time.values[0]) or (t>stn.time.values[-1]):
log.info("Requested time %s is outside the range of the model output"%t)
return None
ti=utils.nearest(stn.time.values,t)
stn=stn.isel(time=ti)
stns.append(stn)
tran=xr.concat(stns,dim='time')
# now cleanup nan/nodata
for v in tran.data_vars:
if not np.issubdtype(tran[v].dtype,np.floating): continue
missing=tran[v].values==self.monitor_nodata
tran[v].values[missing]=np.nan
xy=np.c_[ tran.station_x,tran.station_y ]
tran=tran.rename(time='sample')
tran['d_sample']=('sample',),utils.dist_along(xy)
if 'dzz' in tran:
assert 'eta' in tran,"Not ready for transect processing without eta"
dzz_2d,eta_2d=xr.broadcast(tran.dzz,tran.eta)
z_max=eta_2d
#Not ready for this.
if 'dv' in tran:
_,dv_2d=xr.broadcast(eta_2d,tran.dv)
z_min=-dv_2d
else:
z_min=-np.inf
tran['z_bot']=-dzz_2d.cumsum(dim='layer')
tran['z_top']=tran.z_bot+dzz_2d
tran['z_bot']=tran.z_bot.clip(z_min,z_max)
tran['z_top']=tran.z_top.clip(z_min,z_max)
tran['z_ctr']=0.5*(tran.z_bot+tran.z_top)
for fld in ['z_bot','z_top','z_ctr']:
tran[fld].attrs['positive']='up'
# to be consistent with xr_transect, and np.diff(z_ctr),
# z_dz is _negative_
tran['z_dz'] =(tran.z_bot-tran.z_top)
if dzmin_surface is not None:
self.adjust_transect_for_dzmin_surface(tran,dzmin_surf=dzmin_surface)
return tran
def adjust_transect_for_dzmin_surface(self,tran,update_vars=['salt','temp'],dzmin_surf=0.25):
"""
janky - it is not always clear in the output which layers are valid, versus when a layer
was really thin and was coalesced with the next layer down. This method
takes an xr_transect style transect, finds thin surface layers and copies the values from
lower down up to the surface cells.
This currently probably doesn't work for velocity, just scalar.
extract_transect_monitor will call this automatically if dzmin_surface is specified.
"""
from ... import xr_transect
z_dz=xr_transect.get_z_dz(tran)
for samp_i in range(tran.dims['sample']):
eta=tran.eta.isel(sample=samp_i)
k_update=[]
for k in range(tran.dims['layer']):
if z_dz[samp_i,k]==0.0:
continue # truly dry
elif tran.eta[samp_i] - tran.z_bot[samp_i,k] < dzmin_surf:
log.debug("[sample %s,k %s] too thin"%(samp_i,k))
k_update.append(k)
else:
# valid layer
for ku in k_update:
for v in update_vars:
tran[v].values[samp_i,ku] = tran[v].values[samp_i,k]
break
def extract_transect(self,xy=None,ll=None,time=slice(None),dx=None,
vars=['uc','vc','Ac','dv','dzz','eta','w']):
"""
xy: [N,2] coordinates defining the line of the transect
time: if an integer or slice of integers, interpret as index
into time dimension. otherwise try to convert to datetime64,
and then index into time coordinate.
dx: omit to use xy as is, or a length scale for resampling xy
returns xr.Dataset, unless xy does not intersect the grid at all,
in which case None is returned.
Simple chaining is allowed, but if time spans two runs, the later
run will be used.
"""
if xy is None:
xy=self.ll_to_native(ll)
if dx is not None:
xy=linestring_utils.upsample_linearring(xy,dx,closed_ring=False)
# check for chaining
if np.issubdtype(type(time),np.integer):
# time came in as an index, so no chaining.
pass
else:
# asarray() helps avoid xarray annoyance
dt=np.max(utils.to_dt64(np.asarray(time)))
if dt<self.run_start:
log.info("extract_transect: chain back")
run=self.chain_restarts(count=dt)[0]
if run is not self: # avoid inf. recursion
return run.extract_transect(xy=xy,ll=ll,time=time,dx=dx,
vars=vars)
else:
log.info("extract_transect: chain back just returned self.")
proc_point_cell=np.zeros( [self.num_procs,len(xy)], np.int32)-1
point_datasets=[None]*len(xy)
good_vars=None # set on-demand below
merged=int(self.config['mergeArrays'])>0
def gen_sources(): # iterator over proc,sub_g,map_fn
if merged:
map_fn=self.map_outputs()[0]
g=unstructured_grid.UnstructuredGrid.from_ugrid(map_fn)
yield [0,g,map_fn]
else:
for proc in range(self.num_procs):
yield proc,self.subdomain_grid(proc),self.map_outputs()[proc]
def time_to_isel(ds,times,mode='nearest'):
"""
return an argument suitable for isel, to pull one or more time steps
from ds.
ds: dataset with time dimension
times: integer, datetime64, or slice thereof.
mode: 'nearest' map a time to the nearest matching time
'before' map a time to the matching or preceding time step
            'after' map a time to the following time step.
"""
if isinstance(times,slice):
return slice(time_to_isel(ds,times.start,mode='before'),
time_to_isel(ds,times.stop,mode='after'))
else:
if np.issubdtype(type(times),np.integer):
# already an index
return times
else:
dns=utils.to_dnum(ds.time.values)
dn=utils.to_dnum(times)
if mode=='nearest':
return utils.nearest(dns,dn)
elif mode=='before':
return np.searchsorted(dns,dn)
elif mode=='after':
return np.searchsorted(dns,dn,side='right')
else:
raise Exception("Bad mode: %s"%mode)
for proc,sub_g,map_fn in gen_sources():
ds=None
for pnti,pnt in enumerate(xy):
if point_datasets[pnti] is not None:
continue
c=sub_g.select_cells_nearest(pnt,inside=True)
if c is not None:
proc_point_cell[proc,pnti]=c
if ds is None:
ds=xr.open_dataset(map_fn)
# doctor up the Nk dimensions
ds['Nkf']=ds['Nk'] # copy the variable
del ds['Nk'] # delete old variable, leaving Nk as just a dimension
if good_vars is None:
# drop any variables that don't appear in the output
good_vars=[v for v in vars if v in ds]
time_idx=time_to_isel(ds,time)
point_ds=ds[good_vars].isel(time=time_idx,Nc=c)
point_ds['x_sample']=pnt[0]
point_ds['y_sample']=pnt[1]
point_datasets[pnti]=point_ds
# drop xy points that didn't hit a cell
point_datasets=[p for p in point_datasets if p is not None]
if len(point_datasets)==0: # transect doesn't intersect grid at all.
log.debug("Transect points do not intersect model")
return None
transect=xr.concat(point_datasets,dim='sample')
renames=dict(Nk='layer',Nkw='interface',
uc='Ve',vc='Vn',w='Vu_int')
renames={x:renames[x] for x in renames if (x in transect) or (x in transect.dims)}
transect=transect.rename(**renames)
transect['U']=('sample','layer','xy'),np.concatenate( [transect.Ve.values[...,None],
transect.Vn.values[...,None]],
axis=-1)
if 'Vu_int' in transect:
Vu_int=transect.Vu_int.values.copy()
Vu_int[np.isnan(Vu_int)]=0.0
transect['Vu']=('sample','layer'), 0.5*(Vu_int[:,1:] + Vu_int[:,:-1])
# construct layer-center depths
if 'dzz' not in transect:
# fabricate a dzz
eta_2d,dv_2d,z_w_2d=xr.broadcast( transect['eta'], transect['dv'], -ds['z_w'])
z_w_2d=z_w_2d.clip(-dv_2d,eta_2d)
z_bot=z_w_2d.isel(Nkw=slice(1,None))
z_top=z_w_2d.isel(Nkw=slice(None,-1))
# must use values to avoid xarray getting smart with aligning axes.
dzz=z_top.values-z_bot.values
z_ctr=0.5*(z_bot.values+z_top.values)
z_ctr[dzz==0.0]=np.nan
else:
dzz=transect.dzz.values.copy() # sample, Nk
z_bot=transect['eta'].values[:,None] - dzz.cumsum(axis=1)
z_top=z_bot+dzz
z_ctr=0.5*(z_top+z_bot)
z_ctr[dzz==0.0]=np.nan # indicate no data
transect['z_ctr']=('sample','layer'), z_ctr
transect['z_top']=('sample','layer'), z_top
transect['z_bot']=('sample','layer'), z_bot
# first, the interior interfaces
def choose_valid(a,b):
return np.where(np.isfinite(a),a,b)
z_int=choose_valid(z_top[:,1:],z_bot[:,:-1])
# and no choice of where the first and last rows come from
z_int=np.concatenate( [z_top[:,:1],
z_int,
z_bot[:,-1:]],
axis=1)
transect['z_int']=('sample','interface'),z_int
# we've got dzz, so go ahead and use it, but honor xr_transect
# sign convention that z_dz ~ diff(z_int)
transect['z_dz']=('sample','layer'),-dzz
# helps with plotting
transect.attrs['source']=self.run_dir
return transect
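    # Sketch (hypothetical coordinates): the last output step along a two-point line:
    #   sec = model.extract_transect(xy=np.array([[x0, y0], [x1, y1]]), time=-1, dx=25.)
    # returns an xr_transect-style Dataset with U, z_ctr, z_dz, etc.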
warn_initial_water_level=0
def initial_water_level(self):
"""
some BC methods which want a depth need an estimate of the water surface
elevation, and the initial water level is as good a guess as any.
"""
if self.ic_ds is not None:
return float(self.ic_ds.eta.mean())
else:
if self.warn_initial_water_level==0:
log.warning("Request for initial water level, but no IC is set yet")
self.warn_initial_water_level+=1
return 0.0
def extract_station_monitor(self,xy=None,ll=None,chain_count=1,
dv_from_map=False,data_vars=None):
"""
Return a dataset for a single point in the model
xy: native model coordinates, [Nstation,2]
ll: lon/lat coordinates, [Nstation,2]
chain_count: max number of restarts to go back.
1=>no chaining just this model. None or 0:
chain all runs possible. Otherwise, go back max
number of runs up to chain_count
if chain_count is a np.datetime64, go back enough restarts to
get to that date (see chain_restarts())
if chain_count is a tuple of datetime64, only consider restarts covering
that period.
This version pulls output from history files
if dv_from_map is True, additionally pulls dv from map output.
if no data matches the time range of chain_count, or profile output
        wasn't enabled, returns None.
"""
if xy is None:
xy=self.ll_to_native(ll)
if chain_count!=1:
restarts=self.chain_restarts(count=chain_count,load_grid=False)
# dv should be constant, so only load it on self.
dss=[mod.extract_station_monitor(xy=xy,ll=ll,chain_count=1,
data_vars=data_vars,dv_from_map=False)
for mod in restarts]
if len(dss)==0:
return None
chained=xr.concat(dss,dim='time',data_vars='minimal')
if dv_from_map:
# just to get dv...
me=self.extract_station_monitor(xy=xy,ll=ll,chain_count=1,
dv_from_map=True)
chained['dv']=me.dv
return chained
mon=self.monitor_output(dv_from_map=dv_from_map)
if mon is None:
return None # maybe profile output wasn't enabled.
xy=np.asarray(xy)
orig_ndim=xy.ndim
if orig_ndim==1:
xy=xy[None,:]
elif orig_ndim>2:
raise Exception("Can only handle single coordinates or an list of coordinates")
num_stations=len(xy)
stations=[]
for stn in range(num_stations):
dists=utils.dist(xy[stn,:],mon.prof_xy.values)
best=np.argmin(dists)
station=mon.isel(profile=best)
if data_vars is not None:
for v in list(station.data_vars):
if v not in data_vars:
del station[v]
station['distance_from_target']=(),dists[best]
station['profile_index']=best
station['source']='monitor'
stations.append(station)
if orig_ndim==1:
# This used to be done after the fact -- just isel(station=0)
# but concatenation in xarray is super slow
combined_ds=stations[0]
combined_ds['station_x']=(), xy[0,0]
combined_ds['station_y']=(), xy[0,1]
else:
combined_ds=xr.concat(stations,dim='station')
combined_ds['station_x']=('station',), xy[...,0]
combined_ds['station_y']=('station',), xy[...,1]
return combined_ds
def extract_station(self,xy=None,ll=None,chain_count=1,source='auto',dv_from_map=False,
data_vars=None):
"""
See extract_station_map, extract_station_monitor for details.
Will try monitor output if it exists, otherwise map output.
source: 'auto' (default), 'map' or 'monitor' to force a choice.
If a specific source is chosen and doesn't exist, returns None
"""
if source in ['auto','monitor']:
ds=self.extract_station_monitor(xy=xy,ll=ll,chain_count=chain_count,
dv_from_map=dv_from_map,data_vars=data_vars)
if (ds is not None) or (source=='monitor'):
return ds
if source in ['auto','map']:
return self.extract_station_map(xy=xy,ll=ll,chain_count=chain_count,
data_vars=data_vars)
assert False,"How did we get here"
def extract_station_map(self,xy=None,ll=None,chain_count=1,data_vars=None):
"""
Return a dataset for a single point in the model
xy: native model coordinates, [Nstation,2]
ll: lon/lat coordinates, [Nstation,2]
        chain_count: max number of restarts to go back.
1=>no chaining just this model. None or 0:
chain all runs possible. Otherwise, go back max
number of runs up to chain_count
data_vars: list of variables to include, otherwise all.
This version pulls output from map files
"""
if xy is None:
xy=self.ll_to_native(ll)
map_fns=self.map_outputs()
# First, map request locations to processor and cell
xy=np.asarray(xy)
orig_ndim=xy.ndim
if orig_ndim==1:
xy=xy[None,:]
elif orig_ndim>2:
raise Exception("Can only handle single coordinates or an list of coordinates")
num_stations=len(xy)
# allocate, [proc,cell,distance] per point
matches=[[None,None,np.inf] for i in range(num_stations)]
# outer loop on proc
for proc,map_fn in enumerate(map_fns):
map_ds=xr.open_dataset(map_fn)
g=unstructured_grid.UnstructuredGrid.from_ugrid(map_ds)
cc=g.cells_center()
# inner loop on station
for station in range(num_stations):
c=g.select_cells_nearest(xy[station],inside=False)
d=utils.dist(cc[c],xy[station])
if d<matches[station][2]:
matches[station]=[proc,c,d]
# Now we know exactly which procs are useful, and can close
# the others
hot_procs={} # dictionary tracking which processors are useful
for station,(proc,c,d) in enumerate(matches):
hot_procs[proc]=(station,c,d)
for proc,map_fn in enumerate(map_fns):
if proc not in hot_procs:
xr.open_dataset(map_fn).close()
# otherwise close later
if chain_count==1:
runs=[self]
else:
runs=self.chain_restarts(count=chain_count)
dss=[] # per-restart datasets
# workaround for cases where numsides was not held constant
max_numsides=0
min_numsides=1000000
for run in runs:
model_out=None
for proc,map_fn in enumerate(run.map_outputs()):
if proc not in hot_procs: continue # doesn't have any hits
map_ds=xr.open_dataset(run.map_outputs()[proc])
# Work around bad naming of dimensions
map_ds['Nk_c']=map_ds['Nk']
del map_ds['Nk']
# wait until we've loaded one to initialize the dataset for this run
if model_out is None:
model_out=xr.Dataset() # not middle out
model_out['time']=map_ds.time
# allocate output variables:
for d in map_ds.data_vars:
if data_vars and d not in data_vars:
log.debug('Skipping variable %s'%d)
continue
if 'Nc' in map_ds[d].dims:
# put station first
new_dims=['station']
new_shape=[num_stations]
for d_dim in map_ds[d].dims:
if d_dim=='Nc':
continue # replaced by station above
else:
new_dims.append(d_dim)
new_shape.append(map_ds.dims[d_dim])
model_out[d]=tuple(new_dims), np.zeros(new_shape, map_ds[d].dtype )
                # For vectorized indexing, pulls the stations we want from this
# processor, but only gets as far as ordering them densely on this
# proc
Nc_indexer=xr.DataArray( [m[1] for m in matches if m[0]==proc ],
dims=['proc_station'] )
assert len(Nc_indexer),"Somehow this proc has no hits"
# and the station indexes in model_out to assign to.
# this can't use vectorized indexing because you can't assign to the
# result of vectorized indexing.
proc_stations=np.array( [i for i,m in enumerate(matches) if m[0]==proc] )
for d in map_ds.data_vars:
if d not in model_out: continue
# potentially gets one isel out of the tight loop
# this appears to work.
extracted=map_ds[d].isel(Nc=Nc_indexer)
# extracted will have 'station' in the wrong place. transpose
dims=['proc_station'] + [d for d in extracted.dims if d!='proc_station']
extractedT=extracted.transpose(*dims)
# this line is 90% of the time:
ext_vals=extractedT.values
model_out[d].values[proc_stations,...] = ext_vals
#for station in lin_idx,arr_idx in enumerate(np.ndindex(stn_shape)):
# if matches[lin_idx][0]!=proc: continue
#
# extracted=map_ds[d].isel(Nc=matches[lin_idx][1])
# # seems like voodoo -- construct an index into the output,
# # which will let us point to the desired station.
# sel=dict(zip(stn_dims,arr_idx))
# model_out[d].isel(sel).values[...]=extracted
if dss:
# limit to non-overlapping
time_sel=model_out.time.values>dss[-1].time.values[-1]
model_out=model_out.isel(time=time_sel)
if 'numsides' in model_out.dims:
max_numsides=max(max_numsides,model_out.dims['numsides'])
min_numsides=min(min_numsides,model_out.dims['numsides'])
dss.append(model_out)
if 'numsides' in model_out.dims:
if max_numsides!=min_numsides:
log.warning("numsides varies %d to %d over restarts. Kludge around"%
(min_numsides,max_numsides))
dss=[ ds.isel(numsides=slice(0,min_numsides)) for ds in dss]
combined_ds=xr.concat(dss,dim='time',data_vars='minimal',coords='minimal')
# copy from matches
combined_ds['distance_from_target']=('station',), np.zeros(num_stations, np.float64)
combined_ds['subdomain']=('station',), np.zeros(num_stations,np.int32)
combined_ds['station_cell']=('station',), np.zeros(num_stations,np.int32)
combined_ds['station_cell'].attrs['description']="Cell index in subdomain grid"
combined_ds['station_x']=('station',), xy[...,0]
combined_ds['station_y']=('station',), xy[...,1]
combined_ds['source']=('station',), ["map"]*num_stations
for station in range(num_stations):
# here we know the order and can go straight to values
combined_ds['distance_from_target'].values[station]=matches[station][2]
combined_ds['subdomain'].values[station]=matches[station][0]
if orig_ndim==1:
combined_ds=combined_ds.isel(station=0)
return combined_ds
| mit |
jm-begon/scikit-learn | examples/plot_digits_pipe.py | 250 | 1809 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
logistic = linear_model.LogisticRegression()
pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
###############################################################################
# Plot the PCA spectrum
pca.fit(X_digits)
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')
###############################################################################
# Prediction
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)
#Parameters of pipelines can be set using ‘__’ separated parameter names:
estimator = GridSearchCV(pipe,
dict(pca__n_components=n_components,
logistic__C=Cs))
estimator.fit(X_digits, y_digits)
plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
plt.legend(prop=dict(size=12))
plt.show()
| bsd-3-clause |
vex1023/vxData | setup.py | 1 | 2317 | from __future__ import print_function
import os
import sys
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import vxData as source_packages
here = os.path.abspath(os.path.dirname(__file__))
# Package name
name = 'vxData'
# The version number is taken from the source package
version = source_packages.__version__
# Project home page
home_pages = 'http://github.com/'
# Author information
author = source_packages.__author__
author_email = source_packages.__email__
# Short description of the project
description = 'A股交易数据包'
# Test suite
test_suite = 'vxData.tests.test_API'
# Project classifiers
classifiers = [
'Programming Language :: Python3.4',
'Programming Language :: Python3.5',
'Development Status :: 4 - Beta',
'Natural Language :: Chinese',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: The MIT License (MIT)',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
]
requirements = ['requests', 'pandas', 'vxUtils']
readme = None
long_description = ''
if os.path.exists('README.rst'):
readme = 'README.rst'
elif os.path.exists('README.md'):
readme = 'README.md'
if readme:
with open(readme, 'rb') as f:
long_description = f.read().decode('utf-8')
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = []
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
    def run_tests(self):
        import shlex
        import pytest
        # honor --pytest-args; it arrives as a string when given on the command line
        args = shlex.split(self.pytest_args) if isinstance(self.pytest_args, str) else self.pytest_args
        errcode = pytest.main(args)
        sys.exit(errcode)
setup(
name=name,
version=version,
url=home_pages,
license='The MIT License (MIT)',
author=author,
tests_require=['pytest'],
install_requires=requirements,
cmdclass={'test': PyTest},
author_email=author_email,
description=description,
long_description=long_description,
packages=find_packages(),
include_package_data=True,
platforms='any',
test_suite=test_suite,
#classifiers=classifiers,
extras_require={
'testing': ['pytest'],
}
)
| mit |
dingocuster/scikit-learn | examples/applications/plot_tomography_l1_reconstruction.py | 204 | 5442 | """
======================================================================
Compressive sensing: tomography reconstruction with L1 prior (Lasso)
======================================================================
This example shows the reconstruction of an image from a set of parallel
projections, acquired along different angles. Such a dataset is acquired in
**computed tomography** (CT).
Without any prior information on the sample, the number of projections
required to reconstruct the image is of the order of the linear size
``l`` of the image (in pixels). For simplicity we consider here a sparse
image, where only pixels on the boundary of objects have a non-zero
value. Such data could correspond for example to a cellular material.
Note however that most images are sparse in a different basis, such as
the Haar wavelets. Only ``l/7`` projections are acquired, therefore it is
necessary to use prior information available on the sample (its
sparsity): this is an example of **compressive sensing**.
The tomography projection operation is a linear transformation. In
addition to the data-fidelity term corresponding to a linear regression,
we penalize the L1 norm of the image to account for its sparsity. The
resulting optimization problem is called the :ref:`lasso`. We use the
class :class:`sklearn.linear_model.Lasso`, which uses the coordinate descent
algorithm. Importantly, this implementation is computationally efficient when
the design matrix is sparse, as is the projection operator used here.
The reconstruction with L1 penalization gives a result with zero error
(all pixels are successfully labeled with 0 or 1), even if noise was
added to the projections. In comparison, an L2 penalization
(:class:`sklearn.linear_model.Ridge`) produces a large number of labeling
errors for the pixels. Important artifacts are observed on the
reconstructed image, contrary to the L1 penalization. Note in particular
the circular artifact separating the pixels in the corners, which have
contributed to fewer projections than the central disk.
"""
print(__doc__)
# Author: Emmanuelle Gouillart <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import ndimage
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
import matplotlib.pyplot as plt
def _weights(x, dx=1, orig=0):
x = np.ravel(x)
floor_x = np.floor((x - orig) / dx)
alpha = (x - orig - floor_x * dx) / dx
return np.hstack((floor_x, floor_x + 1)), np.hstack((1 - alpha, alpha))
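# Worked example (illustration only, not used below): for a single coordinate
# x = 1.3 with dx=1 and orig=0, the sample is split between pixel indices 1 and
# 2 with weights 0.7 and 0.3:
#     _weights(np.array([1.3]))  ->  (array([1., 2.]), array([0.7, 0.3]))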
def _generate_center_coordinates(l_x):
    # Cast to float so that the in-place centering below does not fail (or
    # silently truncate) on the integer grid returned by np.mgrid.
    X, Y = np.mgrid[:l_x, :l_x].astype(np.float64)
    center = l_x / 2.
    X += 0.5 - center
    Y += 0.5 - center
    return X, Y
def build_projection_operator(l_x, n_dir):
""" Compute the tomography design matrix.
Parameters
----------
l_x : int
linear size of image array
n_dir : int
number of angles at which projections are acquired.
Returns
-------
p : sparse matrix of shape (n_dir l_x, l_x**2)
"""
X, Y = _generate_center_coordinates(l_x)
angles = np.linspace(0, np.pi, n_dir, endpoint=False)
data_inds, weights, camera_inds = [], [], []
data_unravel_indices = np.arange(l_x ** 2)
data_unravel_indices = np.hstack((data_unravel_indices,
data_unravel_indices))
for i, angle in enumerate(angles):
Xrot = np.cos(angle) * X - np.sin(angle) * Y
inds, w = _weights(Xrot, dx=1, orig=X.min())
mask = np.logical_and(inds >= 0, inds < l_x)
weights += list(w[mask])
camera_inds += list(inds[mask] + i * l_x)
data_inds += list(data_unravel_indices[mask])
proj_operator = sparse.coo_matrix((weights, (camera_inds, data_inds)))
return proj_operator
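# A concrete instance (illustration only): with the values used below, l_x = 128
# and n_dir = 128 // 7 = 18, so the operator has shape (18 * 128, 128 ** 2),
# i.e. (2304, 16384).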
def generate_synthetic_data():
""" Synthetic binary data """
rs = np.random.RandomState(0)
    n_pts = 36
x, y = np.ogrid[0:l, 0:l]
mask_outer = (x - l / 2) ** 2 + (y - l / 2) ** 2 < (l / 2) ** 2
mask = np.zeros((l, l))
points = l * rs.rand(2, n_pts)
    mask[(points[0]).astype(int), (points[1]).astype(int)] = 1
mask = ndimage.gaussian_filter(mask, sigma=l / n_pts)
res = np.logical_and(mask > mask.mean(), mask_outer)
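    # Subtracting the binary erosion keeps only the object boundaries, i.e. the
    # sparse image described in the module docstring.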
return res - ndimage.binary_erosion(res)
# Generate synthetic images, and projections
l = 128
proj_operator = build_projection_operator(l, l // 7)
data = generate_synthetic_data()
proj = proj_operator * data.ravel()[:, np.newaxis]
proj += 0.15 * np.random.randn(*proj.shape)
# Reconstruction with L2 (Ridge) penalization
rgr_ridge = Ridge(alpha=0.2)
rgr_ridge.fit(proj_operator, proj.ravel())
rec_l2 = rgr_ridge.coef_.reshape(l, l)
# Reconstruction with L1 (Lasso) penalization
# the best value of alpha was determined using cross validation
# with LassoCV
rgr_lasso = Lasso(alpha=0.001)
rgr_lasso.fit(proj_operator, proj.ravel())
rec_l1 = rgr_lasso.coef_.reshape(l, l)
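# Hedged sketch (not part of the original script): one way the alpha used above
# could be re-derived is with LassoCV; the alpha grid and max_iter below are
# assumptions for illustration, and the fit is left commented out to keep the
# example fast.
# from sklearn.linear_model import LassoCV
# rgr_lasso_cv = LassoCV(alphas=np.logspace(-5, -1, 20), max_iter=2000)
# rgr_lasso_cv.fit(proj_operator, proj.ravel())
# print("cross-validated alpha: %g" % rgr_lasso_cv.alpha_)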
plt.figure(figsize=(8, 3.3))
plt.subplot(131)
plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
plt.axis('off')
plt.title('original image')
plt.subplot(132)
plt.imshow(rec_l2, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L2 penalization')
plt.axis('off')
plt.subplot(133)
plt.imshow(rec_l1, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L1 penalization')
plt.axis('off')
plt.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0,
right=1)
plt.show()
| bsd-3-clause |
pystruct/pystruct | benchmarks/random_tree_crf.py | 1 | 1467 | import numpy as np
from scipy import sparse
try:
from sklearn.model_selection import train_test_split
except ImportError:
from sklearn.cross_validation import train_test_split
from scipy.sparse.csgraph import minimum_spanning_tree
from pystruct.learners import SubgradientSSVM
from pystruct.models import GraphCRF
def make_random_trees(n_samples=50, n_nodes=100, n_states=7, n_features=10):
crf = GraphCRF(inference_method='max-product', n_states=n_states,
n_features=n_features)
weights = np.random.randn(crf.size_joint_feature)
X, y = [], []
for i in range(n_samples):
distances = np.random.randn(n_nodes, n_nodes)
features = np.random.randn(n_nodes, n_features)
tree = minimum_spanning_tree(sparse.csr_matrix(distances))
edges = np.c_[tree.nonzero()]
X.append((features, edges))
y.append(crf.inference(X[-1], weights))
return X, y, weights
X, y, weights = make_random_trees(n_nodes=1000)
X_train, X_test, y_train, y_test = train_test_split(X, y)
#tree_model = MultiLabelClf(edges=tree, inference_method=('ogm', {'alg': 'dyn'}))
tree_model = GraphCRF(inference_method='max-product')
tree_ssvm = SubgradientSSVM(tree_model, max_iter=4, C=1, verbose=10)
print("fitting tree model...")
tree_ssvm.fit(X_train, y_train)
print("Training loss tree model: %f" % tree_ssvm.score(X_train, y_train))
print("Test loss tree model: %f" % tree_ssvm.score(X_test, y_test))
| bsd-2-clause |
erikness/AlephOne | zipline/transforms/ta.py | 6 | 8006 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import math
import numpy as np
import pandas as pd
import talib
import copy
from six import iteritems
from zipline.transforms import BatchTransform
def zipline_wrapper(talib_fn, key_map, data):
# get required TA-Lib input names
if 'price' in talib_fn.input_names:
req_inputs = [talib_fn.input_names['price']]
elif 'prices' in talib_fn.input_names:
req_inputs = talib_fn.input_names['prices']
else:
req_inputs = []
    # If there are multiple output names then the results are named; if there
    # is only one output name (usually 'real'), the result is best represented
    # by a float.
    # Use a DataFrame to map sid to named values, and a Series to map sid
    # to floats.
if len(talib_fn.output_names) > 1:
all_results = pd.DataFrame(index=talib_fn.output_names,
columns=data.minor_axis)
else:
all_results = pd.Series(index=data.minor_axis)
for sid in data.minor_axis:
# build talib_data from zipline data
talib_data = dict()
for talib_key, zipline_key in iteritems(key_map):
# if zipline_key is found, add it to talib_data
if zipline_key in data:
values = data[zipline_key][sid].values
# Do not include sids that have only nans, passing only nans
# is incompatible with many of the underlying TALib functions.
if pd.isnull(values).all():
break
else:
talib_data[talib_key] = data[zipline_key][sid].values
# if zipline_key is not found and not required, add zeros
elif talib_key not in req_inputs:
talib_data[talib_key] = np.zeros(data.shape[1])
# if zipline key is not found and required, raise error
else:
raise KeyError(
'Tried to set required TA-Lib data with key '
'\'{0}\' but no Zipline data is available under '
'expected key \'{1}\'.'.format(
talib_key, zipline_key))
# call talib
if talib_data:
talib_result = talib_fn(talib_data)
# keep only the most recent result
if isinstance(talib_result, (list, tuple)):
sid_result = tuple([r[-1] for r in talib_result])
else:
sid_result = talib_result[-1]
all_results[sid] = sid_result
return all_results
def make_transform(talib_fn, name):
"""
A factory for BatchTransforms based on TALIB abstract functions.
"""
# make class docstring
header = '\n#---- TA-Lib docs\n\n'
talib_docs = getattr(talib, talib_fn.info['name']).__doc__
divider1 = '\n#---- Default mapping (TA-Lib : Zipline)\n\n'
mappings = '\n'.join(' {0} : {1}'.format(k, v)
for k, v in talib_fn.input_names.items())
divider2 = '\n\n#---- Zipline docs\n'
help_str = (header + talib_docs + divider1 + mappings
+ divider2)
class TALibTransform(BatchTransform):
__doc__ = help_str + """
TA-Lib keyword arguments must be passed at initialization. For
example, to construct a moving average with timeperiod of 5, pass
"timeperiod=5" during initialization.
All abstract TA-Lib functions accept a data dictionary containing
'open', 'high', 'low', 'close', and 'volume' keys, even if they do
not require those keys to run. For example, talib.MA (moving
average) is always computed using the data under the 'close'
key. By default, Zipline constructs this data dictionary with the
appropriate sid data, but users may overwrite this by passing
mappings as keyword arguments. For example, to compute the moving
average of the sid's high, provide "close = 'high'" and Zipline's
'high' data will be used as TA-Lib's 'close' data. Similarly, if a
user had a data column named 'Oil', they could compute its moving
average by passing "close='Oil'".
**Example**
A moving average of a data column called 'Oil' with timeperiod 5,
talib.transforms.ta.MA(close='Oil', timeperiod=5)
The user could find the default arguments and mappings by calling:
help(zipline.transforms.ta.MA)
**Arguments**
open : string, default 'open'
high : string, default 'high'
low : string, default 'low'
close : string, default 'price'
volume : string, default 'volume'
refresh_period : int, default 0
The refresh_period of the BatchTransform determines the number
of iterations that pass before the BatchTransform updates its
internal data.
\*\*kwargs : any arguments to be passed to the TA-Lib function.
"""
def __init__(self,
close='price',
open='open',
high='high',
low='low',
volume='volume',
refresh_period=0,
bars='daily',
**kwargs):
key_map = {'high': high,
'low': low,
'open': open,
'volume': volume,
'close': close}
self.call_kwargs = kwargs
# Make deepcopy of talib abstract function.
# This is necessary because talib abstract functions remember
# state, including parameters, and we need to set the parameters
# in order to compute the lookback period that will determine the
# BatchTransform window_length. TALIB has no way to restore default
# parameters, so the deepcopy lets us change this function's
# parameters without affecting other TALibTransforms of the same
# function.
self.talib_fn = copy.deepcopy(talib_fn)
# set the parameters
for param in self.talib_fn.get_parameters().keys():
if param in kwargs:
self.talib_fn.set_parameters({param: kwargs[param]})
# get the lookback
self.lookback = self.talib_fn.lookback
self.bars = bars
if bars == 'daily':
lookback = self.lookback + 1
elif bars == 'minute':
lookback = int(math.ceil(self.lookback / (6.5 * 60)))
# Ensure that window_length is at least 1 day's worth of data.
window_length = max(lookback, 1)
transform_func = functools.partial(
zipline_wrapper, self.talib_fn, key_map)
super(TALibTransform, self).__init__(
func=transform_func,
refresh_period=refresh_period,
window_length=window_length,
compute_only_full=False,
bars=bars)
def __repr__(self):
return 'Zipline BatchTransform: {0}'.format(
self.talib_fn.info['name'])
TALibTransform.__name__ = name
# return class
return TALibTransform
# add all TA-Lib functions to locals
for name in talib.abstract.__FUNCTION_NAMES:
fn = getattr(talib.abstract, name)
locals()[name] = make_transform(fn, name)
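# Hedged usage sketch (not part of the original module): after the loop above,
# every TA-Lib abstract function is exposed as a BatchTransform class in this
# module; the names and parameters below are illustrative only.
#     sma_oil = MA(close='Oil', timeperiod=5)   # moving average over an 'Oil' column
#     rsi = RSI(timeperiod=14)                  # RSI over the default 'price' column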
| apache-2.0 |
toobaz/pandas | pandas/tests/indexes/datetimes/test_scalar_compat.py | 2 | 12251 | """
Tests for DatetimeIndex methods behaving like their Timestamp counterparts
"""
from datetime import datetime
import numpy as np
import pytest
from pandas._libs.tslibs.np_datetime import OutOfBoundsDatetime
import pandas as pd
from pandas import DatetimeIndex, Timestamp, date_range
import pandas.util.testing as tm
from pandas.tseries.frequencies import to_offset
class TestDatetimeIndexOps:
def test_dti_time(self):
rng = date_range("1/1/2000", freq="12min", periods=10)
result = pd.Index(rng).time
expected = [t.time() for t in rng]
assert (result == expected).all()
def test_dti_date(self):
rng = date_range("1/1/2000", freq="12H", periods=10)
result = pd.Index(rng).date
expected = [t.date() for t in rng]
assert (result == expected).all()
@pytest.mark.parametrize("data", [["1400-01-01"], [datetime(1400, 1, 1)]])
def test_dti_date_out_of_range(self, data):
# GH#1475
msg = "Out of bounds nanosecond timestamp: 1400-01-01 00:00:00"
with pytest.raises(OutOfBoundsDatetime, match=msg):
DatetimeIndex(data)
@pytest.mark.parametrize(
"field",
[
"dayofweek",
"dayofyear",
"week",
"weekofyear",
"quarter",
"days_in_month",
"is_month_start",
"is_month_end",
"is_quarter_start",
"is_quarter_end",
"is_year_start",
"is_year_end",
"weekday_name",
],
)
def test_dti_timestamp_fields(self, field):
# extra fields from DatetimeIndex like quarter and week
idx = tm.makeDateIndex(100)
expected = getattr(idx, field)[-1]
if field == "weekday_name":
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = getattr(Timestamp(idx[-1]), field)
else:
result = getattr(Timestamp(idx[-1]), field)
assert result == expected
def test_dti_timestamp_freq_fields(self):
# extra fields from DatetimeIndex like quarter and week
idx = tm.makeDateIndex(100)
assert idx.freq == Timestamp(idx[-1], idx.freq).freq
assert idx.freqstr == Timestamp(idx[-1], idx.freq).freqstr
# ----------------------------------------------------------------
# DatetimeIndex.round
def test_round_daily(self):
dti = date_range("20130101 09:10:11", periods=5)
result = dti.round("D")
expected = date_range("20130101", periods=5)
tm.assert_index_equal(result, expected)
dti = dti.tz_localize("UTC").tz_convert("US/Eastern")
result = dti.round("D")
expected = date_range("20130101", periods=5).tz_localize("US/Eastern")
tm.assert_index_equal(result, expected)
result = dti.round("s")
tm.assert_index_equal(result, dti)
@pytest.mark.parametrize(
"freq, error_msg",
[
("Y", "<YearEnd: month=12> is a non-fixed frequency"),
("M", "<MonthEnd> is a non-fixed frequency"),
("foobar", "Invalid frequency: foobar"),
],
)
def test_round_invalid(self, freq, error_msg):
dti = date_range("20130101 09:10:11", periods=5)
dti = dti.tz_localize("UTC").tz_convert("US/Eastern")
with pytest.raises(ValueError, match=error_msg):
dti.round(freq)
def test_round(self, tz_naive_fixture):
tz = tz_naive_fixture
rng = date_range(start="2016-01-01", periods=5, freq="30Min", tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex(
[
Timestamp("2016-01-01 00:00:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 00:00:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 01:00:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 02:00:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 02:00:00", tz=tz, freq="30T"),
]
)
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq="H"), expected_rng)
assert elt.round(freq="H") == expected_elt
msg = pd._libs.tslibs.frequencies.INVALID_FREQ_ERR_MSG
with pytest.raises(ValueError, match=msg):
rng.round(freq="foo")
with pytest.raises(ValueError, match=msg):
elt.round(freq="foo")
msg = "<MonthEnd> is a non-fixed frequency"
with pytest.raises(ValueError, match=msg):
rng.round(freq="M")
with pytest.raises(ValueError, match=msg):
elt.round(freq="M")
# GH#14440 & GH#15578
index = DatetimeIndex(["2016-10-17 12:00:00.0015"], tz=tz)
result = index.round("ms")
expected = DatetimeIndex(["2016-10-17 12:00:00.002000"], tz=tz)
tm.assert_index_equal(result, expected)
for freq in ["us", "ns"]:
tm.assert_index_equal(index, index.round(freq))
index = DatetimeIndex(["2016-10-17 12:00:00.00149"], tz=tz)
result = index.round("ms")
expected = DatetimeIndex(["2016-10-17 12:00:00.001000"], tz=tz)
tm.assert_index_equal(result, expected)
index = DatetimeIndex(["2016-10-17 12:00:00.001501031"])
result = index.round("10ns")
expected = DatetimeIndex(["2016-10-17 12:00:00.001501030"])
tm.assert_index_equal(result, expected)
with tm.assert_produces_warning(False):
ts = "2016-10-17 12:00:00.001501031"
DatetimeIndex([ts]).round("1010ns")
def test_no_rounding_occurs(self, tz_naive_fixture):
# GH 21262
tz = tz_naive_fixture
rng = date_range(start="2016-01-01", periods=5, freq="2Min", tz=tz)
expected_rng = DatetimeIndex(
[
Timestamp("2016-01-01 00:00:00", tz=tz, freq="2T"),
Timestamp("2016-01-01 00:02:00", tz=tz, freq="2T"),
Timestamp("2016-01-01 00:04:00", tz=tz, freq="2T"),
Timestamp("2016-01-01 00:06:00", tz=tz, freq="2T"),
Timestamp("2016-01-01 00:08:00", tz=tz, freq="2T"),
]
)
tm.assert_index_equal(rng.round(freq="2T"), expected_rng)
@pytest.mark.parametrize(
"test_input, rounder, freq, expected",
[
(["2117-01-01 00:00:45"], "floor", "15s", ["2117-01-01 00:00:45"]),
(["2117-01-01 00:00:45"], "ceil", "15s", ["2117-01-01 00:00:45"]),
(
["2117-01-01 00:00:45.000000012"],
"floor",
"10ns",
["2117-01-01 00:00:45.000000010"],
),
(
["1823-01-01 00:00:01.000000012"],
"ceil",
"10ns",
["1823-01-01 00:00:01.000000020"],
),
(["1823-01-01 00:00:01"], "floor", "1s", ["1823-01-01 00:00:01"]),
(["1823-01-01 00:00:01"], "ceil", "1s", ["1823-01-01 00:00:01"]),
(["2018-01-01 00:15:00"], "ceil", "15T", ["2018-01-01 00:15:00"]),
(["2018-01-01 00:15:00"], "floor", "15T", ["2018-01-01 00:15:00"]),
(["1823-01-01 03:00:00"], "ceil", "3H", ["1823-01-01 03:00:00"]),
(["1823-01-01 03:00:00"], "floor", "3H", ["1823-01-01 03:00:00"]),
(
("NaT", "1823-01-01 00:00:01"),
"floor",
"1s",
("NaT", "1823-01-01 00:00:01"),
),
(
("NaT", "1823-01-01 00:00:01"),
"ceil",
"1s",
("NaT", "1823-01-01 00:00:01"),
),
],
)
def test_ceil_floor_edge(self, test_input, rounder, freq, expected):
dt = DatetimeIndex(list(test_input))
func = getattr(dt, rounder)
result = func(freq)
expected = DatetimeIndex(list(expected))
assert expected.equals(result)
@pytest.mark.parametrize(
"start, index_freq, periods",
[("2018-01-01", "12H", 25), ("2018-01-01 0:0:0.124999", "1ns", 1000)],
)
@pytest.mark.parametrize(
"round_freq",
[
"2ns",
"3ns",
"4ns",
"5ns",
"6ns",
"7ns",
"250ns",
"500ns",
"750ns",
"1us",
"19us",
"250us",
"500us",
"750us",
"1s",
"2s",
"3s",
"12H",
"1D",
],
)
def test_round_int64(self, start, index_freq, periods, round_freq):
dt = date_range(start=start, freq=index_freq, periods=periods)
unit = to_offset(round_freq).nanos
# test floor
result = dt.floor(round_freq)
diff = dt.asi8 - result.asi8
mod = result.asi8 % unit
assert (mod == 0).all(), "floor not a {} multiple".format(round_freq)
assert (0 <= diff).all() and (diff < unit).all(), "floor error"
# test ceil
result = dt.ceil(round_freq)
diff = result.asi8 - dt.asi8
mod = result.asi8 % unit
assert (mod == 0).all(), "ceil not a {} multiple".format(round_freq)
assert (0 <= diff).all() and (diff < unit).all(), "ceil error"
# test round
result = dt.round(round_freq)
diff = abs(result.asi8 - dt.asi8)
mod = result.asi8 % unit
assert (mod == 0).all(), "round not a {} multiple".format(round_freq)
assert (diff <= unit // 2).all(), "round error"
if unit % 2 == 0:
assert (
result.asi8[diff == unit // 2] % 2 == 0
).all(), "round half to even error"
# ----------------------------------------------------------------
# DatetimeIndex.normalize
def test_normalize(self):
rng = date_range("1/1/2000 9:30", periods=10, freq="D")
result = rng.normalize()
expected = date_range("1/1/2000", periods=10, freq="D")
tm.assert_index_equal(result, expected)
arr_ns = np.array([1380585623454345752, 1380585612343234312]).astype(
"datetime64[ns]"
)
rng_ns = DatetimeIndex(arr_ns)
rng_ns_normalized = rng_ns.normalize()
arr_ns = np.array([1380585600000000000, 1380585600000000000]).astype(
"datetime64[ns]"
)
expected = DatetimeIndex(arr_ns)
tm.assert_index_equal(rng_ns_normalized, expected)
assert result.is_normalized
assert not rng.is_normalized
def test_normalize_nat(self):
dti = DatetimeIndex([pd.NaT, Timestamp("2018-01-01 01:00:00")])
result = dti.normalize()
expected = DatetimeIndex([pd.NaT, Timestamp("2018-01-01")])
tm.assert_index_equal(result, expected)
class TestDateTimeIndexToJulianDate:
def test_1700(self):
dr = date_range(start=Timestamp("1710-10-01"), periods=5, freq="D")
r1 = pd.Index([x.to_julian_date() for x in dr])
r2 = dr.to_julian_date()
assert isinstance(r2, pd.Float64Index)
tm.assert_index_equal(r1, r2)
def test_2000(self):
dr = date_range(start=Timestamp("2000-02-27"), periods=5, freq="D")
r1 = pd.Index([x.to_julian_date() for x in dr])
r2 = dr.to_julian_date()
assert isinstance(r2, pd.Float64Index)
tm.assert_index_equal(r1, r2)
def test_hour(self):
dr = date_range(start=Timestamp("2000-02-27"), periods=5, freq="H")
r1 = pd.Index([x.to_julian_date() for x in dr])
r2 = dr.to_julian_date()
assert isinstance(r2, pd.Float64Index)
tm.assert_index_equal(r1, r2)
def test_minute(self):
dr = date_range(start=Timestamp("2000-02-27"), periods=5, freq="T")
r1 = pd.Index([x.to_julian_date() for x in dr])
r2 = dr.to_julian_date()
assert isinstance(r2, pd.Float64Index)
tm.assert_index_equal(r1, r2)
def test_second(self):
dr = date_range(start=Timestamp("2000-02-27"), periods=5, freq="S")
r1 = pd.Index([x.to_julian_date() for x in dr])
r2 = dr.to_julian_date()
assert isinstance(r2, pd.Float64Index)
tm.assert_index_equal(r1, r2)
| bsd-3-clause |
Carmezim/tensorflow | tensorflow/examples/learn/text_classification_character_cnn.py | 30 | 4292 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is an example of using convolutional networks over characters for
the DBpedia dataset to predict the class from the description of an entity.
This model is similar to one described in this paper:
"Character-level Convolutional Networks for Text Classification"
http://arxiv.org/abs/1509.01626
and is somewhat of an alternative to the Lua code from here:
https://github.com/zhangxiangxiao/Crepe
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
learn = tf.contrib.learn
FLAGS = None
MAX_DOCUMENT_LENGTH = 100
N_FILTERS = 10
FILTER_SHAPE1 = [20, 256]
FILTER_SHAPE2 = [20, N_FILTERS]
POOLING_WINDOW = 4
POOLING_STRIDE = 2
def char_cnn_model(features, target):
"""Character level convolutional neural network model to predict classes."""
target = tf.one_hot(target, 15, 1, 0)
byte_list = tf.reshape(
tf.one_hot(features, 256), [-1, MAX_DOCUMENT_LENGTH, 256, 1])
with tf.variable_scope('CNN_Layer1'):
# Apply Convolution filtering on input sequence.
conv1 = tf.contrib.layers.convolution2d(
byte_list, N_FILTERS, FILTER_SHAPE1, padding='VALID')
# Add a ReLU for non linearity.
conv1 = tf.nn.relu(conv1)
# Max pooling across output of Convolution+Relu.
pool1 = tf.nn.max_pool(
conv1,
ksize=[1, POOLING_WINDOW, 1, 1],
strides=[1, POOLING_STRIDE, 1, 1],
padding='SAME')
# Transpose matrix so that n_filters from convolution becomes width.
pool1 = tf.transpose(pool1, [0, 1, 3, 2])
with tf.variable_scope('CNN_Layer2'):
# Second level of convolution filtering.
conv2 = tf.contrib.layers.convolution2d(
pool1, N_FILTERS, FILTER_SHAPE2, padding='VALID')
# Max across each filter to get useful features for classification.
pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])
# Apply regular WX + B and classification.
logits = tf.contrib.layers.fully_connected(pool2, 15, activation_fn=None)
loss = tf.losses.softmax_cross_entropy(target, logits)
train_op = tf.contrib.layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='Adam',
learning_rate=0.01)
return ({
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)
}, loss, train_op)
def main(unused_argv):
# Prepare training and testing data
dbpedia = learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data, size='large')
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
char_processor = learn.preprocessing.ByteProcessor(MAX_DOCUMENT_LENGTH)
x_train = np.array(list(char_processor.fit_transform(x_train)))
x_test = np.array(list(char_processor.transform(x_test)))
# Build model
classifier = learn.Estimator(model_fn=char_cnn_model)
# Train and predict
classifier.fit(x_train, y_train, steps=100)
y_predicted = [
p['class'] for p in classifier.predict(
x_test, as_iterable=True)
]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/doc/mpl_examples/axes_grid/demo_edge_colorbar.py | 11 | 2597 | import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import AxesGrid
def get_demo_image():
import numpy as np
from matplotlib.cbook import get_sample_data
f = get_sample_data("axes_grid/bivariate_normal.npy", asfileobj=False)
z = np.load(f)
# z is a numpy array of 15x15
return z, (-3,4,-4,3)
def demo_bottom_cbar(fig):
"""
A grid of 2x2 images with a colorbar for each column.
"""
grid = AxesGrid(fig, 121, # similar to subplot(132)
nrows_ncols = (2, 2),
axes_pad = 0.10,
share_all=True,
label_mode = "1",
cbar_location = "bottom",
cbar_mode="edge",
cbar_pad = 0.25,
cbar_size = "15%",
direction="column"
)
Z, extent = get_demo_image()
cmaps = [plt.get_cmap("autumn"), plt.get_cmap("summer")]
for i in range(4):
im = grid[i].imshow(Z, extent=extent, interpolation="nearest",
cmap=cmaps[i//2])
if i % 2:
cbar = grid.cbar_axes[i//2].colorbar(im)
for cax in grid.cbar_axes:
cax.toggle_label(True)
cax.axis[cax.orientation].set_label("Bar")
# This affects all axes as share_all = True.
grid.axes_llc.set_xticks([-2, 0, 2])
grid.axes_llc.set_yticks([-2, 0, 2])
def demo_right_cbar(fig):
"""
A grid of 2x2 images. Each row has its own colorbar.
"""
    grid = AxesGrid(fig, 122, # similar to subplot(122)
nrows_ncols = (2, 2),
axes_pad = 0.10,
label_mode = "1",
share_all = True,
cbar_location="right",
cbar_mode="edge",
cbar_size="7%",
cbar_pad="2%",
)
Z, extent = get_demo_image()
cmaps = [plt.get_cmap("spring"), plt.get_cmap("winter")]
for i in range(4):
im = grid[i].imshow(Z, extent=extent, interpolation="nearest",
cmap=cmaps[i//2])
if i % 2:
grid.cbar_axes[i//2].colorbar(im)
for cax in grid.cbar_axes:
cax.toggle_label(True)
cax.axis[cax.orientation].set_label('Foo')
# This affects all axes because we set share_all = True.
grid.axes_llc.set_xticks([-2, 0, 2])
grid.axes_llc.set_yticks([-2, 0, 2])
if 1:
F = plt.figure(1, (5.5, 2.5))
F.subplots_adjust(left=0.05, right=0.93)
demo_bottom_cbar(F)
demo_right_cbar(F)
plt.draw()
plt.show()
| mit |
SteerSuite/steersuite-rutgers | steerstats/tools/plotting/PlotParameterData.py | 8 | 2041 |
import matplotlib
from pylab import *
import csv
from matplotlib.backends.backend_pdf import PdfPages
import sys
def cvsToDict(reader):
d = {}
rows=[]
keys=[]
for row in reader:
rows.append(row)
for item in rows[0]:
d[item]=[]
keys.append(item)
for row in rows[1:]:
i=0
for item in row:
d[keys[i]].append(item)
i=i+1
return d
def createFigure(data, parameter, metric):
fig = figure()
ax = fig.add_subplot(111)
ax.plot(data[parameter][:20], data[metric][:20], linewidth=6.0, color='red', marker='.',
markersize=30.0)
ax.set_xlabel(parameter, fontweight='bold', fontsize=24)
ax.set_ylabel(metric, fontweight='bold', fontsize=30)
# ax.set_title(parameter + ' vs ' + metric, fontweight='bold', fontsize=20)
# ax.get_title().set_visible(False)
# creates a grid in the background
ax.tick_params(axis='both', which='major', labelsize=18)
ax.tick_params(axis='both', which='minor', labelsize=16)
ax.yaxis.grid(color='gray', linestyle='dashed')
ax.xaxis.grid(color='gray', linestyle='dashed')
# Puts the grid behind the line being ploted.
ax.set_axisbelow(3)
# plt.axis("tight")
return fig
def plotData(pp, filename, parameter):
reader = csv.reader(open(filename, 'r'))
data = cvsToDict(reader)
for key, value in data.items():
print key
fig = createFigure(data, parameter, key)
pp.savefig(fig, bbox_inches='tight')
if __name__ == '__main__':
if len(sys.argv) != 3:
print "Usage:"
print "python PlotParameterData.py <file_name> <parameter>"
print "Example:"
print "python PlotParameterData.py ../../data/optimization/rvo2d/Uni-Variet/rvo2d_max_neighbors.csv rvo_max_neighbors"
print ""
sys.exit(0)
filename=sys.argv[1]
parameter=sys.argv[2]
pp = PdfPages(parameter + '_plots.pdf')
plotData(pp, filename, parameter)
pp.close()
| gpl-3.0 |
NunoEdgarGub1/scikit-learn | examples/tree/plot_tree_regression.py | 206 | 1476 | """
===================================================================
Decision Tree Regression
===================================================================
A 1D regression with a decision tree.
The :ref:`decision tree <tree>` is
used to fit a sine curve with additional noisy observations. As a result, it
learns local linear regressions approximating the sine curve.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision tree learns too fine
details of the training data and learns from the noise, i.e. it overfits.
"""
print(__doc__)
# Import the necessary modules and libraries
import numpy as np
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(16))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="data")
plt.plot(X_test, y_1, c="g", label="max_depth=2", linewidth=2)
plt.plot(X_test, y_2, c="r", label="max_depth=5", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
hansmeets/bio2rdf-scripts | linkedSPLs/LinkedSPLs-activeMoiety/mergeToActiveMoiety.py | 1 | 3220 | '''
Created 08/15/2014
@authors: Yifan Ning
@summary: merge preferred term, UNII, NUI, preferredNameAndRole, Drug bank URI, ChEBI URI, rxnorm URI, OMOP id, DrOn id for active moiety together.
'''
import os, sys
import pandas as pd
from StringIO import StringIO
import numpy as np
#from numpy import nan
## Define data inputs
#UNIIS = "../linkedSPLs-update/data/UMLS/UNIIs-from-UMLS.txt"
#PT_UNII = "mappings/FDAPreferredTermToUNII.tsv"
#PT_RXCUI = "mappings/fda-active-moiety-string-name-rxnorm-mapping.csv"
UNII_PT_RXCUI = "mappings/PreferredTerm-UNII-Rxcui-mapping.txt"
DRON_CHEBI_RXCUI = "mappings/cleaned_dron-to-chebi-and-rxnorm-11192014.txt"
OMOP_RXCUI = "mappings/mappings-backup-0723/omopid_rxcui.csv"
PT_CHEBI = "mappings/UNIIToChEBI-11162014.txt"
PT_DRUGBANK = "mappings/fda-substance-preferred-name-to-drugbank-11162014.txt"
UNII_NUI_PREFERRED_NAME_ROLE = "mappings/EPC_extraction_most_recent_11162014.txt"
CHEBI_BASE_URI = "http://purl.obolibrary.org/obo/"
## read mappings of pt, unii and rxcui
unii_pt_rxcui_cols = ['unii','pt','rxcui']
unii_pt_rxcui_DF = pd.read_csv(UNII_PT_RXCUI, sep='\t', names=unii_pt_rxcui_cols, skiprows=[0])
## read mappings of pt and drugbank uri
pt_drugbank_cols = ['pt','db_uri1','db_uri2']
pt_drugbank_DF = pd.read_csv(PT_DRUGBANK, sep='\t', names=pt_drugbank_cols)
## read mappings of omop id and rxcui
omop_rxcui_cols = ['omopid','rxcui']
omop_rxcui_DF = pd.read_csv(OMOP_RXCUI, sep='|', names=omop_rxcui_cols)
#print omop_rxcui_DF.info()
## read mappings of pt and chebi
pt_chebi_cols = ['pt','chebi']
pt_chebi_DF = pd.read_csv(PT_CHEBI, sep='\t', names=pt_chebi_cols)
#print pt_chebi_DF.info()
## read mappings of dron and rxcui
dron_chebi_rxcui_cols = ['dron','chebi','rxcui']
dron_chebi_rxcui_DF = pd.read_csv(DRON_CHEBI_RXCUI, sep='|', names=dron_chebi_rxcui_cols)[['dron','rxcui']]
dron_chebi_rxcui_DF = dron_chebi_rxcui_DF[dron_chebi_rxcui_DF.dron.notnull()]
print dron_chebi_rxcui_DF
## read mappings of unii, nui and preferredNameAndRole
unii_nui_namerole_cols = ['setid', 'unii','nui','nameAndRole']
unii_nui_namerole_DF = pd.read_csv(UNII_NUI_PREFERRED_NAME_ROLE, sep='\t', names=unii_nui_namerole_cols)[['unii','nui','nameAndRole']]
#print unii_nui_namerole_DF.info()
## merge pt, unii, rxcui and drugbank uri
unii_pt_rxcui_db_DF = unii_pt_rxcui_DF.merge(pt_drugbank_DF, on=['pt'], how='left')
print unii_pt_rxcui_db_DF.info()
# ## merge omop to unii_pt_rxcui_db_DF
merged_omop_DF = unii_pt_rxcui_db_DF.merge(omop_rxcui_DF, on=['rxcui'], how='left')
print merged_omop_DF.info()
# ## merge chebi to merged_omop_DF
merged_chebi_DF = merged_omop_DF.merge(pt_chebi_DF, on=['pt'], how='left')
print merged_chebi_DF.info()
# ## merge dronid to merged_chebi_DF
merged_dron_DF = merged_chebi_DF.merge(dron_chebi_rxcui_DF, on=['rxcui'], how='left')
print merged_dron_DF.info()
# ## merge <nui> and <preferred name and role> to merged_dron_DF
merged_epc_DF = merged_dron_DF.merge(unii_nui_namerole_DF, on=['unii'], how='left')
#merged_epc_DF[['rxcui']] = merged_epc_DF[['rxcui']].astype(str)
#merged_epc_DF[['omopid']] = merged_epc_DF[['omopid']].astype(str)
merged_epc_DF.to_csv('mergedActiveMoiety.csv', sep='\t', index=False)
| mit |
cmap/cmapPy | cmapPy/math/tests/test_fast_cov.py | 1 | 17355 | import unittest
import logging
import cmapPy.pandasGEXpress.setup_GCToo_logger as setup_logger
import cmapPy.math.fast_cov as fast_cov
import numpy
import tempfile
import os
logger = logging.getLogger(setup_logger.LOGGER_NAME)
class TestFastCov(unittest.TestCase):
@staticmethod
def build_standard_x_y():
x = numpy.array([[1,2,3], [5,7,11]], dtype=float)
logger.debug("x: {}".format(x))
logger.debug("x.shape: {}".format(x.shape))
y = numpy.array([[13, 17, 19], [23, 29, 31]], dtype=float)
logger.debug("y: {}".format(y))
logger.debug("y.shape: {}".format(y.shape))
return x, y
@staticmethod
def build_nan_containing_x_y():
x = numpy.array([[1,numpy.nan,2], [3,5,7], [11,13,17]], dtype=float)
logger.debug("x:\n{}".format(x))
logger.debug("x.shape: {}".format(x.shape))
y = numpy.array([[19, 23, 29], [31, 37, 41], [numpy.nan, 43, 47]], dtype=float)
logger.debug("y:\n{}".format(y))
logger.debug("y.shape: {}".format(y.shape))
return x, y
def test_validate_inputs(self):
shape = (3,2)
#happy path just x
x = numpy.zeros(shape)
fast_cov.validate_inputs(x, None, None)
x = numpy.zeros(1)
fast_cov.validate_inputs(x, None, None)
#unhappy path just x, x does not have shape attribute
with self.assertRaises(fast_cov.CmapPyMathFastCovInvalidInputXY) as context:
fast_cov.validate_inputs(None, None, None)
logger.debug("unhappy path just x, x does not have shape attribute - context.exception: {}".format(context.exception))
self.assertIn("x needs to be numpy array-like", str(context.exception))
#unhappy path x does not have shape attribute, y does not have shape attribute
with self.assertRaises(fast_cov.CmapPyMathFastCovInvalidInputXY) as context:
fast_cov.validate_inputs(None, 3, None)
logger.debug("unhappy path x does not have shape attribute, y does not have shape attribute - context.exception: {}".format(context.exception))
self.assertIn("x needs to be numpy array-like", str(context.exception))
self.assertIn("y needs to be numpy array-like", str(context.exception))
#happy path x and y
x = numpy.zeros(shape)
y = numpy.zeros(shape)
fast_cov.validate_inputs(x, y, None)
#happy path y different shape from x
y = numpy.zeros((3,1))
fast_cov.validate_inputs(x, y, None)
#unhappy path y different shape from x, invalid axis
with self.assertRaises(fast_cov.CmapPyMathFastCovInvalidInputXY) as context:
fast_cov.validate_inputs(x, y.T, None)
logger.debug("unhappy path y different shape from x, invalid axis - context.exception: {}".format(context.exception))
self.assertIn("the number of rows in the x and y matrices must be the same", str(context.exception))
with self.assertRaises(fast_cov.CmapPyMathFastCovInvalidInputXY) as context:
fast_cov.validate_inputs(x.T, y, None)
logger.debug("unhappy path y different shape from x, invalid axis - context.exception: {}".format(context.exception))
self.assertIn("the number of rows in the x and y matrices must be the same", str(context.exception))
#happy path with x, destination
x = numpy.zeros(shape)
dest = numpy.zeros((shape[1], shape[1]))
fast_cov.validate_inputs(x, None, dest)
#unhappy path with x, destination wrong size
dest = numpy.zeros((shape[1]+1, shape[1]))
with self.assertRaises(fast_cov.CmapPyMathFastCovInvalidInputXY) as context:
fast_cov.validate_inputs(x, None, dest)
logger.debug("unhappy path incorrrect shape of destination for provided x - context.exception: {}".format(context.exception))
self.assertIn("x and destination provided", str(context.exception))
self.assertIn("destination must have shape matching", str(context.exception))
#happy path with x, y, destination
x = numpy.zeros(shape)
y = numpy.zeros((shape[0], shape[1]+1))
dest = numpy.zeros((shape[1], shape[1]+1))
fast_cov.validate_inputs(x, y, dest)
#unhappy path x, y, destination wrong size
dest = numpy.zeros((shape[1], shape[1]+2))
with self.assertRaises(fast_cov.CmapPyMathFastCovInvalidInputXY) as context:
fast_cov.validate_inputs(x, y, dest)
logger.debug("unhappy path incorrrect shape of destination for provided x, y - context.exception: {}".format(context.exception))
self.assertIn("x, y, and destination provided", str(context.exception))
self.assertIn("destination must have number of", str(context.exception))
def test_fast_cov_check_validations_run(self):
#unhappy path check that input validation checks are run
with self.assertRaises(fast_cov.CmapPyMathFastCovInvalidInputXY) as context:
fast_cov.fast_cov(None, None)
logger.debug("unhappy path check that input validation checks are run - context.exception: {}".format(context.exception))
def test_fast_cov_just_x(self):
logger.debug("*************happy path just x")
x, _ = TestFastCov.build_standard_x_y()
ex = numpy.cov(x, rowvar=False)
logger.debug("expected ex: {}".format(ex))
r = fast_cov.fast_cov(x)
logger.debug("r: {}".format(r))
self.assertTrue(numpy.allclose(ex, r))
#happy path just x, uses destination
dest = numpy.zeros((x.shape[1], x.shape[1]))
r = fast_cov.fast_cov(x, destination=dest)
logger.debug("happy path just x, uses destination - r: {}".format(r))
self.assertIs(dest, r)
self.assertTrue(numpy.allclose(ex, dest))
#happy path just x, uses destination which is a different type
dest = dest.astype(numpy.float16)
r = fast_cov.fast_cov(x, destination=dest)
logger.debug("happy path, just x, uses destination which is a different type - r: {}".format(r))
self.assertIs(dest, r)
self.assertTrue(numpy.allclose(ex, dest))
#happy path just x, uses destination that is a numpy.memmap
outfile = tempfile.mkstemp()
logger.debug("happy path, just x, uses destination which is a numpy.memmap - outfile: {}".format(outfile))
dest = numpy.memmap(outfile[1], dtype="float16", mode="w+", shape=ex.shape)
dest_array = numpy.asarray(dest)
r = fast_cov.fast_cov(x, destination=dest_array)
dest.flush()
logger.debug(" - r: {}".format(r))
os.close(outfile[0])
os.remove(outfile[1])
#happy path just x, transposed
ex = numpy.cov(x, rowvar=True)
logger.debug("happy path just x, transposed, expected ex: {}".format(ex))
r = fast_cov.fast_cov(x.T)
logger.debug("r: {}".format(r))
self.assertTrue(numpy.allclose(ex, r))
def test_fast_cov_x_and_y(self):
logger.debug("*************happy path x and y")
x, y = TestFastCov.build_standard_x_y()
combined = numpy.hstack([x, y])
logger.debug("combined: {}".format(combined))
logger.debug("combined.shape: {}".format(combined.shape))
off_diag_ind = int(combined.shape[1] / 2)
raw_ex = numpy.cov(combined, rowvar=False)
logger.debug("raw expected produced from numpy.cov on full combined - raw_ex: {}".format(raw_ex))
ex = raw_ex[:off_diag_ind, off_diag_ind:]
logger.debug("expected ex: {}".format(ex))
r = fast_cov.fast_cov(x, y)
logger.debug("r: {}".format(r))
self.assertTrue(numpy.allclose(ex, r))
#happy path x, y, and destination
dest = numpy.zeros((x.shape[1], y.shape[1]))
r = fast_cov.fast_cov(x, y, dest)
logger.debug("happy path x, y, and destination - r: {}".format(r))
self.assertIs(dest, r)
self.assertTrue(numpy.allclose(ex, dest))
#happy path x and y, other direction
combined = numpy.hstack([x.T, y.T])
off_diag_ind = int(combined.shape[1] / 2)
raw_ex = numpy.cov(combined, rowvar=False)
logger.debug("happy path x and y, other direction, raw expected produced from numpy.cov on full combined - raw_ex: {}".format(raw_ex))
ex = raw_ex[:off_diag_ind, off_diag_ind:]
logger.debug("expected ex: {}".format(ex))
r = fast_cov.fast_cov(x.T, y.T)
logger.debug("r: {}".format(r))
self.assertTrue(numpy.allclose(ex, r))
def test_fast_cov_x_and_y_different_shapes(self):
logger.debug("*************happy path x and y different shapes")
x, _ = TestFastCov.build_standard_x_y()
y = numpy.array([[13, 17, 19, 23, 41], [23, 29, 31, 37, 43]])
logger.debug("y.shape: {}".format(y.shape))
logger.debug("y:\n{}".format(y))
combined = numpy.hstack([x, y])
logger.debug("combined: {}".format(combined))
logger.debug("combined.shape: {}".format(combined.shape))
raw_ex = numpy.cov(combined, rowvar=False)
logger.debug("raw expected produced from numpy.cov on full combined - raw_ex: {}".format(raw_ex))
logger.debug("raw_ex.shape: {}".format(raw_ex.shape))
ex = raw_ex[:x.shape[1], -y.shape[1]:]
logger.debug("expected ex: {}".format(ex))
logger.debug("ex.shape: {}".format(ex.shape))
r = fast_cov.fast_cov(x, y)
logger.debug("r: {}".format(r))
self.assertTrue(numpy.allclose(ex, r))
#happy path x and y different shapes, using destination
dest = numpy.zeros((x.shape[1], y.shape[1]))
r = fast_cov.fast_cov(x, y, dest)
logger.debug("happy path x and y different shapes, using destination - r: {}".format(r))
self.assertIs(dest, r)
self.assertTrue(numpy.allclose(ex, dest))
def test_fast_cov_1D_arrays(self):
logger.debug("*****************happy path test_fast_cov_1D_arrays")
x = numpy.array(range(3))
logger.debug("x.shape: {}".format(x.shape))
r = fast_cov.fast_cov(x)
logger.debug("r: {}".format(r))
self.assertEqual(1., r[0][0])
y = numpy.array(range(3,6))
logger.debug("y.shape: {}".format(y.shape))
r = fast_cov.fast_cov(x, y)
logger.debug("r: {}".format(r))
self.assertEqual(1., r[0][0])
def test_calculate_non_mask_overlaps(self):
x = numpy.zeros((3,3))
x[0,1] = numpy.nan
x = numpy.ma.array(x, mask=numpy.isnan(x))
logger.debug("happy path x has 1 nan - x:\n{}".format(x))
r = fast_cov.calculate_non_mask_overlaps(x.mask, x.mask)
logger.debug("r:\n{}".format(r))
expected = numpy.array([[3,2,3], [2,2,2], [3,2,3]], dtype=int)
self.assertTrue(numpy.array_equal(expected, r))
def test_nan_fast_cov_just_x(self):
logger.debug("*************happy path just x")
x, _ = TestFastCov.build_nan_containing_x_y()
ex_with_nan = numpy.cov(x, rowvar=False)
logger.debug("expected with nan's - ex_with_nan:\n{}".format(ex_with_nan))
r = fast_cov.nan_fast_cov(x)
logger.debug("r:\n{}".format(r))
non_nan_locs = ~numpy.isnan(ex_with_nan)
self.assertTrue(numpy.allclose(ex_with_nan[non_nan_locs], r[non_nan_locs]))
check_nominal_nans = []
u = x[1:, 1]
for i in range(3):
t = x[1:, i]
c = numpy.cov(t, u, bias=False)[0,1]
check_nominal_nans.append(c)
logger.debug("calculate entries that would be nan - check_nominal_nans: {}".format(check_nominal_nans))
self.assertTrue(numpy.allclose(check_nominal_nans, r[:, 1]))
self.assertTrue(numpy.allclose(check_nominal_nans, r[1, :]))
def test_nan_fast_cov_x_and_y(self):
logger.debug("*************happy path x and y")
x, y = TestFastCov.build_nan_containing_x_y()
combined = numpy.hstack([x, y])
logger.debug("combined:\n{}".format(combined))
logger.debug("combined.shape: {}".format(combined.shape))
off_diag_ind = int(combined.shape[1] / 2)
raw_ex = numpy.cov(combined, rowvar=False)
logger.debug("raw expected produced from numpy.cov on full combined - raw_ex:\n{}".format(raw_ex))
ex = raw_ex[:off_diag_ind, off_diag_ind:]
logger.debug("expected ex:\n{}".format(ex))
r = fast_cov.nan_fast_cov(x, y)
logger.debug("r:\n{}".format(r))
non_nan_locs = ~numpy.isnan(ex)
logger.debug("ex[non_nan_locs]: {}".format(ex[non_nan_locs]))
logger.debug("r[non_nan_locs]: {}".format(r[non_nan_locs]))
self.assertTrue(numpy.allclose(ex[non_nan_locs], r[non_nan_locs]))
check_nominal_nans = []
t = x[1:, 1]
for i in [1,2]:
u = y[1:, i]
c = numpy.cov(t,u)
check_nominal_nans.append(c[0,1])
logger.debug("calculate entries that would be nan - check_nominal_nans: {}".format(check_nominal_nans))
logger.debug("r values to compare to - r[1, 1:]: {}".format(r[1, 1:]))
self.assertTrue(numpy.allclose(check_nominal_nans, r[1, 1:]))
check_nominal_nans = []
u = y[:2, 0]
for i in [0, 2]:
t = x[:2, i]
c = numpy.cov(t,u)
check_nominal_nans.append(c[0,1])
logger.debug("calculate entries that would be nan - check_nominal_nans: {}".format(check_nominal_nans))
logger.debug("r values to compare to - r[[0,2], 0]: {}".format(r[[0,2], 0]))
self.assertTrue(numpy.allclose(check_nominal_nans, r[[0,2], 0]))
self.assertTrue(numpy.isnan(r[1,0]), """expect this entry to be nan b/c for the intersection of x[:,1] and y[:,0]
there is only one entry in common, therefore covariance is undefined""")
def test_nan_fast_cov_x_and_y_different_shapes(self):
logger.debug("*************happy path x and y different shapes")
x, t = TestFastCov.build_nan_containing_x_y()
y = numpy.zeros((t.shape[0], t.shape[1]+1))
y[:, :t.shape[1]] = t
y[:, t.shape[1]] = [53, 59, 61]
logger.debug("y.shape: {}".format(y.shape))
logger.debug("y:\n{}".format(y))
combined = numpy.hstack([x, y])
logger.debug("combined:\n{}".format(combined))
logger.debug("combined.shape: {}".format(combined.shape))
raw_ex = numpy.cov(combined, rowvar=False)
logger.debug("raw expected produced from numpy.cov on full combined - raw_ex:\n{}".format(raw_ex))
logger.debug("raw_ex.shape: {}".format(raw_ex.shape))
ex = raw_ex[:x.shape[1], -y.shape[1]:]
logger.debug("expected ex:\n{}".format(ex))
logger.debug("ex.shape: {}".format(ex.shape))
r = fast_cov.nan_fast_cov(x, y)
logger.debug("r:\n{}".format(r))
non_nan_locs = ~numpy.isnan(ex)
logger.debug("ex[non_nan_locs]: {}".format(ex[non_nan_locs]))
logger.debug("r[non_nan_locs]: {}".format(r[non_nan_locs]))
self.assertTrue(numpy.allclose(ex[non_nan_locs], r[non_nan_locs]))
check_nominal_nans = []
t = x[1:, 1]
for i in [1,2,3]:
u = y[1:, i]
c = numpy.cov(t,u)
check_nominal_nans.append(c[0,1])
logger.debug("calculate entries that would be nan - check_nominal_nans: {}".format(check_nominal_nans))
logger.debug("r values to compare to - r[1, 1:]: {}".format(r[1, 1:]))
self.assertTrue(numpy.allclose(check_nominal_nans, r[1, 1:]))
check_nominal_nans = []
u = y[:2, 0]
for i in [0, 2]:
t = x[:2, i]
c = numpy.cov(t,u)
check_nominal_nans.append(c[0,1])
logger.debug("calculate entries that would be nan - check_nominal_nans: {}".format(check_nominal_nans))
logger.debug("r values to compare to - r[[0,2], 0]: {}".format(r[[0,2], 0]))
self.assertTrue(numpy.allclose(check_nominal_nans, r[[0,2], 0]))
self.assertTrue(numpy.isnan(r[1,0]), """expect this entry to be nan b/c for the intersection of x[:,1] and y[:,0]
there is only one entry in common, therefore covariance is undefined""")
def test_nan_fast_cov_all_nan(self):
x = numpy.zeros(3)
x[:] = numpy.nan
x = x[:, numpy.newaxis]
logger.debug("x:\n{}".format(x))
r = fast_cov.nan_fast_cov(x)
logger.debug("r:\n{}".format(r))
self.assertEqual(0, numpy.sum(numpy.isnan(r)))
def test_nan_fast_cov_1D_arrays(self):
logger.debug("*****************happy path test_nan_fast_cov_1D_arrays")
x = numpy.array(range(3))
logger.debug("x.shape: {}".format(x.shape))
r = fast_cov.nan_fast_cov(x)
logger.debug("r: {}".format(r))
self.assertEqual(1., r[0][0])
y = numpy.array(range(3,6))
logger.debug("y.shape: {}".format(y.shape))
r = fast_cov.fast_cov(x, y)
logger.debug("r: {}".format(r))
self.assertEqual(1., r[0][0])
if __name__ == "__main__":
setup_logger.setup(verbose=True)
unittest.main()
| bsd-3-clause |
bartosh/zipline | tests/test_tradesimulation.py | 3 | 5455 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import time
import pandas as pd
from mock import patch
from nose_parameterized import parameterized
from six.moves import range
from zipline import TradingAlgorithm
from zipline.gens.sim_engine import BEFORE_TRADING_START_BAR
from zipline.finance.performance import PerformanceTracker
from zipline.finance.asset_restrictions import NoRestrictions
from zipline.gens.tradesimulation import AlgorithmSimulator
from zipline.sources.benchmark_source import BenchmarkSource
from zipline.test_algorithms import NoopAlgorithm
from zipline.testing.fixtures import (
WithDataPortal,
WithSimParams,
WithTradingEnvironment,
ZiplineTestCase,
)
from zipline.utils import factory
from zipline.testing.core import FakeDataPortal
from zipline.utils.calendars.trading_calendar import days_at_time
class BeforeTradingAlgorithm(TradingAlgorithm):
def __init__(self, *args, **kwargs):
self.before_trading_at = []
super(BeforeTradingAlgorithm, self).__init__(*args, **kwargs)
def before_trading_start(self, data):
self.before_trading_at.append(self.datetime)
def handle_data(self, data):
pass
FREQUENCIES = {'daily': 0, 'minute': 1} # daily is less frequent than minute
class TestTradeSimulation(WithTradingEnvironment, ZiplineTestCase):
def fake_minutely_benchmark(self, dt):
return 0.01
def test_minutely_emissions_generate_performance_stats_for_last_day(self):
params = factory.create_simulation_parameters(num_days=1,
data_frequency='minute',
emission_rate='minute')
with patch.object(BenchmarkSource, "get_value",
self.fake_minutely_benchmark):
algo = NoopAlgorithm(sim_params=params, env=self.env)
algo.run(FakeDataPortal(self.env))
self.assertEqual(len(algo.perf_tracker.sim_params.sessions), 1)
@parameterized.expand([('%s_%s_%s' % (num_sessions, freq, emission_rate),
num_sessions, freq, emission_rate)
for freq in FREQUENCIES
for emission_rate in FREQUENCIES
for num_sessions in range(1, 4)
if FREQUENCIES[emission_rate] <= FREQUENCIES[freq]])
def test_before_trading_start(self, test_name, num_days, freq,
emission_rate):
params = factory.create_simulation_parameters(
num_days=num_days, data_frequency=freq,
emission_rate=emission_rate)
def fake_benchmark(self, dt):
return 0.01
with patch.object(BenchmarkSource, "get_value",
self.fake_minutely_benchmark):
algo = BeforeTradingAlgorithm(sim_params=params, env=self.env)
algo.run(FakeDataPortal(self.env))
self.assertEqual(
len(algo.perf_tracker.sim_params.sessions),
num_days
)
bts_minutes = days_at_time(
params.sessions, time(8, 45), "US/Eastern"
)
self.assertTrue(
bts_minutes.equals(
pd.DatetimeIndex(algo.before_trading_at)
),
"Expected %s but was %s." % (params.sessions,
algo.before_trading_at))
class BeforeTradingStartsOnlyClock(object):
def __init__(self, bts_minute):
self.bts_minute = bts_minute
def __iter__(self):
yield self.bts_minute, BEFORE_TRADING_START_BAR
class TestBeforeTradingStartSimulationDt(WithSimParams,
WithDataPortal,
ZiplineTestCase):
def test_bts_simulation_dt(self):
code = """
def initialize(context):
pass
"""
algo = TradingAlgorithm(script=code,
sim_params=self.sim_params,
env=self.env)
algo.perf_tracker = PerformanceTracker(
sim_params=self.sim_params,
trading_calendar=self.trading_calendar,
env=self.env,
)
dt = pd.Timestamp("2016-08-04 9:13:14", tz='US/Eastern')
algo_simulator = AlgorithmSimulator(
algo,
self.sim_params,
self.data_portal,
BeforeTradingStartsOnlyClock(dt),
algo._create_benchmark_source(),
NoRestrictions(),
None
)
# run through the algo's simulation
list(algo_simulator.transform())
# since the clock only ever emitted a single before_trading_start
# event, we can check that the simulation_dt was properly set
self.assertEqual(dt, algo_simulator.simulation_dt)
| apache-2.0 |
0asa/scikit-learn | examples/linear_model/plot_robust_fit.py | 238 | 2414 | """
Robust linear estimator fitting
===============================
Here a sine function is fit with a polynomial of order 3, for values
close to zero.
Robust fitting is demoed in different situations:
- No measurement errors, only modelling errors (fitting a sine with a
polynomial)
- Measurement errors in X
- Measurement errors in y
The median absolute deviation to non corrupt new data is used to judge
the quality of the prediction.
What we can see is that:
- RANSAC is good for strong outliers in the y direction
- TheilSen is good for small outliers, both in the X and y directions, but has
  a break point above which it performs worse than OLS.
"""
from matplotlib import pyplot as plt
import numpy as np
from sklearn import linear_model, metrics
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
np.random.seed(42)
X = np.random.normal(size=400)
y = np.sin(X)
# Make sure that it X is 2D
X = X[:, np.newaxis]
X_test = np.random.normal(size=200)
y_test = np.sin(X_test)
X_test = X_test[:, np.newaxis]
y_errors = y.copy()
y_errors[::3] = 3
X_errors = X.copy()
X_errors[::3] = 3
y_errors_large = y.copy()
y_errors_large[::3] = 10
X_errors_large = X.copy()
X_errors_large[::3] = 10
estimators = [('OLS', linear_model.LinearRegression()),
('Theil-Sen', linear_model.TheilSenRegressor(random_state=42)),
('RANSAC', linear_model.RANSACRegressor(random_state=42)), ]
x_plot = np.linspace(X.min(), X.max())
for title, this_X, this_y in [
('Modeling errors only', X, y),
('Corrupt X, small deviants', X_errors, y),
('Corrupt y, small deviants', X, y_errors),
('Corrupt X, large deviants', X_errors_large, y),
('Corrupt y, large deviants', X, y_errors_large)]:
plt.figure(figsize=(5, 4))
plt.plot(this_X[:, 0], this_y, 'k+')
for name, estimator in estimators:
model = make_pipeline(PolynomialFeatures(3), estimator)
model.fit(this_X, this_y)
mse = metrics.mean_squared_error(model.predict(X_test), y_test)
y_plot = model.predict(x_plot[:, np.newaxis])
plt.plot(x_plot, y_plot,
label='%s: error = %.3f' % (name, mse))
plt.legend(loc='best', frameon=False,
title='Error: mean absolute deviation\n to non corrupt data')
plt.xlim(-4, 10.2)
plt.ylim(-2, 10.2)
plt.title(title)
plt.show()
| bsd-3-clause |
TuringMachinegun/Turing_Neural_Networks | examples/central_pattern_generator_tm.py | 1 | 6040 | __author__ = 'Giovanni Sirio Carmantini'
"""In this file, a R-ANN is constructed from a Turing Machine which
dynamics reproduce two gait pattern sequences, depending on the
machine control state.
A Generalized Shift is first created from the TM description.
Subsequently, an NDA simulating the GS dynamics is created.
Then, a R-ANN is constructed from the NDA.
The dynamics of the R-ANN is simulated, while manipulating
the activation of the c_x neuron, and the two gait patterns are
produced. Results are visualized.
"""
import os.path
import sys
import inspect
curr_file_path = os.path.realpath(inspect.getfile(inspect.currentframe()))
curr_dir_path = os.path.dirname(curr_file_path)
parent_dir = os.path.join(curr_dir_path, os.path.pardir)
sys.path.append(parent_dir)
import symdyn
import neuraltm
from collections import OrderedDict
import matplotlib.pyplot as plt
from matplotlib import gridspec
import numpy as np
# Turing Machine description (latex syntax for typesetting in plot)
tape_symbols = ["1", "2", "3", "4"]
states = ["w", "g"]
tm_descr = {("w", "1"): ("w", "3", "S"),
("w", "2"): ("w", "4", "S"),
("w", "3"): ("w", "2", "S"),
("w", "4"): ("w", "1", "S"),
("g", "1"): ("g", "2", "S"),
("g", "2"): ("g", "3", "S"),
("g", "3"): ("g", "4", "S"),
("g", "4"): ("g", "1", "S")}
# create encoders for states and tape symbols
ge_q = symdyn.GodelEncoder(states)
ge_s = symdyn.GodelEncoder(tape_symbols)
# and from the simple encoders, create the actual encoders for the
# alpha and beta subsequences
ge_alpha = symdyn.compactGodelEncoder(ge_q, ge_s)
ge_beta = ge_s
# create Generalized Shift from machine description...
tm_gs = symdyn.TMGeneralizedShift(states, tape_symbols, tm_descr)
# ...then NDA from the Generalized Shift and encoders...
nda = symdyn.NonlinearDynamicalAutomaton(tm_gs, ge_alpha, ge_beta)
# ... and finally the R-ANN simulating the TM from the NDA
tm_nn = neuraltm.NeuralTM(nda)
# set initial conditions for the computation
init_state = ge_alpha.encode_sequence(["g", "1"])
init_tape = ge_beta.encode_sequence(list("1"))
# run R-ANN
inp = np.zeros(16) # set input for each time step
inp[:8] = 0.3
inp[8:] = 0.7
tm_nn.MCLx.activation[:] = 0
tm_nn.MCLy.activation[:] = init_tape
iterations = inp.size
MCL_n = tm_nn.MCLx.n_units + tm_nn.MCLy.n_units
BSL_n = tm_nn.BSLbx.n_units + tm_nn.BSLby.n_units
LTL_n = tm_nn.LTL.n_units
ld = OrderedDict(
[("LTL", {"acts": np.zeros((LTL_n, iterations)), "n": LTL_n}),
("BSL", {"acts": np.zeros((BSL_n, iterations)), "n": BSL_n}),
("MCL", {"acts": np.zeros((MCL_n, iterations)), "n": MCL_n})])
# run
for i, input_state in enumerate(inp):
tm_nn.MCLx.activation[:] = input_state
ld["MCL"]["acts"][:, i] = np.concatenate((tm_nn.MCLx.activation,
tm_nn.MCLy.activation))
tm_nn.run_net()
ld["BSL"]["acts"][:, i] = np.concatenate((tm_nn.BSLbx.activation,
tm_nn.BSLby.activation))
ld["LTL"]["acts"][:, i] = tm_nn.LTL.activation
# ...and plot
plt.ion()
timeseries_fig = plt.figure()
gs = gridspec.GridSpec(nrows=6, ncols=2, width_ratios=[14, 1],
height_ratios=[20] +
[ld[l]["n"] for l in ld] +
[3, 20])
for i, k in enumerate(ld, 1):
ld[k]["ax"] = plt.subplot(gs[i, 0])
n = float(ld[k]["acts"].shape[0])
ld[k]["plot"] = ld[k]["ax"].pcolor(ld[k]["acts"], cmap="OrRd")
ld[k]["ax"].set_ylim([0, n])
ld[k]["ax"].set_yticks([n / 2])
ld[k]["ax"].set_yticklabels([k + " units"])
ld[k]["ax"].set_xticks(range(iterations))
ld[k]["ax"].set_xticklabels([])
for tick in ld[k]["ax"].yaxis.get_major_ticks():
tick.tick1On = tick.tick2On = False
for tick in ld[k]["ax"].xaxis.get_major_ticks():
tick.tick1On = tick.tick2On = False
plt.grid(axis="x")
cbar_ax = plt.subplot(gs[1:4, 1])
cbar_ax.set_xticks([])
ld[k]["plot"].set_clim(vmin=0, vmax=1)
cbar = plt.colorbar(ld[k]["plot"], cbar_ax)
cbar.solids.set_edgecolor("face")
cbar.solids.set_rasterized(True)
cbar_ax.set_ylabel("Activation")
inp_ax = plt.subplot(gs[5, 0])
inp_axr = inp_ax.twinx()
inp_ax.set_xlim([0, iterations])
inp_axr.set_xlim([0, iterations])
inp_ax.bar(range(inp.size), inp, width=1,
edgecolor="none", facecolor="black")
inp_ax.set_yticks([0.5])
inp_ax.set_yticklabels(["$c_x$ activation"])
inp_ax.set_ylim([0, 1])
inp_axr.set_ylim([0, 1])
inp_axr.set_yticks([0, 1])
inp_ax.set_xticks(range(iterations))
inp_ax.set_xticklabels([])
inp_ax.set_xticks(np.array(range(iterations)) + 0.5, minor=True)
inp_ax.set_xticklabels(range(iterations), ha="center", minor=True)
inp_ax.grid(axis="x", which="major")
inp_ax.set_xlabel("Time step")
inp_ax.arrow(float(iterations) / 2, 1.1, 0,
0.8, fc='black', ec='black',
width=0.5,
head_width=1, head_length=0.2,
clip_on=False,
length_includes_head=True)
for tick in inp_ax.yaxis.get_major_ticks():
tick.tick1On = tick.tick2On = False
for tick in inp_ax.xaxis.get_minor_ticks():
tick.tick1On = tick.tick2On = False
gait_ax = plt.subplot(gs[0, 0])
gait_ax.set_xticks(range(9))
gait_ax.set_xticklabels([])
gait_ax.set_yticks([])
gait_ax.set_yticklabels([])
gait_ax.grid(axis="x", linestyle="-")
for tick in gait_ax.xaxis.get_major_ticks():
tick.tick1On = tick.tick2On = False
plt.tight_layout()
# Plot also synthetic ERPs
synth_fig = plt.figure()
s_ax = synth_fig.add_subplot(111)
plt.style.use("ggplot")
all_acts = np.concatenate(
(ld["MCL"]["acts"], ld["BSL"]["acts"], ld["LTL"]["acts"]), axis=0)
walk_acts = np.mean(all_acts, axis=0)[:8]
gallop_acts = np.mean(all_acts, axis=0)[8:]
s_ax.plot(range(8), walk_acts, label="Walk gait", color="blue", lw=2)
s_ax.plot(range(8), gallop_acts, label="Gallop gait", color="red", lw=2)
s_ax.axis([0, 7, 0, 0.2])
s_ax.set_xlabel("Time step")
s_ax.set_ylabel("Mean network activation")
s_ax.legend()
| mit |
vortex-ape/scikit-learn | sklearn/cluster/optics_.py | 2 | 34019 | # -*- coding: utf-8 -*-
"""Ordering Points To Identify the Clustering Structure (OPTICS)
These routines execute the OPTICS algorithm, and implement various
cluster extraction methods of the ordered list.
Authors: Shane Grigsby <[email protected]>
Amy X. Zhang <[email protected]>
License: BSD 3 clause
"""
from __future__ import division
import warnings
import numpy as np
from ..utils import check_array
from ..utils.validation import check_is_fitted
from ..neighbors import NearestNeighbors
from ..base import BaseEstimator, ClusterMixin
from ..metrics import pairwise_distances
from ._optics_inner import quick_scan
def optics(X, min_samples=5, max_eps=np.inf, metric='euclidean',
p=2, metric_params=None, maxima_ratio=.75,
rejection_ratio=.7, similarity_threshold=0.4,
significant_min=.003, min_cluster_size=.005,
min_maxima_ratio=0.001, algorithm='ball_tree',
leaf_size=30, n_jobs=None):
"""Perform OPTICS clustering from vector array
OPTICS: Ordering Points To Identify the Clustering Structure
Closely related to DBSCAN, finds core sample of high density and expands
clusters from them. Unlike DBSCAN, keeps cluster hierarchy for a variable
neighborhood radius. Better suited for usage on large point datasets than
the current sklearn implementation of DBSCAN.
Read more in the :ref:`User Guide <optics>`.
Parameters
----------
X : array, shape (n_samples, n_features)
The data.
min_samples : int (default=5)
The number of samples in a neighborhood for a point to be considered
as a core point.
max_eps : float, optional (default=np.inf)
The maximum distance between two samples for them to be considered
as in the same neighborhood. Default value of "np.inf" will identify
clusters across all scales; reducing `max_eps` will result in
shorter run times.
metric : string or callable, optional (default='euclidean')
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
p : integer, optional (default=2)
Parameter for the Minkowski metric from
:class:`sklearn.metrics.pairwise_distances`. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default=None)
Additional keyword arguments for the metric function.
maxima_ratio : float, optional (default=.75)
The maximum ratio we allow of average height of clusters on the
right and left to the local maxima in question. The higher the
ratio, the more generous the algorithm is to preserving local
minima, and the more cuts the resulting tree will have.
rejection_ratio : float, optional (default=.7)
Adjusts the fitness of the clustering. When the maxima_ratio is
exceeded, determine which of the clusters to the left and right to
reject based on rejection_ratio. Higher values will result in points
being more readily classified as noise; conversely, lower values will
result in more points being clustered.
similarity_threshold : float, optional (default=.4)
Used to check if nodes can be moved up one level, that is, if the
new cluster created is too "similar" to its parent, given the
similarity threshold. Similarity can be determined by 1) the size
of the new cluster relative to the size of the parent node or
2) the average of the reachability values of the new cluster
relative to the average of the reachability values of the parent
node. A lower value for the similarity threshold means less levels
in the tree.
significant_min : float, optional (default=.003)
Sets a lower threshold on how small a significant maxima can be.
min_cluster_size : int > 1 or float between 0 and 1 (default=0.005)
Minimum number of samples in an OPTICS cluster, expressed as an
absolute number or a fraction of the number of samples (rounded
to be at least 2).
min_maxima_ratio : float, optional (default=.001)
Used to determine neighborhood size for minimum cluster membership.
Each local maxima should be a largest value in a neighborhood
of size `min_maxima_ratio * len(X)` from left and right.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree` (default)
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default=30)
Leaf size passed to :class:`BallTree` or :class:`KDTree`. This can
affect the speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
n_jobs : int or None, optional (default=None)
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Returns
-------
core_sample_indices_ : array, shape (n_core_samples,)
The indices of the core samples.
labels_ : array, shape (n_samples,)
The estimated labels.
See also
--------
OPTICS
An estimator interface for this clustering algorithm.
dbscan
A similar clustering for a specified neighborhood radius (eps).
Our implementation is optimized for runtime.
References
----------
Ankerst, Mihael, Markus M. Breunig, Hans-Peter Kriegel, and Jörg Sander.
"OPTICS: ordering points to identify the clustering structure." ACM SIGMOD
Record 28, no. 2 (1999): 49-60.
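Examples
--------
A minimal usage sketch of the functional interface (illustrative only;
the resulting core samples and labels depend on the data, so the final
call is not run as a doctest):
>>> import numpy as np
>>> from sklearn.cluster.optics_ import optics
>>> X = np.array([[1., 2.], [2., 5.], [3., 6.],
... [8., 7.], [8., 8.], [7., 3.]])
>>> core_samples, labels = optics(X, min_samples=2) # doctest: +SKIP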
"""
clust = OPTICS(min_samples, max_eps, metric, p, metric_params,
maxima_ratio, rejection_ratio,
similarity_threshold, significant_min,
min_cluster_size, min_maxima_ratio,
algorithm, leaf_size, n_jobs)
clust.fit(X)
return clust.core_sample_indices_, clust.labels_
class OPTICS(BaseEstimator, ClusterMixin):
"""Estimate clustering structure from vector array
OPTICS: Ordering Points To Identify the Clustering Structure
Closely related to DBSCAN, finds core sample of high density and expands
clusters from them. Unlike DBSCAN, keeps cluster hierarchy for a variable
neighborhood radius. Better suited for usage on large point datasets than
the current sklearn implementation of DBSCAN.
Read more in the :ref:`User Guide <optics>`.
Parameters
----------
min_samples : int (default=5)
The number of samples in a neighborhood for a point to be considered
as a core point.
max_eps : float, optional (default=np.inf)
The maximum distance between two samples for them to be considered
as in the same neighborhood. Default value of "np.inf" will identify
clusters across all scales; reducing `max_eps` will result in
shorter run times.
metric : string or callable, optional (default='euclidean')
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
p : integer, optional (default=2)
Parameter for the Minkowski metric from
:class:`sklearn.metrics.pairwise_distances`. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default=None)
Additional keyword arguments for the metric function.
maxima_ratio : float, optional (default=.75)
The maximum ratio we allow of average height of clusters on the
right and left to the local maxima in question. The higher the
ratio, the more generous the algorithm is to preserving local
minima, and the more cuts the resulting tree will have.
rejection_ratio : float, optional (default=.7)
Adjusts the fitness of the clustering. When the maxima_ratio is
exceeded, determine which of the clusters to the left and right to
reject based on rejection_ratio. Higher values will result in points
being more readily classified as noise; conversely, lower values will
result in more points being clustered.
similarity_threshold : float, optional (default=.4)
Used to check if nodes can be moved up one level, that is, if the
new cluster created is too "similar" to its parent, given the
similarity threshold. Similarity can be determined by 1) the size
of the new cluster relative to the size of the parent node or
2) the average of the reachability values of the new cluster
relative to the average of the reachability values of the parent
node. A lower value for the similarity threshold means less levels
in the tree.
significant_min : float, optional (default=.003)
Sets a lower threshold on how small a significant maxima can be.
min_cluster_size : int > 1 or float between 0 and 1 (default=0.005)
Minimum number of samples in an OPTICS cluster, expressed as an
absolute number or a fraction of the number of samples (rounded
to be at least 2).
min_maxima_ratio : float, optional (default=.001)
Used to determine neighborhood size for minimum cluster membership.
Each local maxima should be a largest value in a neighborhood
of size `min_maxima_ratio * len(X)` from left and right.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree` (default)
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default=30)
Leaf size passed to :class:`BallTree` or :class:`KDTree`. This can
affect the speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
n_jobs : int or None, optional (default=None)
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Attributes
----------
core_sample_indices_ : array, shape (n_core_samples,)
Indices of core samples.
labels_ : array, shape (n_samples,)
Cluster labels for each point in the dataset given to fit().
Noisy samples are given the label -1.
reachability_ : array, shape (n_samples,)
Reachability distances per sample.
ordering_ : array, shape (n_samples,)
The cluster ordered list of sample indices
core_distances_ : array, shape (n_samples,)
Distance at which each sample becomes a core point.
Points which will never be core have a distance of inf.
See also
--------
DBSCAN
A similar clustering for a specified neighborhood radius (eps).
Our implementation is optimized for runtime.
References
----------
Ankerst, Mihael, Markus M. Breunig, Hans-Peter Kriegel, and Jörg Sander.
"OPTICS: ordering points to identify the clustering structure." ACM SIGMOD
Record 28, no. 2 (1999): 49-60.
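Examples
--------
A minimal usage sketch (illustrative only; the fitted labels depend on
the data, so the snippet is not run as a doctest):
>>> import numpy as np
>>> from sklearn.cluster.optics_ import OPTICS
>>> X = np.array([[1., 2.], [2., 5.], [3., 6.],
... [8., 7.], [8., 8.], [7., 3.]])
>>> clust = OPTICS(min_samples=2).fit(X) # doctest: +SKIP
>>> clust.labels_ # doctest: +SKIP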
"""
def __init__(self, min_samples=5, max_eps=np.inf, metric='euclidean',
p=2, metric_params=None, maxima_ratio=.75,
rejection_ratio=.7, similarity_threshold=0.4,
significant_min=.003, min_cluster_size=.005,
min_maxima_ratio=0.001, algorithm='ball_tree',
leaf_size=30, n_jobs=None):
self.max_eps = max_eps
self.min_samples = min_samples
self.maxima_ratio = maxima_ratio
self.rejection_ratio = rejection_ratio
self.similarity_threshold = similarity_threshold
self.significant_min = significant_min
self.min_cluster_size = min_cluster_size
self.min_maxima_ratio = min_maxima_ratio
self.algorithm = algorithm
self.metric = metric
self.metric_params = metric_params
self.p = p
self.leaf_size = leaf_size
self.n_jobs = n_jobs
def fit(self, X, y=None):
"""Perform OPTICS clustering
Extracts an ordered list of points and reachability distances, and
performs initial clustering using `max_eps` distance specified at
OPTICS object instantiation.
Parameters
----------
X : array, shape (n_samples, n_features)
The data.
y : ignored
Returns
-------
self : instance of OPTICS
The instance.
"""
X = check_array(X, dtype=float)
n_samples = len(X)
if self.min_samples > n_samples:
raise ValueError("Number of training samples (n_samples=%d) must "
"be greater than min_samples (min_samples=%d) "
"used for clustering." %
(n_samples, self.min_samples))
if self.min_cluster_size <= 0 or (self.min_cluster_size !=
int(self.min_cluster_size)
and self.min_cluster_size > 1):
raise ValueError('min_cluster_size must be a positive integer or '
'a float between 0 and 1. Got %r' %
self.min_cluster_size)
elif self.min_cluster_size > n_samples:
raise ValueError('min_cluster_size must be no greater than the '
'number of samples (%d). Got %d' %
(n_samples, self.min_cluster_size))
# Start all points as 'unprocessed' ##
self.reachability_ = np.empty(n_samples)
self.reachability_.fill(np.inf)
self.core_distances_ = np.empty(n_samples)
self.core_distances_.fill(np.nan)
# Start all points as noise ##
self.labels_ = np.full(n_samples, -1, dtype=int)
nbrs = NearestNeighbors(n_neighbors=self.min_samples,
algorithm=self.algorithm,
leaf_size=self.leaf_size, metric=self.metric,
metric_params=self.metric_params, p=self.p,
n_jobs=self.n_jobs)
nbrs.fit(X)
self.core_distances_[:] = nbrs.kneighbors(X,
self.min_samples)[0][:, -1]
self.ordering_ = self._calculate_optics_order(X, nbrs)
indices_, self.labels_ = _extract_optics(self.ordering_,
self.reachability_,
self.maxima_ratio,
self.rejection_ratio,
self.similarity_threshold,
self.significant_min,
self.min_cluster_size,
self.min_maxima_ratio)
self.core_sample_indices_ = indices_
return self
# OPTICS helper functions
def _calculate_optics_order(self, X, nbrs):
# Main OPTICS loop. Not parallelizable. The order that entries are
# written to the 'ordering_' list is important!
processed = np.zeros(X.shape[0], dtype=bool)
ordering = np.zeros(X.shape[0], dtype=int)
ordering_idx = 0
for point in range(X.shape[0]):
if processed[point]:
continue
if self.core_distances_[point] <= self.max_eps:
while not processed[point]:
processed[point] = True
ordering[ordering_idx] = point
ordering_idx += 1
point = self._set_reach_dist(point, processed, X, nbrs)
else: # For very noisy points
ordering[ordering_idx] = point
ordering_idx += 1
processed[point] = True
return ordering
def _set_reach_dist(self, point_index, processed, X, nbrs):
P = X[point_index:point_index + 1]
indices = nbrs.radius_neighbors(P, radius=self.max_eps,
return_distance=False)[0]
# Getting indices of neighbors that have not been processed
unproc = np.compress((~np.take(processed, indices)).ravel(),
indices, axis=0)
# Keep n_jobs = 1 in the following lines...please
if not unproc.size:
# Everything is already processed. Return to main loop
return point_index
if self.metric == 'precomputed':
dists = X[point_index, unproc]
else:
dists = pairwise_distances(P, np.take(X, unproc, axis=0),
self.metric, n_jobs=None).ravel()
rdists = np.maximum(dists, self.core_distances_[point_index])
new_reach = np.minimum(np.take(self.reachability_, unproc), rdists)
self.reachability_[unproc] = new_reach
# Define return order based on reachability distance
return (unproc[quick_scan(np.take(self.reachability_, unproc),
dists)])
def extract_dbscan(self, eps):
"""Performs DBSCAN extraction for an arbitrary epsilon.
Extraction runs in linear time. Note that if the `max_eps` OPTICS
parameter was set to < inf for extracting reachability and ordering
arrays, DBSCAN extractions will be unstable for `eps` values close to
`max_eps`. Setting `eps` < (`max_eps` / 5.0) will guarantee
extraction parity with DBSCAN.
Parameters
----------
eps : float or int, required
DBSCAN `eps` parameter. Must be set to < `max_eps`. Equivalence
with DBSCAN algorithm is achieved if `eps` is < (`max_eps` / 5)
Returns
-------
core_sample_indices_ : array, shape (n_core_samples,)
The indices of the core samples.
labels_ : array, shape (n_samples,)
The estimated labels.
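Examples
--------
A minimal sketch (``X`` stands for any 2D data array; ``eps=0.5`` is
chosen to satisfy the ``eps < max_eps / 5`` guideline above):
>>> clust = OPTICS(max_eps=5.0).fit(X) # doctest: +SKIP
>>> core_indices, labels = clust.extract_dbscan(eps=0.5) # doctest: +SKIP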
"""
check_is_fitted(self, 'reachability_')
if eps > self.max_eps:
raise ValueError('Specify an epsilon smaller than %s. Got %s.'
% (self.max_eps, eps))
if eps * 5.0 > (self.max_eps * 1.05):
warnings.warn(
"Warning, max_eps (%s) is close to eps (%s): "
"Output may be unstable." % (self.max_eps, eps),
RuntimeWarning, stacklevel=2)
# Stability warning is documented in _extract_dbscan method...
return _extract_dbscan(self.ordering_, self.core_distances_,
self.reachability_, eps)
def _extract_dbscan(ordering, core_distances, reachability, eps):
"""Performs DBSCAN extraction for an arbitrary epsilon (`eps`).
Parameters
----------
ordering : array, shape (n_samples,)
OPTICS ordered point indices (`ordering_`)
core_distances : array, shape (n_samples,)
Distances at which points become core (`core_distances_`)
reachability : array, shape (n_samples,)
Reachability distances calculated by OPTICS (`reachability_`)
eps : float or int
DBSCAN `eps` parameter
Returns
-------
core_sample_indices_ : array, shape (n_core_samples,)
The indices of the core samples.
labels_ : array, shape (n_samples,)
The estimated labels.
"""
n_samples = len(core_distances)
is_core = np.zeros(n_samples, dtype=bool)
labels = np.zeros(n_samples, dtype=int)
far_reach = reachability > eps
near_core = core_distances <= eps
labels[ordering] = np.cumsum(far_reach[ordering] & near_core[ordering]) - 1
labels[far_reach & ~near_core] = -1
is_core[near_core] = True
return np.arange(n_samples)[is_core], labels
def _extract_optics(ordering, reachability, maxima_ratio=.75,
rejection_ratio=.7, similarity_threshold=0.4,
significant_min=.003, min_cluster_size=.005,
min_maxima_ratio=0.001):
"""Performs automatic cluster extraction for variable density data.
Parameters
----------
ordering : array, shape (n_samples,)
OPTICS ordered point indices (`ordering_`)
reachability : array, shape (n_samples,)
Reachability distances calculated by OPTICS (`reachability_`)
maxima_ratio : float, optional
The maximum ratio we allow of average height of clusters on the
right and left to the local maxima in question. The higher the
ratio, the more generous the algorithm is to preserving local
minima, and the more cuts the resulting tree will have.
rejection_ratio : float, optional
Adjusts the fitness of the clustering. When the maxima_ratio is
exceeded, determine which of the clusters to the left and right to
reject based on rejection_ratio. Higher values will result in points
being more readily classified as noise; conversely, lower values will
result in more points being clustered.
similarity_threshold : float, optional
Used to check if nodes can be moved up one level, that is, if the
new cluster created is too "similar" to its parent, given the
similarity threshold. Similarity can be determined by 1) the size
of the new cluster relative to the size of the parent node or
2) the average of the reachability values of the new cluster
relative to the average of the reachability values of the parent
node. A lower value for the similarity threshold means less levels
in the tree.
significant_min : float, optional
Sets a lower threshold on how small a significant maxima can be.
min_cluster_size : int > 1 or float between 0 and 1
Minimum number of samples in an OPTICS cluster, expressed as an
absolute number or a fraction of the number of samples (rounded
to be at least 2).
min_maxima_ratio : float, optional
Used to determine neighborhood size for minimum cluster membership.
Returns
-------
core_sample_indices_ : array, shape (n_core_samples,)
The indices of the core samples.
labels_ : array, shape (n_samples,)
The estimated labels.
"""
# Extraction wrapper
reachability = reachability / np.max(reachability[1:])
reachability_plot = reachability[ordering].tolist()
root_node = _automatic_cluster(reachability_plot, ordering,
maxima_ratio, rejection_ratio,
similarity_threshold, significant_min,
min_cluster_size, min_maxima_ratio)
leaves = _get_leaves(root_node, [])
# Start cluster id's at 0
clustid = 0
n_samples = len(reachability)
is_core = np.zeros(n_samples, dtype=bool)
labels = np.full(n_samples, -1, dtype=int)
# Start all points as non-core noise
for leaf in leaves:
index = ordering[leaf.start:leaf.end]
labels[index] = clustid
is_core[index] = 1
clustid += 1
return np.arange(n_samples)[is_core], labels
def _automatic_cluster(reachability_plot, ordering,
maxima_ratio, rejection_ratio,
similarity_threshold, significant_min,
min_cluster_size, min_maxima_ratio):
"""Converts reachability plot to cluster tree and returns root node.
Parameters
----------
reachability_plot : list, required
Reachability distances ordered by OPTICS ordering index.
"""
min_neighborhood_size = 2
if min_cluster_size <= 1:
min_cluster_size = max(2, min_cluster_size * len(ordering))
neighborhood_size = int(min_maxima_ratio * len(ordering))
# TODO: should this check be against min_samples instead, and should
# the parameter be public?
if neighborhood_size < min_neighborhood_size:
neighborhood_size = min_neighborhood_size
local_maxima_points = _find_local_maxima(reachability_plot,
neighborhood_size)
root_node = _TreeNode(ordering, 0, len(ordering), None)
_cluster_tree(root_node, None, local_maxima_points,
reachability_plot, ordering, min_cluster_size,
maxima_ratio, rejection_ratio,
similarity_threshold, significant_min)
return root_node
class _TreeNode(object):
# automatic cluster helper classes and functions
def __init__(self, points, start, end, parent_node):
self.points = points
self.start = start
self.end = end
self.parent_node = parent_node
self.children = []
self.split_point = -1
def _is_local_maxima(index, reachability_plot, neighborhood_size):
right_idx = slice(index + 1, index + neighborhood_size + 1)
left_idx = slice(max(1, index - neighborhood_size - 1), index)
return (np.all(reachability_plot[index] >= reachability_plot[left_idx]) and
np.all(reachability_plot[index] >= reachability_plot[right_idx]))
def _find_local_maxima(reachability_plot, neighborhood_size):
local_maxima_points = {}
# 1st and last points on Reachability Plot are not taken
# as local maxima points
for i in range(1, len(reachability_plot) - 1):
# if the point is a local maxima on the reachability plot with
# regard to neighborhood_size, insert it into priority queue and
# maxima list
if (reachability_plot[i] > reachability_plot[i - 1] and
reachability_plot[i] >= reachability_plot[i + 1] and
_is_local_maxima(i, np.array(reachability_plot),
neighborhood_size) == 1):
local_maxima_points[i] = reachability_plot[i]
return sorted(local_maxima_points,
key=local_maxima_points.__getitem__, reverse=True)
def _cluster_tree(node, parent_node, local_maxima_points,
reachability_plot, reachability_ordering,
min_cluster_size, maxima_ratio, rejection_ratio,
similarity_threshold, significant_min):
"""Recursively builds cluster tree to hold hierarchical cluster structure
node is a node or the root of the tree in the first call
parent_node is parent node of N or None if node is root of the tree
local_maxima_points is list of local maxima points sorted in
descending order of reachability
"""
if len(local_maxima_points) == 0:
return # parent_node is a leaf
# take largest local maximum as possible separation between clusters
s = local_maxima_points[0]
node.split_point = s
local_maxima_points = local_maxima_points[1:]
# create two new nodes and add to list of nodes
node_1 = _TreeNode(reachability_ordering[node.start:s],
node.start, s, node)
node_2 = _TreeNode(reachability_ordering[s + 1:node.end],
s + 1, node.end, node)
local_max_1 = []
local_max_2 = []
for i in local_maxima_points:
if i < s:
local_max_1.append(i)
if i > s:
local_max_2.append(i)
node_list = []
node_list.append((node_1, local_max_1))
node_list.append((node_2, local_max_2))
if reachability_plot[s] < significant_min:
node.split_point = -1
# if split_point is not significant, ignore this split and continue
return
# only check a certain ratio of points in the child
# nodes formed to the left and right of the maxima
# ...should check_ratio be a user settable parameter?
check_ratio = .8
check_value_1 = int(np.round(check_ratio * len(node_1.points)))
check_value_2 = int(np.round(check_ratio * len(node_2.points)))
avg_reach1 = np.mean(reachability_plot[(node_1.end -
check_value_1):node_1.end])
avg_reach2 = np.mean(reachability_plot[node_2.start:(node_2.start
+ check_value_2)])
if ((avg_reach1 / reachability_plot[s]) > maxima_ratio or
(avg_reach2 / reachability_plot[s]) > maxima_ratio):
if (avg_reach1 / reachability_plot[s]) < rejection_ratio:
# reject node 2
node_list.remove((node_2, local_max_2))
if (avg_reach2 / reachability_plot[s]) < rejection_ratio:
# reject node 1
node_list.remove((node_1, local_max_1))
if ((avg_reach1 / reachability_plot[s]) >= rejection_ratio and
(avg_reach2 / reachability_plot[s]) >= rejection_ratio):
# since split_point is not significant,
# ignore this split and continue (reject both child nodes)
node.split_point = -1
_cluster_tree(node, parent_node, local_maxima_points,
reachability_plot, reachability_ordering,
min_cluster_size, maxima_ratio, rejection_ratio,
similarity_threshold, significant_min)
return
# remove clusters that are too small
if (len(node_1.points) < min_cluster_size and
node_list.count((node_1, local_max_1)) > 0):
# cluster 1 is too small
node_list.remove((node_1, local_max_1))
if (len(node_2.points) < min_cluster_size and
node_list.count((node_2, local_max_2)) > 0):
# cluster 2 is too small
node_list.remove((node_2, local_max_2))
if not node_list:
# parent_node will be a leaf
node.split_point = -1
return
# Check if nodes can be moved up one level - the new cluster created
# is too "similar" to its parent, given the similarity threshold.
bypass_node = 0
if parent_node is not None:
if ((node.end - node.start) / (parent_node.end - parent_node.start) >
similarity_threshold):
parent_node.children.remove(node)
bypass_node = 1
for nl in node_list:
if bypass_node == 1:
parent_node.children.append(nl[0])
_cluster_tree(nl[0], parent_node, nl[1],
reachability_plot, reachability_ordering,
min_cluster_size, maxima_ratio, rejection_ratio,
similarity_threshold, significant_min)
else:
node.children.append(nl[0])
_cluster_tree(nl[0], node, nl[1], reachability_plot,
reachability_ordering, min_cluster_size,
maxima_ratio, rejection_ratio,
similarity_threshold, significant_min)
def _get_leaves(node, arr):
if node is not None:
if node.split_point == -1:
arr.append(node)
for n in node.children:
_get_leaves(n, arr)
return arr
| bsd-3-clause |
q1ang/seaborn | seaborn/utils.py | 19 | 15509 | """Small plotting-related utility functions."""
from __future__ import print_function, division
import colorsys
import warnings
import os
import numpy as np
from scipy import stats
import pandas as pd
import matplotlib.colors as mplcol
import matplotlib.pyplot as plt
from distutils.version import LooseVersion
pandas_has_categoricals = LooseVersion(pd.__version__) >= "0.15"
from .external.six.moves.urllib.request import urlopen, urlretrieve
def ci_to_errsize(cis, heights):
"""Convert intervals to error arguments relative to plot heights.
Parameters
----------
cis: 2 x n sequence
sequence of confidence interval limits
heights : n sequence
sequence of plot heights
Returns
-------
errsize : 2 x n array
sequence of error size relative to height values in correct
format as argument for plt.bar
"""
cis = np.atleast_2d(cis).reshape(2, -1)
heights = np.atleast_1d(heights)
errsize = []
for i, (low, high) in enumerate(np.transpose(cis)):
h = heights[i]
elow = h - low
ehigh = high - h
errsize.append([elow, ehigh])
errsize = np.asarray(errsize).T
return errsize
def pmf_hist(a, bins=10):
"""Return arguments to plt.bar for pmf-like histogram of an array.
Parameters
----------
a: array-like
array to make histogram of
bins: int
number of bins
Returns
-------
x: array
left x position of bars
h: array
height of bars
w: float
width of bars
"""
n, x = np.histogram(a, bins)
h = n / n.sum()
w = x[1] - x[0]
return x[:-1], h, w
def desaturate(color, prop):
"""Decrease the saturation channel of a color by some percent.
Parameters
----------
color : matplotlib color
hex, rgb-tuple, or html color name
prop : float
saturation channel of color will be multiplied by this value
Returns
-------
new_color : rgb tuple
desaturated color code in RGB tuple representation
"""
# Check inputs
if not 0 <= prop <= 1:
raise ValueError("prop must be between 0 and 1")
# Get rgb tuple rep
rgb = mplcol.colorConverter.to_rgb(color)
# Convert to hls
h, l, s = colorsys.rgb_to_hls(*rgb)
# Desaturate the saturation channel
s *= prop
# Convert back to rgb
new_color = colorsys.hls_to_rgb(h, l, s)
return new_color
def saturate(color):
"""Return a fully saturated color with the same hue.
Parameters
----------
color : matplotlib color
hex, rgb-tuple, or html color name
Returns
-------
new_color : rgb tuple
saturated color code in RGB tuple representation
"""
return set_hls_values(color, s=1)
def set_hls_values(color, h=None, l=None, s=None):
"""Independently manipulate the h, l, or s channels of a color.
Parameters
----------
color : matplotlib color
hex, rgb-tuple, or html color name
h, l, s : floats between 0 and 1, or None
new values for each channel in hls space
Returns
-------
new_color : rgb tuple
new color code in RGB tuple representation
"""
# Get rgb tuple representation
rgb = mplcol.colorConverter.to_rgb(color)
vals = list(colorsys.rgb_to_hls(*rgb))
for i, val in enumerate([h, l, s]):
if val is not None:
vals[i] = val
rgb = colorsys.hls_to_rgb(*vals)
return rgb
def axlabel(xlabel, ylabel, **kwargs):
"""Grab current axis and label it."""
ax = plt.gca()
ax.set_xlabel(xlabel, **kwargs)
ax.set_ylabel(ylabel, **kwargs)
def despine(fig=None, ax=None, top=True, right=True, left=False,
bottom=False, offset=None, trim=False):
"""Remove the top and right spines from plot(s).
fig : matplotlib figure, optional
Figure to despine all axes of, default uses current figure.
ax : matplotlib axes, optional
Specific axes object to despine.
top, right, left, bottom : boolean, optional
If True, remove that spine.
offset : int or None (default), optional
Absolute distance, in points, spines should be moved away
from the axes (negative values move spines inward).
trim : bool, optional
If true, limit spines to the smallest and largest major tick
on each non-despined axis.
Returns
-------
None
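Examples
--------
A minimal sketch (any matplotlib axes works here):
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots() # doctest: +SKIP
>>> _ = ax.plot([1, 2, 3], [1, 4, 9]) # doctest: +SKIP
>>> despine(ax=ax, offset=10, trim=True) # doctest: +SKIP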
"""
# Get references to the axes we want
if fig is None and ax is None:
axes = plt.gcf().axes
elif fig is not None:
axes = fig.axes
elif ax is not None:
axes = [ax]
for ax_i in axes:
for side in ["top", "right", "left", "bottom"]:
# Toggle the spine objects
is_visible = not locals()[side]
ax_i.spines[side].set_visible(is_visible)
if offset is not None and is_visible:
_set_spine_position(ax_i.spines[side], ('outward', offset))
# Set the ticks appropriately
if bottom:
ax_i.xaxis.tick_top()
if top:
ax_i.xaxis.tick_bottom()
if left:
ax_i.yaxis.tick_right()
if right:
ax_i.yaxis.tick_left()
if trim:
# clip off the parts of the spines that extend past major ticks
xticks = ax_i.get_xticks()
if xticks.size:
firsttick = np.compress(xticks >= min(ax_i.get_xlim()),
xticks)[0]
lasttick = np.compress(xticks <= max(ax_i.get_xlim()),
xticks)[-1]
ax_i.spines['bottom'].set_bounds(firsttick, lasttick)
ax_i.spines['top'].set_bounds(firsttick, lasttick)
newticks = xticks.compress(xticks <= lasttick)
newticks = newticks.compress(newticks >= firsttick)
ax_i.set_xticks(newticks)
yticks = ax_i.get_yticks()
if yticks.size:
firsttick = np.compress(yticks >= min(ax_i.get_ylim()),
yticks)[0]
lasttick = np.compress(yticks <= max(ax_i.get_ylim()),
yticks)[-1]
ax_i.spines['left'].set_bounds(firsttick, lasttick)
ax_i.spines['right'].set_bounds(firsttick, lasttick)
newticks = yticks.compress(yticks <= lasttick)
newticks = newticks.compress(newticks >= firsttick)
ax_i.set_yticks(newticks)
def offset_spines(offset=10, fig=None, ax=None):
"""Simple function to offset spines away from axes.
Use this immediately after creating figure and axes objects.
Offsetting spines after plotting or manipulating the axes
objects may result in loss of labels, ticks, and formatting.
Parameters
----------
offset : int, optional
Absolute distance, in points, spines should be moved away
from the axes (negative values move spines inward).
fig : matplotlib figure, optional
Figure to despine all axes of, default uses current figure.
ax : matplotlib axes, optional
Specific axes object to despine
Returns
-------
None
"""
warn_msg = "`offset_spines` is deprecated and will be removed in v0.5"
warnings.warn(warn_msg, UserWarning)
# Get references to the axes we want
if fig is None and ax is None:
axes = plt.gcf().axes
elif fig is not None:
axes = fig.axes
elif ax is not None:
axes = [ax]
for ax_i in axes:
for spine in ax_i.spines.values():
_set_spine_position(spine, ('outward', offset))
def _set_spine_position(spine, position):
"""
Set the spine's position without resetting an associated axis.
As of matplotlib v. 1.0.0, if a spine has an associated axis, then
spine.set_position() calls axis.cla(), which resets locators, formatters,
etc. We temporarily replace that call with axis.reset_ticks(), which is
sufficient for our purposes.
"""
axis = spine.axis
if axis is not None:
cla = axis.cla
axis.cla = axis.reset_ticks
spine.set_position(position)
if axis is not None:
axis.cla = cla
def _kde_support(data, bw, gridsize, cut, clip):
"""Establish support for a kernel density estimate."""
support_min = max(data.min() - bw * cut, clip[0])
support_max = min(data.max() + bw * cut, clip[1])
return np.linspace(support_min, support_max, gridsize)
def percentiles(a, pcts, axis=None):
"""Like scoreatpercentile but can take and return array of percentiles.
Parameters
----------
a : array
data
pcts : sequence of percentile values
percentile or percentiles to find score at
axis : int or None
if not None, computes scores over this axis
Returns
-------
scores: array
array of scores at requested percentiles
first dimension is length of object passed to ``pcts``
"""
scores = []
try:
n = len(pcts)
except TypeError:
pcts = [pcts]
n = 0
for i, p in enumerate(pcts):
if axis is None:
score = stats.scoreatpercentile(a.ravel(), p)
else:
score = np.apply_along_axis(stats.scoreatpercentile, axis, a, p)
scores.append(score)
scores = np.asarray(scores)
if not n:
scores = scores.squeeze()
return scores
def ci(a, which=95, axis=None):
"""Return a percentile range from an array of values."""
p = 50 - which / 2, 50 + which / 2
return percentiles(a, p, axis)
def sig_stars(p):
"""Return a R-style significance string corresponding to p values."""
if p < 0.001:
return "***"
elif p < 0.01:
return "**"
elif p < 0.05:
return "*"
elif p < 0.1:
return "."
return ""
def iqr(a):
"""Calculate the IQR for an array of numbers."""
a = np.asarray(a)
q1 = stats.scoreatpercentile(a, 25)
q3 = stats.scoreatpercentile(a, 75)
return q3 - q1
def get_dataset_names():
"""Report available example datasets, useful for reporting issues."""
# delayed import to not demand bs4 unless this function is actually used
from bs4 import BeautifulSoup
http = urlopen('https://github.com/mwaskom/seaborn-data/')
gh_list = BeautifulSoup(http)
return [l.text.replace('.csv', '')
for l in gh_list.find_all("a", {"class": "js-directory-link"})
if l.text.endswith('.csv')]
def get_data_home(data_home=None):
"""Return the path of the seaborn data directory.
This is used by the ``load_dataset`` function.
If the ``data_home`` argument is not specified, the default location
is ``~/seaborn-data``.
Alternatively, a different default location can be specified using the
environment variable ``SEABORN_DATA``.
"""
if data_home is None:
data_home = os.environ.get('SEABORN_DATA',
os.path.join('~', 'seaborn-data'))
data_home = os.path.expanduser(data_home)
if not os.path.exists(data_home):
os.makedirs(data_home)
return data_home
def load_dataset(name, cache=True, data_home=None, **kws):
"""Load a dataset from the online repository (requires internet).
Parameters
----------
name : str
Name of the dataset (`name`.csv on
https://github.com/mwaskom/seaborn-data). You can obtain list of
available datasets using :func:`get_dataset_names`
cache : boolean, optional
If True, then cache data locally and use the cache on subsequent calls
data_home : string, optional
The directory in which to cache data. By default, uses ~/seaborn-data/
kws : dict, optional
Passed to pandas.read_csv
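Examples
--------
A minimal sketch (requires an internet connection on the first call;
"tips" is one of the datasets special-cased below):
>>> tips = load_dataset("tips") # doctest: +SKIP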
"""
path = "https://github.com/mwaskom/seaborn-data/raw/master/{0}.csv"
full_path = path.format(name)
if cache:
cache_path = os.path.join(get_data_home(data_home),
os.path.basename(full_path))
if not os.path.exists(cache_path):
urlretrieve(full_path, cache_path)
full_path = cache_path
df = pd.read_csv(full_path, **kws)
if df.iloc[-1].isnull().all():
df = df.iloc[:-1]
if not pandas_has_categoricals:
return df
# Set some columns as a categorical type with ordered levels
if name == "tips":
df["day"] = pd.Categorical(df["day"], ["Thur", "Fri", "Sat", "Sun"])
df["sex"] = pd.Categorical(df["sex"], ["Male", "Female"])
df["time"] = pd.Categorical(df["time"], ["Lunch", "Dinner"])
df["smoker"] = pd.Categorical(df["smoker"], ["Yes", "No"])
if name == "flights":
df["month"] = pd.Categorical(df["month"], df.month.unique())
if name == "exercise":
df["time"] = pd.Categorical(df["time"], ["1 min", "15 min", "30 min"])
df["kind"] = pd.Categorical(df["kind"], ["rest", "walking", "running"])
df["diet"] = pd.Categorical(df["diet"], ["no fat", "low fat"])
if name == "titanic":
df["class"] = pd.Categorical(df["class"], ["First", "Second", "Third"])
df["deck"] = pd.Categorical(df["deck"], list("ABCDEFG"))
return df
def axis_ticklabels_overlap(labels):
"""Return a boolean for whether the list of ticklabels have overlaps.
Parameters
----------
labels : list of ticklabels
Returns
-------
overlap : boolean
True if any of the labels overlap.
"""
if not labels:
return False
try:
bboxes = [l.get_window_extent() for l in labels]
overlaps = [b.count_overlaps(bboxes) for b in bboxes]
return max(overlaps) > 1
except RuntimeError:
# Issue on macosx backend raises an error in the above code
return False
def axes_ticklabels_overlap(ax):
"""Return booleans for whether the x and y ticklabels on an Axes overlap.
Parameters
----------
ax : matplotlib Axes
Returns
-------
x_overlap, y_overlap : booleans
True when the labels on that axis overlap.
"""
return (axis_ticklabels_overlap(ax.get_xticklabels()),
axis_ticklabels_overlap(ax.get_yticklabels()))
def categorical_order(values, order=None):
"""Return a list of unique data values.
Determine an ordered list of levels in ``values``.
Parameters
----------
values : list, array, Categorical, or Series
Vector of "categorical" values
order : list-like, optional
Desired order of category levels to override the order determined
from the ``values`` object.
Returns
-------
order : list
Ordered list of category levels not including null values.
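Examples
--------
A minimal sketch of the two code paths (numeric levels are sorted,
other levels keep their order of appearance); outputs shown are
illustrative and not run as doctests:
>>> categorical_order([1, 3, 2]) # doctest: +SKIP
[1, 2, 3]
>>> categorical_order(["b", "a", "a"]) # doctest: +SKIP
['b', 'a']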
"""
if order is None:
if hasattr(values, "categories"):
order = values.categories
else:
try:
order = values.cat.categories
except (TypeError, AttributeError):
try:
order = values.unique()
except AttributeError:
order = pd.unique(values)
try:
np.asarray(values).astype(float)
order = np.sort(order)
except (ValueError, TypeError):
order = order
order = filter(pd.notnull, order)
return list(order)
| bsd-3-clause |
mdbartos/RIPS | tools/rect_grid.py | 1 | 4297 | import numpy as np
import pandas as pd
from shapely import geometry
import geopandas as gpd
def rect_grid(bbox, hdim, vdim=None, out='polygon', pointref='centroid',
how='fixed_step', anchor_point='ll', endpoint=True):
if isinstance(bbox, (gpd.geoseries.GeoSeries,
gpd.geodataframe.GeoDataFrame)):
bbox = bbox.total_bounds
elif isinstance(bbox, (geometry.Point, geometry.LineString,
geometry.LinearRing, geometry.Polygon,
geometry.MultiPoint, geometry.MultiLineString,
geometry.MultiPolygon)):
bbox = bbox.bounds
x0 = bbox[0]
y0 = bbox[1]
x1 = bbox[2]
y1 = bbox[3]
if how == 'fixed_step':
hstep = hdim
if vdim is None:
vstep = hstep
else:
vstep = vdim
if anchor_point.lower()[1] == 'l':
x = np.arange(x0, x1, hstep, dtype=float)
elif anchor_point.lower()[1] == 'r':
x = np.arange(x1, x0, -hstep, dtype=float)[::-1]
if anchor_point.lower()[0] == 'l':
y = np.arange(y0, y1, vstep, dtype=float)
elif anchor_point.lower()[0] == 'u':
y = np.arange(y1, y0, -vstep, dtype=float)[::-1]
elif how == 'fixed_number':
if vdim is None:
vdim = hdim
if anchor_point.lower()[1] == 'l':
x, hstep = np.linspace(x0, x1, hdim, retstep=True, dtype=float, endpoint=endpoint)
elif anchor_point.lower()[1] == 'r':
x, hstep = np.linspace(x1, x0, hdim, retstep=True, dtype=float, endpoint=endpoint)
x, hstep = x[::-1], -hstep
if anchor_point.lower()[0] == 'l':
y, vstep = np.linspace(y0, y1, vdim, retstep=True, dtype=float, endpoint=endpoint)
elif anchor_point.lower()[0] == 'u':
y, vstep = np.linspace(y1, y0, vdim, retstep=True, dtype=float, endpoint=endpoint)
y, vstep = y[::-1], -vstep
xy_ll = np.vstack(np.dstack(np.meshgrid(x, y)))
if out == 'point':
if pointref == 'centroid':
out_arr = np.column_stack([xy_ll[:,0] + hstep/2.0,
xy_ll[:,1] + vstep/2.0])
return gpd.GeoSeries(map(geometry.asPoint, out_arr))
elif pointref == 'll':
return gpd.GeoSeries(map(geometry.asPoint, xy_ll))
elif pointref == 'lr':
out_arr = np.column_stack([xy_ll[:,0] + hstep, xy_ll[:,1]])
return gpd.GeoSeries(map(geometry.asPoint, out_arr))
elif pointref == 'ur':
out_arr = np.column_stack([xy_ll[:,0] + hstep, xy_ll[:,1] + vstep])
return gpd.GeoSeries(map(geometry.asPoint, out_arr))
elif pointref == 'ul':
out_arr = np.column_stack([xy_ll[:,0], xy_ll[:,1] + vstep])
return gpd.GeoSeries(map(geometry.asPoint, out_arr))
elif out == 'line':
# if how == 'fixed_step':
# y1 = y1 + vstep
# x1 = x1 + hstep
vlines = np.hstack([
np.column_stack([x, np.repeat(y0, len(x))]),
np.column_stack([x, np.repeat(y1, len(x))])
])
vlines = vlines.reshape(vlines.shape[0], 2, 2)
hlines = np.hstack([
np.column_stack([np.repeat(x0, len(y)), y]),
np.column_stack([np.repeat(x1, len(y)), y])
])
hlines = hlines.reshape(hlines.shape[0], 2, 2)
out_arr = np.vstack([vlines, hlines])
del vlines
del hlines
return gpd.GeoSeries(map(geometry.asLineString, out_arr))
elif out == 'polygon':
xy_ur = np.column_stack([xy_ll[:,0] + hstep, xy_ll[:,1] + vstep])
out_arr = np.column_stack([
xy_ur,
np.column_stack([xy_ll[:,0], xy_ll[:,1] + vstep]),
xy_ll,
np.column_stack([xy_ll[:,0] + hstep, xy_ll[:,1]]),
xy_ur
])
del xy_ll
del xy_ur
out_arr = out_arr.reshape(out_arr.shape[0], out_arr.shape[1] // 2, 2)
return gpd.GeoSeries(map(geometry.asPolygon, out_arr))
| mit |
jaidevd/scikit-learn | examples/svm/plot_svm_regression.py | 120 | 1520 | """
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
###############################################################################
# Generate sample data
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
###############################################################################
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
###############################################################################
# look at the results
lw = 2
plt.scatter(X, y, color='darkorange', label='data')
plt.plot(X, y_rbf, color='navy', lw=lw, label='RBF model')
plt.plot(X, y_lin, color='c', lw=lw, label='Linear model')
plt.plot(X, y_poly, color='cornflowerblue', lw=lw, label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
| bsd-3-clause |
miltonsarria/dsp-python | audio/4_stft-system.py | 1 | 1556 | import numpy as np
import time, os, sys
import wav_process as wp
import matplotlib.pyplot as plt
#from scipy.signal import hamming
from wav_rw import wavread, wavwrite
from scipy.signal import get_window
# select the audio file, window type, window size, number of FFT points and hop size (overlap)
(fs, x) = wavread('sound/speech-male.wav')
# window can be rectangular, hanning, hamming, blackman, blackmanharris, ...
window = 'rectangular'
M= 1024
N = 1024
H = 500
# generate the window and normalize it
w = get_window(window, M)
# compute the Fourier transform of the audio file (magnitude and phase)
mX, pX = wp.stftAnal(x, w, N, H)
# reconstruct the audio file using the magnitude and phase
y = wp.stftSynth(mX, pX, w.size, H)
# generate the corresponding plots to compare the results
plt.figure(1, figsize=(9.5, 7))
plt.subplot(311)
plt.plot(np.arange(x.size)/float(fs), x, 'b')
plt.title('x (original audio wav)')
plt.axis([0,x.size/float(fs),min(x),max(x)])
plt.subplot(312)
numFrames = int(mX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(mX[0,:].size)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX))
plt.title('mX, M='+str(M)+', N='+str(N)+', H='+str(H))
plt.autoscale(tight=True)
plt.subplot(313)
plt.plot(np.arange(y.size)/float(fs), y, 'b')
plt.axis([0, y.size/float(fs), min(y), max(y)])
plt.title('y (reconstructed audio wav)')
plt.tight_layout()
wavwrite(y, fs, 'output/audio_stft.wav')
plt.show()
| mit |
deepnarainsingh/data-science-from-scratch | code/visualizing_data.py | 58 | 5116 | import matplotlib.pyplot as plt
from collections import Counter
def make_chart_simple_line_chart(plt):
years = [1950, 1960, 1970, 1980, 1990, 2000, 2010]
gdp = [300.2, 543.3, 1075.9, 2862.5, 5979.6, 10289.7, 14958.3]
# create a line chart, years on x-axis, gdp on y-axis
plt.plot(years, gdp, color='green', marker='o', linestyle='solid')
# add a title
plt.title("Nominal GDP")
# add a label to the y-axis
plt.ylabel("Billions of $")
plt.show()
def make_chart_simple_bar_chart(plt):
movies = ["Annie Hall", "Ben-Hur", "Casablanca", "Gandhi", "West Side Story"]
num_oscars = [5, 11, 3, 8, 10]
# bars are by default width 0.8, so we'll add 0.1 to the left coordinates
# so that each bar is centered
xs = [i + 0.1 for i, _ in enumerate(movies)]
# plot bars with left x-coordinates [xs], heights [num_oscars]
plt.bar(xs, num_oscars)
plt.ylabel("# of Academy Awards")
plt.title("My Favorite Movies")
# label x-axis with movie names at bar centers
plt.xticks([i + 0.5 for i, _ in enumerate(movies)], movies)
plt.show()
def make_chart_histogram(plt):
grades = [83,95,91,87,70,0,85,82,100,67,73,77,0]
decile = lambda grade: grade // 10 * 10
histogram = Counter(decile(grade) for grade in grades)
plt.bar([x - 4 for x in histogram.keys()], # shift each bar to the left by 4
histogram.values(), # give each bar its correct height
8) # give each bar a width of 8
plt.axis([-5, 105, 0, 5]) # x-axis from -5 to 105,
# y-axis from 0 to 5
plt.xticks([10 * i for i in range(11)]) # x-axis labels at 0, 10, ..., 100
plt.xlabel("Decile")
plt.ylabel("# of Students")
plt.title("Distribution of Exam 1 Grades")
plt.show()
def make_chart_misleading_y_axis(plt, mislead=True):
mentions = [500, 505]
years = [2013, 2014]
plt.bar([2012.6, 2013.6], mentions, 0.8)
plt.xticks(years)
plt.ylabel("# of times I heard someone say 'data science'")
# if you don't do this, matplotlib will label the x-axis 0, 1
# and then add a +2.013e3 off in the corner (bad matplotlib!)
plt.ticklabel_format(useOffset=False)
if mislead:
# misleading y-axis only shows the part above 500
plt.axis([2012.5,2014.5,499,506])
plt.title("Look at the 'Huge' Increase!")
else:
plt.axis([2012.5,2014.5,0,550])
plt.title("Not So Huge Anymore.")
plt.show()
def make_chart_several_line_charts(plt):
variance = [1,2,4,8,16,32,64,128,256]
bias_squared = [256,128,64,32,16,8,4,2,1]
total_error = [x + y for x, y in zip(variance, bias_squared)]
xs = range(len(variance))
# we can make multiple calls to plt.plot
# to show multiple series on the same chart
plt.plot(xs, variance, 'g-', label='variance') # green solid line
plt.plot(xs, bias_squared, 'r-.', label='bias^2') # red dot-dashed line
plt.plot(xs, total_error, 'b:', label='total error') # blue dotted line
# because we've assigned labels to each series
# we can get a legend for free
# loc=9 means "top center"
plt.legend(loc=9)
plt.xlabel("model complexity")
plt.title("The Bias-Variance Tradeoff")
plt.show()
def make_chart_scatter_plot(plt):
friends = [ 70, 65, 72, 63, 71, 64, 60, 64, 67]
minutes = [175, 170, 205, 120, 220, 130, 105, 145, 190]
labels = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']
plt.scatter(friends, minutes)
# label each point
for label, friend_count, minute_count in zip(labels, friends, minutes):
plt.annotate(label,
xy=(friend_count, minute_count), # put the label with its point
xytext=(5, -5), # but slightly offset
textcoords='offset points')
plt.title("Daily Minutes vs. Number of Friends")
plt.xlabel("# of friends")
plt.ylabel("daily minutes spent on the site")
plt.show()
def make_chart_scatterplot_axes(plt, equal_axes=False):
test_1_grades = [ 99, 90, 85, 97, 80]
test_2_grades = [100, 85, 60, 90, 70]
plt.scatter(test_1_grades, test_2_grades)
plt.xlabel("test 1 grade")
plt.ylabel("test 2 grade")
if equal_axes:
plt.title("Axes Are Comparable")
plt.axis("equal")
else:
plt.title("Axes Aren't Comparable")
plt.show()
def make_chart_pie_chart(plt):
plt.pie([0.95, 0.05], labels=["Uses pie charts", "Knows better"])
# make sure pie is a circle and not an oval
plt.axis("equal")
plt.show()
if __name__ == "__main__":
make_chart_simple_line_chart(plt)
make_chart_simple_bar_chart(plt)
make_chart_histogram(plt)
make_chart_misleading_y_axis(plt, mislead=True)
make_chart_misleading_y_axis(plt, mislead=False)
make_chart_several_line_charts(plt)
make_chart_scatterplot_axes(plt, equal_axes=False)
make_chart_scatterplot_axes(plt, equal_axes=True)
make_chart_pie_chart(plt)
| unlicense |
neale/CS-program | 434-MachineLearning/final_project/linearClassifier/sklearn/decomposition/truncated_svd.py | 19 | 7884 | """Truncated SVD for sparse matrices, aka latent semantic analysis (LSA).
"""
# Author: Lars Buitinck
# Olivier Grisel <[email protected]>
# Michael Becker <[email protected]>
# License: 3-clause BSD.
import numpy as np
import scipy.sparse as sp
try:
from scipy.sparse.linalg import svds
except ImportError:
from ..utils.arpack import svds
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array, as_float_array, check_random_state
from ..utils.extmath import randomized_svd, safe_sparse_dot, svd_flip
from ..utils.sparsefuncs import mean_variance_axis
__all__ = ["TruncatedSVD"]
class TruncatedSVD(BaseEstimator, TransformerMixin):
"""Dimensionality reduction using truncated SVD (aka LSA).
This transformer performs linear dimensionality reduction by means of
truncated singular value decomposition (SVD). It is very similar to PCA,
but operates on sample vectors directly, instead of on a covariance matrix.
This means it can work with scipy.sparse matrices efficiently.
In particular, truncated SVD works on term count/tf-idf matrices as
returned by the vectorizers in sklearn.feature_extraction.text. In that
context, it is known as latent semantic analysis (LSA).
This estimator supports two algorithms: a fast randomized SVD solver, and
a "naive" algorithm that uses ARPACK as an eigensolver on (X * X.T) or
(X.T * X), whichever is more efficient.
Read more in the :ref:`User Guide <LSA>`.
Parameters
----------
n_components : int, default = 2
Desired dimensionality of output data.
Must be strictly less than the number of features.
The default value is useful for visualisation. For LSA, a value of
100 is recommended.
algorithm : string, default = "randomized"
SVD solver to use. Either "arpack" for the ARPACK wrapper in SciPy
(scipy.sparse.linalg.svds), or "randomized" for the randomized
algorithm due to Halko (2009).
n_iter : int, optional (default 5)
Number of iterations for randomized SVD solver. Not used by ARPACK.
The default is larger than the default in `randomized_svd` to handle
sparse matrices that may have large slowly decaying spectrum.
random_state : int or RandomState, optional
(Seed for) pseudo-random number generator. If not given, the
numpy.random singleton is used.
tol : float, optional
Tolerance for ARPACK. 0 means machine precision. Ignored by randomized
SVD solver.
Attributes
----------
components_ : array, shape (n_components, n_features)
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
explained_variance_ : array, [n_components]
The variance of the training samples transformed by a projection to
each component.
Examples
--------
>>> from sklearn.decomposition import TruncatedSVD
>>> from sklearn.random_projection import sparse_random_matrix
>>> X = sparse_random_matrix(100, 100, density=0.01, random_state=42)
>>> svd = TruncatedSVD(n_components=5, n_iter=7, random_state=42)
>>> svd.fit(X) # doctest: +NORMALIZE_WHITESPACE
TruncatedSVD(algorithm='randomized', n_components=5, n_iter=7,
random_state=42, tol=0.0)
>>> print(svd.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.0782... 0.0552... 0.0544... 0.0499... 0.0413...]
>>> print(svd.explained_variance_ratio_.sum()) # doctest: +ELLIPSIS
0.279...
See also
--------
PCA
RandomizedPCA
References
----------
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
Notes
-----
SVD suffers from a problem called "sign indeterminacy", which means the
sign of the ``components_`` and the output from transform depend on the
algorithm and random state. To work around this, fit instances of this
class to data once, then keep the instance around to do transformations.
"""
def __init__(self, n_components=2, algorithm="randomized", n_iter=5,
random_state=None, tol=0.):
self.algorithm = algorithm
self.n_components = n_components
self.n_iter = n_iter
self.random_state = random_state
self.tol = tol
def fit(self, X, y=None):
"""Fit LSI model on training data X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
self : object
Returns the transformer object.
"""
self.fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit LSI model to X and perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = as_float_array(X, copy=False)
random_state = check_random_state(self.random_state)
# If sparse and not csr or csc, convert to csr
if sp.issparse(X) and X.getformat() not in ["csr", "csc"]:
X = X.tocsr()
if self.algorithm == "arpack":
U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol)
# svds doesn't abide by scipy.linalg.svd/randomized_svd
# conventions, so reverse its outputs.
Sigma = Sigma[::-1]
U, VT = svd_flip(U[:, ::-1], VT[::-1])
elif self.algorithm == "randomized":
k = self.n_components
n_features = X.shape[1]
if k >= n_features:
raise ValueError("n_components must be < n_features;"
" got %d >= %d" % (k, n_features))
U, Sigma, VT = randomized_svd(X, self.n_components,
n_iter=self.n_iter,
random_state=random_state)
else:
raise ValueError("unknown algorithm %r" % self.algorithm)
self.components_ = VT
# Calculate explained variance & explained variance ratio
X_transformed = np.dot(U, np.diag(Sigma))
self.explained_variance_ = exp_var = np.var(X_transformed, axis=0)
if sp.issparse(X):
_, full_var = mean_variance_axis(X, axis=0)
full_var = full_var.sum()
else:
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
return X_transformed
def transform(self, X):
"""Perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = check_array(X, accept_sparse='csr')
return safe_sparse_dot(X, self.components_.T)
def inverse_transform(self, X):
"""Transform X back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data.
Returns
-------
X_original : array, shape (n_samples, n_features)
Note that this is always a dense array.
"""
X = check_array(X)
return np.dot(X, self.components_)
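# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal demonstration of the "fit once, then reuse the same instance"
# pattern recommended above to cope with sign indeterminacy. The toy corpus
# and variable names are assumptions for illustration only.
if __name__ == "__main__":
    from sklearn.feature_extraction.text import TfidfVectorizer
    docs = ["the cat sat", "the dog sat", "cats and dogs like mats"]  # hypothetical corpus
    tfidf = TfidfVectorizer().fit_transform(docs)  # sparse tf-idf term matrix
    lsa = TruncatedSVD(n_components=2, algorithm="randomized", random_state=0)
    lsa.fit(tfidf)                       # fit once...
    topic_space = lsa.transform(tfidf)   # ...then keep the instance for later transforms
    print(topic_space.shape)             # (3, 2)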
| unlicense |
Titan-C/scikit-learn | examples/linear_model/plot_lasso_lars.py | 363 | 1080 | #!/usr/bin/env python
"""
=====================
Lasso path using LARS
=====================
Computes Lasso Path along the regularization parameter using the LARS
algorithm on the diabetes dataset. Each color represents a different
feature of the coefficient vector, and this is displayed as a function
of the regularization parameter.
"""
print(__doc__)
# Author: Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
print("Computing regularization path using the LARS ...")
alphas, _, coefs = linear_model.lars_path(X, y, method='lasso', verbose=True)
xx = np.sum(np.abs(coefs.T), axis=1)
xx /= xx[-1]
plt.plot(xx, coefs.T)
ymin, ymax = plt.ylim()
plt.vlines(xx, ymin, ymax, linestyle='dashed')
plt.xlabel('|coef| / max|coef|')
plt.ylabel('Coefficients')
plt.title('LASSO Path')
plt.axis('tight')
plt.show()
| bsd-3-clause |
ycool/apollo | modules/tools/navigation/planning/obstacle_decider.py | 3 | 7929 | #!/usr/bin/env python
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from shapely.geometry import LineString
from shapely.geometry import Point
class ObstacleDecider:
def __init__(self):
self.obstacle_lat_ttc = {}
self.obstacle_lon_ttc = {}
self.obstacle_lat_dist = {}
self.obstacle_lon_dist = {}
self.front_edge_to_center = 3.89
self.back_edge_to_center = 1.043
self.left_edge_to_center = 1.055
self.right_edge_to_center = 1.055
self.LAT_DIST = 0.9
self.mobileye = None
self.path_obstacle_processed = False
self.default_lane_width = 3.3
def update(self, mobileye):
self.mobileye = mobileye
self.path_obstacle_processed = False
def process_path_obstacle(self, fpath):
if self.path_obstacle_processed:
return
path_x, path_y = fpath.get_xy()
self.obstacle_lat_dist = {}
path = []
self.mobileye.process_obstacles()
for i in range(len(path_x)):
path.append((path_x[i], path_y[i]))
line = LineString(path)
for obs_id, obstacle in self.mobileye.obstacles.items():
point = Point(obstacle.x, obstacle.y)
dist = line.distance(point)
if dist < self.LAT_DIST + obstacle.width + self.left_edge_to_center:
proj_len = line.project(point)
if proj_len == 0 or proj_len >= line.length:
continue
p1 = line.interpolate(proj_len)
if (proj_len + 1) > line.length:
p2 = line.interpolate(line.length)
else:
p2 = line.interpolate(proj_len + 1)
d = (point.x - p1.x) * (p2.y - p1.y) - (point.y - p1.y) * (
p2.x - p1.x)
if d > 0:
dist *= -1
self.obstacle_lat_dist[obstacle.obstacle_id] = dist
self.path_obstacle_processed = True
# print self.obstacle_lat_dist
def get_adv_left_right_nudgable_dist(self, fpath):
left_nudgable = 0
right_nudgable = 0
routing_y = fpath.init_y()
if routing_y <= 0:
left_nudgable = self.default_lane_width / 2.0 \
- abs(routing_y) \
- self.left_edge_to_center
right_nudgable = self.default_lane_width / 2.0 \
+ abs(routing_y) \
- self.right_edge_to_center
else:
left_nudgable = self.default_lane_width / 2.0 \
+ abs(routing_y) \
- self.left_edge_to_center
right_nudgable = self.default_lane_width / 2.0 \
- abs(routing_y) \
- self.right_edge_to_center
return left_nudgable, -1 * right_nudgable
def get_nudge_distance(self, left_nudgable, right_nudgable):
left_nudge = None
right_nudge = None
for obs_id, lat_dist in self.obstacle_lat_dist.items():
if lat_dist >= 0:
actual_dist = abs(lat_dist) \
- self.mobileye.obstacles[obs_id].width / 2.0 \
- self.left_edge_to_center
if self.LAT_DIST > actual_dist > 0.2:
if right_nudge is None:
right_nudge = -1 * (self.LAT_DIST - actual_dist)
elif right_nudge > -1 * (self.LAT_DIST - actual_dist):
right_nudge = -1 * (self.LAT_DIST - actual_dist)
else:
actual_dist = abs(lat_dist) \
- self.mobileye.obstacles[obs_id].width / 2.0 \
- self.left_edge_to_center
if self.LAT_DIST > actual_dist > 0.2:
if left_nudge is None:
left_nudge = self.LAT_DIST - actual_dist
elif left_nudge < self.LAT_DIST - actual_dist:
left_nudge = self.LAT_DIST - actual_dist
if left_nudge is None and right_nudge is None:
return 0
if left_nudge is not None and right_nudge is not None:
return 0
if left_nudge is not None:
if left_nudgable < left_nudge:
return left_nudgable
else:
return left_nudge
if right_nudge is not None:
if abs(right_nudgable) > abs(right_nudge):
return right_nudgable
else:
return right_nudge
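# --- Illustrative sketch (added for clarity; not part of the original file) ---
# Standalone illustration of the signed lateral-distance test used in
# ObstacleDecider.process_path_obstacle(): project the obstacle onto the path,
# then use the 2-D cross product of (path direction) x (p1 -> obstacle) to
# decide which side of the path the obstacle lies on. The coordinates below
# are made-up values for illustration only.
def signed_lateral_distance_demo():
    path = LineString([(0.0, 0.0), (10.0, 0.0)])  # straight path along +x
    obstacle = Point(5.0, 2.0)                    # 2 m to the left of travel direction
    dist = path.distance(obstacle)
    proj_len = path.project(obstacle)
    p1 = path.interpolate(proj_len)
    p2 = path.interpolate(min(proj_len + 1, path.length))
    d = (obstacle.x - p1.x) * (p2.y - p1.y) - (obstacle.y - p1.y) * (p2.x - p1.x)
    if d > 0:
        dist *= -1
    return dist  # +2.0 here; an obstacle at (5.0, -2.0) would return -2.0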
if __name__ == "__main__":
import rospy
from std_msgs.msg import String
import matplotlib.pyplot as plt
from modules.localization.proto import localization_pb2
from modules.canbus.proto import chassis_pb2
from ad_vehicle import ADVehicle
import matplotlib.animation as animation
from modules.drivers.proto import mobileye_pb2
from provider_routing import RoutingProvider
from provider_mobileye import MobileyeProvider
from path_decider import PathDecider
def localization_callback(localization_pb):
ad_vehicle.update_localization(localization_pb)
def routing_callback(routing_str):
routing.update(routing_str)
def chassis_callback(chassis_pb):
ad_vehicle.update_chassis(chassis_pb)
def mobileye_callback(mobileye_pb):
global fpath
mobileye.update(mobileye_pb)
mobileye.process_lane_markers()
fpath = path_decider.get_path(mobileye, routing, ad_vehicle,
obs_decider)
obs_decider.update(mobileye)
obs_decider.process_path_obstacle(fpath)
left_nudgable, right_nudgable = obs_decider.get_adv_left_right_nudgable_dist(fpath)
print "nudge distance = ", obs_decider.get_nudge_distance(left_nudgable, right_nudgable)
def update(frame):
if not ad_vehicle.is_ready():
return
x = []
y = []
for obs_id, obs in mobileye.obstacles.items():
x.append(obs.x)
y.append(obs.y)
obstacles_points.set_xdata(x)
obstacles_points.set_ydata(y)
if fpath is not None:
px, py = fpath.get_xy()
path_line.set_xdata(px)
path_line.set_ydata(py)
fpath = None
ad_vehicle = ADVehicle()
routing = RoutingProvider()
mobileye = MobileyeProvider()
obs_decider = ObstacleDecider()
path_decider = PathDecider(True, False, False)
rospy.init_node("path_decider_debug", anonymous=True)
rospy.Subscriber('/apollo/localization/pose',
localization_pb2.LocalizationEstimate,
localization_callback)
rospy.Subscriber('/apollo/navigation/routing',
String, routing_callback)
rospy.Subscriber('/apollo/canbus/chassis',
chassis_pb2.Chassis,
chassis_callback)
rospy.Subscriber('/apollo/sensor/mobileye',
mobileye_pb2.Mobileye,
mobileye_callback)
fig = plt.figure()
ax = plt.subplot2grid((1, 1), (0, 0), rowspan=1, colspan=1)
obstacles_points, = ax.plot([], [], 'ro')
path_line, = ax.plot([], [], 'b-')
ani = animation.FuncAnimation(fig, update, interval=100)
ax.set_xlim([-2, 128])
ax.set_ylim([-5, 5])
# ax2.axis('equal')
plt.show()
| apache-2.0 |
glennq/scikit-learn | examples/datasets/plot_iris_dataset.py | 35 | 1929 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
The Iris Dataset
=========================================================
This data set consists of 3 different types of irises'
(Setosa, Versicolour, and Virginica) petal and sepal
length, stored in a 150x4 numpy.ndarray
The rows being the samples and the columns being:
Sepal Length, Sepal Width, Petal Length and Petal Width.
The below plot uses the first two features.
See `here <https://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
plt.figure(2, figsize=(8, 6))
plt.clf()
# Plot the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
# To get a better understanding of the interaction of the dimensions
# plot the first three PCA dimensions
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=Y,
cmap=plt.cm.Paired)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])
plt.show()
| bsd-3-clause |
lazywei/scikit-learn | examples/linear_model/plot_bayesian_ridge.py | 248 | 2588 | """
=========================
Bayesian Ridge Regression
=========================
Computes a Bayesian Ridge Regression on a synthetic dataset.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
As the prior on the weights is a Gaussian prior, the histogram of the
estimated weights is Gaussian.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import BayesianRidge, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
np.random.seed(0)
n_samples, n_features = 100, 100
X = np.random.randn(n_samples, n_features) # Create Gaussian data
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the Bayesian Ridge Regression and an OLS for comparison
clf = BayesianRidge(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot true weights, estimated weights and histogram of the weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="Bayesian Ridge estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc="best", prop=dict(size=12))
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc="lower left")
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |
n-west/gnuradio | gnuradio-runtime/apps/evaluation_random_numbers.py | 26 | 5155 | #!/usr/bin/env python
#
# Copyright 2015 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
import numpy as np
from scipy.stats import norm, laplace, rayleigh
from matplotlib import pyplot as plt
# NOTE: scipy and matplotlib are optional packages and not included in the default gnuradio dependencies
#*** SETUP ***#
# Number of realisations per histogram
num_tests = 1000000
# Set number of bins in histograms
uniform_num_bins = 31
gauss_num_bins = 31
rayleigh_num_bins = 31
laplace_num_bins = 31
rndm = gr.random() # instance of gnuradio random class (gr::random)
print 'All histograms contain',num_tests,'realisations.'
#*** GENERATE DATA ***#
uniform_values = np.zeros(num_tests)
gauss_values = np.zeros(num_tests)
rayleigh_values = np.zeros(num_tests)
laplace_values = np.zeros(num_tests)
for k in range(num_tests):
uniform_values[k] = rndm.ran1()
gauss_values[k] = rndm.gasdev()
rayleigh_values[k] = rndm.rayleigh()
laplace_values[k] = rndm.laplacian()
#*** HISTOGRAM DATA AND CALCULATE EXPECTED COUNTS ***#
uniform_bins = np.linspace(0,1,uniform_num_bins)
gauss_bins = np.linspace(-8,8,gauss_num_bins)
laplace_bins = np.linspace(-8,8,laplace_num_bins)
rayleigh_bins = np.linspace(0,10,rayleigh_num_bins)
uniform_hist = np.histogram(uniform_values,uniform_bins)
gauss_hist = np.histogram(gauss_values,gauss_bins)
rayleigh_hist = np.histogram(rayleigh_values,rayleigh_bins)
laplace_hist = np.histogram(laplace_values,laplace_bins)
uniform_expected = np.zeros(uniform_num_bins-1)
gauss_expected = np.zeros(gauss_num_bins-1)
rayleigh_expected = np.zeros(rayleigh_num_bins-1)
laplace_expected = np.zeros(laplace_num_bins-1)
for k in range(len(uniform_hist[0])):
uniform_expected[k] = num_tests/float(uniform_num_bins-1)
for k in range(len(gauss_hist[0])):
gauss_expected[k] = float(norm.cdf(gauss_hist[1][k+1])-norm.cdf(gauss_hist[1][k]))*num_tests
for k in range(len(rayleigh_hist[0])):
rayleigh_expected[k] = float(rayleigh.cdf(rayleigh_hist[1][k+1])-rayleigh.cdf(rayleigh_hist[1][k]))*num_tests
for k in range(len(laplace_hist[0])):
laplace_expected[k] = float(laplace.cdf(laplace_hist[1][k+1])-laplace.cdf(laplace_hist[1][k]))*num_tests
#*** PLOT HISTOGRAMS AND EXPECTATIONS TAKEN FROM SCIPY ***#
uniform_bins_center = uniform_bins[0:-1]+(uniform_bins[1]-uniform_bins[0])/2.0
gauss_bins_center = gauss_bins[0:-1]+(gauss_bins[1]-gauss_bins[0])/2.0
rayleigh_bins_center = rayleigh_bins[0:-1]+(rayleigh_bins[1]-rayleigh_bins[0])/2.0
laplace_bins_center = laplace_bins[0:-1]+(laplace_bins[1]-laplace_bins[0])/2.0
plt.figure(1)
plt.subplot(2,1,1)
plt.plot(uniform_bins_center,uniform_hist[0],'s--',uniform_bins_center,uniform_expected,'o:')
plt.xlabel('Bins'), plt.ylabel('Count'), plt.title('Uniform: Distribution')
plt.legend(['histogram gr::random','calculation scipy'],loc=1)
plt.subplot(2,1,2)
plt.plot(uniform_bins_center,uniform_hist[0]/uniform_expected,'rs--')
plt.xlabel('Bins'), plt.ylabel('Relative deviation'), plt.title('Uniform: Relative deviation to scipy')
plt.figure(2)
plt.subplot(2,1,1)
plt.plot(gauss_bins_center,gauss_hist[0],'s--',gauss_bins_center,gauss_expected,'o:')
plt.xlabel('Bins'), plt.ylabel('Count'), plt.title('Gauss: Distribution')
plt.legend(['histogram gr::random','calculation scipy'],loc=1)
plt.subplot(2,1,2)
plt.plot(gauss_bins_center,gauss_hist[0]/gauss_expected,'rs--')
plt.xlabel('Bins'), plt.ylabel('Relative deviation'), plt.title('Gauss: Relative deviation to scipy')
plt.figure(3)
plt.subplot(2,1,1)
plt.plot(rayleigh_bins_center,rayleigh_hist[0],'s--',rayleigh_bins_center,rayleigh_expected,'o:')
plt.xlabel('Bins'), plt.ylabel('Count'), plt.title('Rayleigh: Distribution')
plt.legend(['histogram gr::random','calculation scipy'],loc=1)
plt.subplot(2,1,2)
plt.plot(rayleigh_bins_center,rayleigh_hist[0]/rayleigh_expected,'rs--')
plt.xlabel('Bins'), plt.ylabel('Relative deviation'), plt.title('Rayleigh: Relative deviation to scipy')
plt.figure(4)
plt.subplot(2,1,1)
plt.plot(laplace_bins_center,laplace_hist[0],'s--',laplace_bins_center,laplace_expected,'o:')
plt.xlabel('Bins'), plt.ylabel('Count'), plt.title('Laplace: Distribution')
plt.legend(['histogram gr::random','calculation scipy'],loc=1)
plt.subplot(2,1,2)
plt.plot(laplace_bins_center,laplace_hist[0]/laplace_expected,'rs--')
plt.xlabel('Bins'), plt.ylabel('Relative deviation'), plt.title('Laplace: Relative deviation to scipy')
plt.show()
| gpl-3.0 |
ChayaSt/Torsions | torsionfit/plots.py | 4 | 23291 | """
Plotting module and data exploration for torsionfit
This module contains functions to simplify exploration of torsionfit output in addition to general plotting functions
"""
__author__ = 'Chaya D. Stern'
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import numpy as np
import pymbar
# global parameter
multiplicities = (1, 2, 3, 4, 6)
def get_parameter_names(model, db, n5=False):
"""
returns a dictionary that maps each torsion name to all of its associated parameters for convenient trace access in pymc.database
:param model: torsionfit.TorsionFitModel
:param db: pymc.database (can also be pymc.sampler)
:return: dictionary mapping torsion name to all associated parameters
"""
if n5:
multiplicities = tuple(range(1, 7))
else:
multiplicities = (1, 2, 3, 4, 6)
torsion_parameters = {}
torsions = model.parameters_to_optimize
for name in torsions:
torsion_name = name[0] + '_' + name[1] + '_' + name[2] + '_' + name[3]
torsion_parameters[torsion_name] = []
multiplicity_bitstring = torsion_name + '_multiplicity_bitstring'
torsion_parameters[torsion_name].append(multiplicity_bitstring)
for m in multiplicities:
k = torsion_name + '_' + str(m) + '_K'
torsion_parameters[torsion_name].append(k)
phase = torsion_name + '_' + str(m) + '_Phase'
torsion_parameters[torsion_name].append(phase)
return torsion_parameters
def get_multiplicity_traces(torsion_parameters, db, n5=False):
"""
returns traces in (0, 1) for the multiplicity terms of all torsions
:param torsion_parameters: dict mapping torsion name to parameters for that torsion or name of torsion
:param db: pymc.database
:return: dict mapping torsion name to multiplicity terms trace
"""
if n5:
multiplicities = tuple(range(1, 7))
else:
multiplicities = (1, 2, 3, 4, 6)
if type(torsion_parameters) == str:
torsion_parameters = [torsion_parameters]
else:
torsion_parameters = torsion_parameters.keys()
multiplicity_traces = {}
for torsion_name in torsion_parameters:
multiplicity_bitstring = torsion_name + '_multiplicity_bitstring'
for m in multiplicities:
multiplicity_traces[torsion_name + '_' + str(m)] = []
for i in db.trace(multiplicity_bitstring)[:]:
if 2**(m-1) & int(i):
multiplicity_traces[torsion_name + '_' + str(m)].append(1)
else:
multiplicity_traces[torsion_name + '_' + str(m)].append(0)
return multiplicity_traces
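# --- Illustrative sketch (added for clarity; not part of the original module) ---
# The multiplicity bitstring packs which periodicity terms are "on" into a
# single integer: bit (m - 1) corresponds to multiplicity m. The value 11
# below is a made-up example for illustration only.
def decode_multiplicity_bitstring_demo(bitstring=11, n5=False):
    terms = tuple(range(1, 7)) if n5 else (1, 2, 3, 4, 6)
    # 11 = 0b1011 -> bits 0, 1 and 3 are set -> multiplicities 1, 2 and 4 are on
    return [m for m in terms if 2 ** (m - 1) & int(bitstring)]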
def get_statistics(db, torsion_parameters):
"""
uses pymbar.timeseries.detectEquilibration to get the equilibration time, statistical inefficiency and number of effective
samples for each trace. Returns a dictionary that maps all parameters to statistics.
:param db: pymc.database (can also use pymc.sampler)
:param torsion_parameters: dict mapping torsion name to associated parameters
:return: dict that maps parameters to statistics
"""
statistics = {}
for parameters in torsion_parameters:
for param in torsion_parameters[parameters]:
statistics[param] = pymbar.timeseries.detectEquilibration(db.trace(param)[:])
return statistics
def trace_plots(name, db, markersize, statistics=False, multiplicity_traces=False, continuous=False, filename=None):
"""
Generate trace plot for all parameters of a given torsion
:param name: str. name of torsion parameter A_B_C_D where A, B, C, and D are atom types.
:param db: pymc.database (can also use pymc.sampler)
:param markersize: int.
:param statistics: dict that maps parameters to statistics from pymbar.timeseries.detectEquilibrium. Default: False
:param multiplicity_traces: dict that maps multiplicity term to (0,1) trace. Default is False.
"""
if not multiplicity_traces:
try:
multiplicity_traces = get_multiplicity_traces(torsion_parameters=name, db=db)
except KeyError:
pass
pp = PdfPages('%s_traces.pdf' % name)
fig = plt.figure()
axes_k = plt.subplot(9, 2, 1)
plt.plot(db.trace(name + '_' + str(1) + '_K')[:], 'k.', markersize=markersize, label='K')
plt.title(name, fontweight='bold')
if statistics:
axes_k.axvline(statistics[name + '_' + '1' + '_K'][0], color='red', lw=1)
else:
axes_k.axvline(pymbar.timeseries.detectEquilibration(db.trace(name + '_' + str(1) + '_K')[:])[0], color='red',
lw=1)
plt.ylim(0, 20)
plt.ylabel('kJ/mole')
plt.xticks([])
plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
plt.yticks([0, 20])
axes_phase = plt.subplot(9, 2, 3)
plt.plot(db.trace(name + '_' + str(1) + '_Phase')[:], '.', markersize=markersize, label='Phase')
if continuous:
plt.ylim(-1.0, 181)
plt.yticks([1, 180])
else:
plt.ylim(-0.1, 1.1)
plt.yticks([0, 1])
plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
plt.xticks([])
axes_n = plt.subplot(9, 2, 5)
try:
plt.plot(multiplicity_traces[name + '_' + str(1)], 'k.', markersize=markersize, label='1')
plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
plt.ylim(-0.1, 1.1)
plt.yticks([0, 1])
plt.xticks([])
except:
pass
axes_k = plt.subplot(9, 2, 7)
plt.plot(db.trace(name + '_' + str(2) + '_K')[:], 'k.', markersize=markersize, label='K')
if statistics:
axes_k.axvline(statistics[name + '_' + '2' + '_K'][0], color='red', lw=1)
else:
axes_k.axvline(pymbar.timeseries.detectEquilibration(db.trace(name + '_' + str(2) + '_K')[:])[0], color='red',
lw=1)
plt.ylim(0, 20)
plt.ylabel('kJ/mole')
plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
plt.xticks([])
plt.yticks([0, 20])
axes_phase = plt.subplot(9, 2, 9)
plt.plot(db.trace(name + '_' + str(2) + '_Phase')[:], '.', markersize=markersize, label='Phase')
if continuous:
plt.ylim(-1.0, 181)
plt.yticks([1, 180])
else:
plt.ylim(-0.1, 1.1)
plt.yticks([0, 1])
plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
plt.xticks([])
axes_n = plt.subplot(9, 2, 11)
try:
plt.plot(multiplicity_traces[name + '_' + str(2)], 'k.', markersize=markersize, label='2')
plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
plt.ylim(-0.1, 1.1)
plt.yticks([0, 1])
plt.xticks([])
except:
pass
axes_k = plt.subplot(9, 2, 13)
plt.plot(db.trace(name + '_' + str(3) + '_K')[:], 'k.', markersize=markersize, label='K')
if statistics:
axes_k.axvline(statistics[name + '_' + '3' + '_K'][0], color='red', lw=1)
else:
axes_k.axvline(pymbar.timeseries.detectEquilibration(db.trace(name + '_' + str(3) + '_K')[:])[0], color='red',
lw=1)
plt.ylim(0, 20)
plt.ylabel('kJ/mole')
plt.xticks([])
plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
plt.yticks([0, 20])
axes_phase = plt.subplot(9, 2, 15)
plt.plot(db.trace(name + '_' + str(3) + '_Phase')[:], '.', markersize=markersize, label='Phase')
plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
plt.xticks([])
if continuous:
plt.ylim(-1.0, 181)
plt.yticks([1, 180])
else:
plt.ylim(-0.1, 1.1)
plt.yticks([0, 1])
axes_n = plt.subplot(9, 2, 17)
try:
plt.plot(multiplicity_traces[name + '_' + str(3)], 'k.', markersize=markersize, label='3')
plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
plt.ylim(-0.1, 1.1)
plt.yticks([0, 1])
plt.xlabel('mcmc steps')
except:
pass
axes_k = plt.subplot(9, 2, 2)
plt.title(name, fontweight='bold')
plt.plot(db.trace(name + '_' + str(4) + '_K')[:], 'k.', markersize=markersize, label='K')
if statistics:
axes_k.axvline(statistics[name + '_' + '4' + '_K'][0], color='red', lw=1)
else:
axes_k.axvline(pymbar.timeseries.detectEquilibration(db.trace(name + '_' + str(4) + '_K')[:])[0], color='red',
lw=1)
plt.ylim(0, 20)
plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
plt.xticks([])
plt.yticks([])
axes_phase = plt.subplot(9, 2, 4)
plt.plot(db.trace(name + '_' + str(4) + '_Phase')[:], '.', markersize=markersize, label='Phase')
plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
if continuous:
plt.ylim(-1.0, 181)
plt.yticks([1, 180])
else:
plt.ylim(-0.1, 1.1)
plt.yticks([0, 1])
plt.xticks([])
plt.yticks([])
try:
axes_n = plt.subplot(9, 2, 6)
plt.plot(multiplicity_traces[name + '_' + str(4)], 'k.', markersize=markersize, label='4')
plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
plt.ylim(-0.1, 1.1)
plt.yticks([])
plt.xticks([])
except:
pass
axes_k = plt.subplot(9, 2, 8)
plt.plot(db.trace(name + '_' + str(6) + '_K')[:], 'k.', markersize=markersize, label='K')
if statistics:
axes_k.axvline(statistics[name + '_' + '6' + '_K'][0], color='red', lw=1)
else:
axes_k.axvline(pymbar.timeseries.detectEquilibration(db.trace(name + '_' + str(6) + '_K')[:])[0], color='red',
lw=1)
plt.ylim(0, 20)
plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
plt.xticks([])
plt.yticks([])
axes_phase = plt.subplot(9, 2, 10)
plt.plot(db.trace(name + '_' + str(6) + '_Phase')[:], '.', markersize=markersize, label='Phase')
plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
if continuous:
plt.ylim(-1.0, 181)
plt.yticks([1, 180])
else:
plt.ylim(-0.1, 1.1)
plt.yticks([0, 1])
plt.xticks([])
plt.yticks([])
axes_n = plt.subplot(9, 2, 12)
try:
plt.plot(multiplicity_traces[name + '_' + str(6)], 'k.', markersize=markersize, label='6')
plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
plt.ylim(-0.1, 1.1)
plt.yticks([])
plt.xlabel('mcmc steps')
except:
pass
if filename is None:
fig.savefig('%s_traces.pdf' % name)
else:
fig.savefig(filename)
pp.savefig(fig, dpi=80)
pp.close()
def trace_no_phase(name, db, markersize, statistics=False, multiplicity_traces=False, ymin=-20, ymax=20, filename=None):
"""
Generate trace plot for all parameters of a given torsion
:param name: str. name of torsion parameter A_B_C_D where A, B, C, and D are atom types.
:param db: pymc.database (can also use pymc.sampler)
:param markersize: int.
:param statistics: dict that maps parameters to statistics from pymbar.timeseries.detectEquilibrium. Default: False
:param multiplicity_traces: dict that maps multiplicity term to (0,1) trace. Default is False.
"""
if not multiplicity_traces:
try:
multiplicity_traces = get_multiplicity_traces(torsion_parameters=name, db=db)
except KeyError:
pass
pp = PdfPages('%s_traces.pdf' % name)
fig = plt.figure()
axes_k = plt.subplot(5, 2, 1)
plt.plot(db.trace(name + '_' + str(1) + '_K')[:], 'k.', markersize=markersize, label='K')
plt.title(name, fontweight='bold')
if statistics:
axes_k.axvline(statistics[name + '_' + '1' + '_K'][0], color='red', lw=1)
else:
axes_k.axvline(pymbar.timeseries.detectEquilibration(db.trace(name + '_' + str(1) + '_K')[:])[0], color='red',
lw=1)
plt.ylim(ymin, ymax)
plt.ylabel('kJ/mole')
plt.xticks([])
plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
plt.yticks([ymin, 0, ymax])
axes_n = plt.subplot(5, 2, 2)
try:
plt.plot(multiplicity_traces[name + '_' + str(1)], 'k.', markersize=markersize, label='1')
plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
plt.ylim(-0.1, 1.1)
plt.yticks([0, 1])
plt.xticks([])
except:
pass
axes_k = plt.subplot(5, 2, 3)
plt.plot(db.trace(name + '_' + str(2) + '_K')[:], 'k.', markersize=markersize, label='K')
if statistics:
axes_k.axvline(statistics[name + '_' + '2' + '_K'][0], color='red', lw=1)
else:
axes_k.axvline(pymbar.timeseries.detectEquilibration(db.trace(name + '_' + str(2) + '_K')[:])[0], color='red',
lw=1)
plt.ylim(ymin, ymax)
plt.ylabel('kJ/mole')
plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
plt.xticks([])
plt.yticks([ymin, 0, ymax])
axes_n = plt.subplot(5, 2, 4)
try:
plt.plot(multiplicity_traces[name + '_' + str(2)], 'k.', markersize=markersize, label='2')
plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
plt.ylim(-0.1, 1.1)
plt.yticks([0, 1])
plt.xticks([])
except:
pass
axes_k = plt.subplot(5, 2, 5)
plt.plot(db.trace(name + '_' + str(3) + '_K')[:], 'k.', markersize=markersize, label='K')
if statistics:
axes_k.axvline(statistics[name + '_' + '3' + '_K'][0], color='red', lw=1)
else:
axes_k.axvline(pymbar.timeseries.detectEquilibration(db.trace(name + '_' + str(3) + '_K')[:])[0], color='red',
lw=1)
plt.ylim(ymin, ymax)
plt.ylabel('kJ/mole')
plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
plt.yticks([ymin, 0, ymax])
plt.xlabel('mcmc steps')
axes_n = plt.subplot(5, 2, 6)
plt.title(name, fontweight='bold')
try:
plt.plot(multiplicity_traces[name + '_' + str(3)], 'k.', markersize=markersize, label='3')
plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
plt.ylim(-0.1, 1.1)
plt.yticks([])
plt.xticks([])
except:
pass
axes_k = plt.subplot(5, 2, 7)
plt.plot(db.trace(name + '_' + str(4) + '_K')[:], 'k.', markersize=markersize, label='K')
if statistics:
axes_k.axvline(statistics[name + '_' + '4' + '_K'][0], color='red', lw=1)
else:
axes_k.axvline(pymbar.timeseries.detectEquilibration(db.trace(name + '_' + str(4) + '_K')[:])[0], color='red',
lw=1)
plt.ylim(ymin, ymax)
plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
plt.xticks([])
plt.yticks([ymin, 0, ymax])
axes_n = plt.subplot(5, 2, 8)
try:
plt.plot(multiplicity_traces[name + '_' + str(4)], 'k.', markersize=markersize, label='4')
plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
plt.ylim(-0.1, 1.1)
plt.yticks([])
plt.xticks([])
except:
pass
axes_k = plt.subplot(5, 2, 9)
plt.plot(db.trace(name + '_' + str(6) + '_K')[:], 'k.', markersize=markersize, label='K')
if statistics:
axes_k.axvline(statistics[name + '_' + '6' + '_K'][0], color='red', lw=1)
else:
axes_k.axvline(pymbar.timeseries.detectEquilibration(db.trace(name + '_' + str(6) + '_K')[:])[0], color='red',
lw=1)
plt.ylim(ymin, ymax)
plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
plt.yticks([ymin, 0, ymax])
plt.xlabel('mcmc steps')
axes_n = plt.subplot(5, 2, 10)
try:
plt.plot(multiplicity_traces[name + '_' + str(6)], 'k.', markersize=markersize, label='6')
plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
plt.ylim(-0.1, 1.1)
plt.yticks([])
plt.xlabel('mcmc steps')
except:
pass
if not filename:
fig.savefig('%s_traces.pdf' % name)
else:
fig.savefig(filename)
pp.savefig(fig, dpi=80)
pp.close()
def trace_no_phase_n5(name, db, markersize, statistics=False, equil=True, multiplicity_traces=False, ymin=-20, ymax=20, filename=None):
"""
Generate trace plot for all parameters of a given torsion
:param name: str. name of torsion parameter A_B_C_D where A, B, C, and D are atom types.
:param db: pymc.database (can also use pymc.sampler)
:param markersize: int.
:param statistics: dict that maps parameters to statistics from pymbar.timeseries.detectEquilibrium. Default: False
:param multiplicity_traces: dict that maps multiplicity term to (0,1) trace. Default is False.
"""
if not multiplicity_traces:
multiplicity_traces = get_multiplicity_traces(torsion_parameters=name, db=db, n5=True)
pp = PdfPages('%s_traces.pdf' % name)
fig = plt.figure()
axes_k = plt.subplot(6, 2, 1)
plt.plot(db.trace(name + '_' + str(1) + '_K')[:], 'k.', markersize=markersize, label='K')
plt.title(name, fontweight='bold')
if equil:
if statistics:
axes_k.axvline(statistics[name + '_' + '1' + '_K'][0], color='red', lw=1)
else:
axes_k.axvline(pymbar.timeseries.detectEquilibration(db.trace(name + '_' + str(1) + '_K')[:])[0], color='red',
lw=1)
plt.ylim(ymin, ymax)
plt.ylabel('kJ/mole')
plt.xticks([])
plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
plt.yticks([ymin, 0, ymax])
axes_n = plt.subplot(6, 2, 2)
plt.title(name, fontweight='bold')
plt.plot(multiplicity_traces[name + '_' + str(1)], 'k.', markersize=markersize, label='1')
plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
plt.ylim(-0.1, 1.1)
plt.yticks([0, 1])
plt.xticks([])
axes_k = plt.subplot(6, 2, 3)
plt.plot(db.trace(name + '_' + str(2) + '_K')[:], 'k.', markersize=markersize, label='K')
if equil:
if statistics:
axes_k.axvline(statistics[name + '_' + '2' + '_K'][0], color='red', lw=1)
else:
axes_k.axvline(pymbar.timeseries.detectEquilibration(db.trace(name + '_' + str(2) + '_K')[:])[0], color='red',
lw=1)
plt.ylim(ymin, ymax)
plt.ylabel('kJ/mole')
plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
plt.xticks([])
plt.yticks([ymin, 0, ymax])
axes_n = plt.subplot(6, 2, 4)
plt.plot(multiplicity_traces[name + '_' + str(2)], 'k.', markersize=markersize, label='2')
plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
plt.ylim(-0.1, 1.1)
plt.yticks([0, 1])
plt.xticks([])
axes_k = plt.subplot(6, 2, 5)
plt.plot(db.trace(name + '_' + str(3) + '_K')[:], 'k.', markersize=markersize, label='K')
if equil:
if statistics:
axes_k.axvline(statistics[name + '_' + '3' + '_K'][0], color='red', lw=1)
else:
axes_k.axvline(pymbar.timeseries.detectEquilibration(db.trace(name + '_' + str(3) + '_K')[:])[0], color='red',
lw=1)
plt.ylim(ymin, ymax)
plt.ylabel('kJ/mole')
plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
plt.yticks([ymin, 0, ymax])
plt.xticks([])
axes_n = plt.subplot(6, 2, 6)
plt.plot(multiplicity_traces[name + '_' + str(3)], 'k.', markersize=markersize, label='3')
plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
plt.ylim(-0.1, 1.1)
plt.yticks([0, 1])
plt.xticks([])
axes_k = plt.subplot(6, 2, 7)
plt.plot(db.trace(name + '_' + str(4) + '_K')[:], 'k.', markersize=markersize, label='K')
if equil:
if statistics:
axes_k.axvline(statistics[name + '_' + '4' + '_K'][0], color='red', lw=1)
else:
axes_k.axvline(pymbar.timeseries.detectEquilibration(db.trace(name + '_' + str(4) + '_K')[:])[0], color='red',
lw=1)
plt.ylim(ymin, ymax)
plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
plt.xticks([])
plt.yticks([ymin, 0, ymax])
plt.ylabel('KJ/mol')
axes_n = plt.subplot(6, 2, 8)
plt.plot(multiplicity_traces[name + '_' + str(4)], 'k.', markersize=markersize, label='4')
plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
plt.ylim(-0.1, 1.1)
plt.yticks([0, 1])
plt.xticks([])
axes_k = plt.subplot(6, 2, 9)
plt.plot(db.trace(name + '_' + str(5) + '_K')[:], 'k.', markersize=markersize, label='K')
if equil:
if statistics:
axes_k.axvline(statistics[name + '_' + '5' + '_K'][0], color='red', lw=1)
else:
axes_k.axvline(pymbar.timeseries.detectEquilibration(db.trace(name + '_' + str(5) + '_K')[:])[0], color='red',
lw=1)
plt.ylim(ymin, ymax)
plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
plt.xticks([])
plt.yticks([ymin, 0, ymax])
plt.ylabel('KJ/mol')
axes_n = plt.subplot(6, 2, 10)
plt.plot(multiplicity_traces[name + '_' + str(5)], 'k.', markersize=markersize, label='5')
plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
plt.ylim(-0.1, 1.1)
plt.yticks([0, 1])
plt.xticks([])
axes_k = plt.subplot(6, 2, 11)
plt.plot(db.trace(name + '_' + str(6) + '_K')[:], 'k.', markersize=markersize, label='K')
if equil:
if statistics:
axes_k.axvline(statistics[name + '_' + '6' + '_K'][0], color='red', lw=1)
else:
axes_k.axvline(pymbar.timeseries.detectEquilibration(db.trace(name + '_' + str(6) + '_K')[:])[0], color='red',
lw=1)
plt.ylim(ymin, ymax)
plt.yticks([ymin, 0, ymax])
plt.ylabel('KJ/mol')
plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
plt.xlabel('mcmc steps')
axes_n = plt.subplot(6, 2, 12)
plt.plot(multiplicity_traces[name + '_' + str(6)], 'k.', markersize=markersize, label='6')
plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
plt.ylim(-0.1, 1.1)
plt.yticks([0, 1])
plt.xlabel('mcmc steps')
if not filename:
fig.savefig('%s_traces.pdf' % name)
else:
fig.savefig(filename)
pp.savefig(fig, dpi=80)
pp.close()
def marg_mult(model, db, samples, burn=0, filename=None, n5=False):
"""
generates histogram for marginal distribution of posterior multiplicities.
:param model: TorsionFitModel
:param db: pymc.database for model
:param samples: length of trace
:param burn: int. number of steps to skip
:param filename: filename for plot to save
"""
if n5:
multiplicities = tuple(range(1, 7))
else:
multiplicities = (1, 2, 3, 4, 6)
mult_bitstring = []
for i in model.pymc_parameters.keys():
if i.split('_')[-1] == 'bitstring':
mult_bitstring.append(i)
if n5:
histogram = np.zeros((len(mult_bitstring), samples, 6))
else:
histogram = np.zeros((len(mult_bitstring), samples, 5))
for m, torsion in enumerate(mult_bitstring):
for i, j in enumerate(db.trace('%s' % torsion)[burn:]):
for k, l in enumerate(multiplicities):
if 2**(l-1) & int(j):
histogram[m][i][k] = 1
plt.matshow(histogram.sum(1), cmap='Blues', extent=[0, 5, 0, 20]), plt.colorbar()
plt.yticks([])
plt.xlabel('multiplicity term')
plt.ylabel('torsion')
if filename:
plt.savefig(filename)
| gpl-2.0 |
pastewka/lammps | examples/SPIN/test_problems/validation_damped_precession/llg_precession.py | 9 | 1646 | #!/usr/bin/env python3
import numpy as np , pylab, tkinter
import math
import matplotlib.pyplot as plt
import mpmath as mp
mub=5.78901e-5 # Bohr magneton (eV/T)
hbar=0.658212 # reduced Planck constant (eV.fs/rad)
g=2.0 # Lande factor (adim)
gyro=g*mub/hbar # gyromag ratio (rad/fs/T)
alpha=0.01 # damping coefficient
pi=math.pi
Bnrm=10.0 # mag. field (T)
Bext = np.array([0.0, 0.0, 1.0])
Sn = 2.0 # spin norm (in # of muB)
S = np.array([1.0, 0.0, 0.0])
N=500000 # number of timesteps
dt=0.1 # timestep (fs)
# Rodrigues rotation formula
def rotation_matrix(axis, theta):
"""
Return the rotation matrix associated with counterclockwise
rotation about the given axis by theta radians
"""
axis = np.asarray(axis)
a = math.cos(theta / 2.0)
b, c, d = -axis * math.sin(theta / 2.0)
aa, bb, cc, dd = a * a, b * b, c * c, d * d
bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
return np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],
[2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],
[2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])
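# --- Illustrative sanity check (added for clarity; not part of the original script) ---
# Rotating the x unit vector by pi/2 about z should give the y unit vector, and
# the matrix should be orthogonal (R R^T = I). This only demonstrates the
# Rodrigues formula above and is not called by the simulation loop.
def check_rotation_matrix():
    R_z = rotation_matrix(np.array([0.0, 0.0, 1.0]), math.pi / 2.0)
    assert np.allclose(np.dot(R_z, [1.0, 0.0, 0.0]), [0.0, 1.0, 0.0])
    assert np.allclose(np.dot(R_z, R_z.T), np.eye(3))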
# calc. precession field
def calc_rot_vector(Fi,Sp):
rot = gyro*Sn*Bnrm*(Fi-alpha*np.cross(Fi,Sp))
return rot
# np.set_printoptions(precision=4)
for t in range (0,N):
wf = calc_rot_vector(Bext,S)
theta=dt*np.linalg.norm(wf)
axis=wf/np.linalg.norm(wf)
S = np.dot(rotation_matrix(axis, theta), S)
en = -hbar*gyro*Sn*Bnrm*np.dot(S,Bext)
# print res. in ps for comparison with LAMMPS
print(t*dt/1000.0,S[0],S[1],S[2],en)
| gpl-2.0 |
numenta-archive/htmresearch | projects/capybara/supervised_baseline/v1_no_sequences/run_baseline.py | 9 | 11821 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import argparse
import csv
import json
import os
import pandas as pd
import time
import numpy as np
import yaml
from baseline_utils import (create_model,
convert_to_one_hot, save_keras_model,
load_keras_model)
def _get_union(df, ma_window, column_name):
window = [df[column_name].values[i] for i in range(ma_window)]
padding_value = np.mean(window, axis=0)
union = [padding_value for _ in range(ma_window)]
for k in range(ma_window, len(df)):
window = [df[column_name].values[k - ma_window + i]
for i in range(ma_window)]
union.append(np.mean(window, axis=0))
return np.array(union)
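# --- Illustrative sketch (added for clarity; not part of the original script) ---
# _get_union() is a simple moving average over the encoded column: every output
# row is the mean of the previous `ma_window` rows, and the mean of the first
# window is repeated as padding. The tiny frame below is a made-up example.
def _get_union_demo():
    demo = pd.DataFrame({'x': [np.array([1.0, 0.0]),
                               np.array([0.0, 1.0]),
                               np.array([1.0, 1.0])]})
    # rows 0 and 1 are the padding mean [0.5, 0.5]; row 2 is the mean of rows 0-1
    return _get_union(demo, ma_window=2, column_name='x')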
def _convert_df(df, y_dim, ma_window, column_name):
if ma_window > 0:
X = _get_union(df, ma_window, column_name)
else:
X = df[column_name].values
y_labels = df.label.values
y = convert_to_one_hot(y_labels, y_dim)
t = df.t.values
X_values = df.scalarValue.values
return t, X, X_values, y, y_labels
def _convert_patternNZ_json_string_to_sdr(patternNZ_json_string, sdr_width):
patternNZ = np.array(json.loads(patternNZ_json_string), dtype=int)
sdr = np.zeros(sdr_width)
sdr[patternNZ] = 1
return sdr
def _sdr_converter(sdr_width):
return lambda x: _convert_patternNZ_json_string_to_sdr(x, sdr_width)
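# --- Illustrative sketch (added for clarity; not part of the original script) ---
# The converters above turn a JSON-encoded list of active cell indices
# (patternNZ) into a dense binary SDR of the given width. The string below is
# a made-up example for illustration only.
def sdr_converter_demo():
    decode = _sdr_converter(sdr_width=8)
    return decode('[1, 3, 6]')  # -> array([0., 1., 0., 1., 0., 0., 1., 0.])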
def _train_on_chunks(model, output_dim, input_dim, input_file,
history_writer, ma_window, chunk_size,
batch_size, num_epochs, input_name):
"""
Don't load all the data in memory. Read it chunk by chunk to train the model.
:param model: (keras.Model) model to train.
:param output_dim: (int) dimension of the output layer.
:param input_dim: (int) dimension of the input layer.
:param input_file: (str) path to the input training set.
:param history_writer: (csv.writer) file writer.
"""
start = time.time()
for epoch in range(num_epochs):
print 'Epoch %s/%s' % (epoch, num_epochs)
# Note: http://stackoverflow.com/a/1271353
df_generator = pd.read_csv(
input_file, chunksize=chunk_size, iterator=True,
converters={
'tmPredictedActiveCells': _sdr_converter(2048 * 32),
'tmActiveCells': _sdr_converter(2048 * 32),
'spActiveColumns': _sdr_converter(2048)
},
usecols=['t', 'label', 'scalarValue', 'spActiveColumns',
'tmActiveCells', 'tmPredictedActiveCells'])
chunk_counter = 0
for df in df_generator:
t, X, X_values, y, y_labels = _convert_df(df, output_dim, ma_window,
input_name)
hist = model.fit(X, y, validation_split=0.0,
batch_size=batch_size, shuffle=False,
verbose=0, nb_epoch=1)
acc = hist.history['acc']
loss = hist.history['loss']
assert len(acc) == 1 # Should be only one epoch
history_writer.writerow([epoch, acc[0], loss[0]])
chunk_counter += 1
# Print elapsed time and # of rows processed.
now = int(time.time() - start)
row_id = chunk_size * chunk_counter
print '-> Elapsed train time: %ss - Rows processed: %s' % (now, row_id)
def _train(model, output_dim, input_dim, input_file, history_writer,
ma_window, chunk_size, batch_size, num_epochs, input_name):
"""
Load all the data in memory and train the model.
:param model: (keras.Model) model to train.
:param output_dim: (int) dimension of the output layer.
:param input_dim: (int) dimension of the input layer.
:param input_file: (str) path to the input training set.
:param history_writer: (csv.writer) file writer.
"""
start = time.time()
df = pd.read_csv(
input_file,
converters={
'tmPredictedActiveCells': _sdr_converter(2048 * 32),
'tmActiveCells': _sdr_converter(2048 * 32),
'spActiveColumns': _sdr_converter(2048)
},
usecols=['t', 'label', 'scalarValue', 'spActiveColumns',
'tmActiveCells', 'tmPredictedActiveCells'])
t, X, X_values, y, y_labels = _convert_df(df, output_dim, ma_window, input_name)
hist = model.fit(X, y, validation_split=0.0,
batch_size=batch_size, shuffle=False,
verbose=1, nb_epoch=num_epochs)
acc = hist.history['acc']
loss = hist.history['loss']
for epoch in range(num_epochs):
history_writer.writerow([epoch, acc[epoch], loss[epoch]])
print 'Elapsed time: %s' % (time.time() - start)
def _train_and_save_model(model_path, model_history_path,
input_dim, output_dim, lazy, train_file,
ma_window, chunk_size, batch_size, num_epochs,
input_name):
"""
Train model, save train history and trained model.
:param model_path: (str) path to serialized model.
:param model_history_path: (str) path to model train history.
:param input_dim: (int) input layer dimension.
:param output_dim: (int) output layer dimension.
:param lazy: (bool) whether to load the whole input file in memory or to
read it lazily in chunks.
:return model: (keras.Model) trained model.
"""
model = create_model(input_dim, output_dim)
with open(model_history_path, 'a') as historyFile:
history_writer = csv.writer(historyFile)
history_writer.writerow(['epoch', 'acc', 'loss'])
if lazy:
_train_on_chunks(model, output_dim, input_dim, train_file,
history_writer, ma_window, chunk_size,
batch_size, num_epochs, input_name)
else:
_train(model, output_dim, input_dim, train_file, history_writer,
ma_window, chunk_size, batch_size, num_epochs, input_name)
save_keras_model(model, model_path)
print 'Trained model saved:', model_path
print 'Training history saved:', model_history_path
return model
def _test_model(model, predictions_history_path, input_dim, output_dim,
test_file, chunk_size, ma_window, input_name):
"""
Evaluate model on test set and save prediction history.
:param model: (keras.Model) trained model.
:param predictions_history_path: (str) path to prediction history file.
:param input_dim: (int) input layer dimension.
:param output_dim: (int) output layer dimension.
"""
start = time.time()
chunks = pd.read_csv(
test_file, iterator=True, chunksize=chunk_size,
converters={
'tmPredictedActiveCells': _sdr_converter(2048 * 32),
'tmActiveCells': _sdr_converter(2048 * 32),
'spActiveColumns': _sdr_converter(2048)
},
usecols=['t', 'label', 'scalarValue', 'spActiveColumns',
'tmActiveCells', 'tmPredictedActiveCells'])
with open(predictions_history_path, 'a') as f:
pred_writer = csv.writer(f)
pred_writer.writerow(['t', 'scalar_value', 'y_pred', 'y_true'])
chunk_counter = 0
for chunk in chunks:
t, X, X_values, y, y_labels = _convert_df(chunk, output_dim, ma_window, input_name)
y_pred = model.predict_classes(X)
y_true = y_labels
for i in range(len(y_pred)):
pred_writer.writerow([t[i], X_values[i], y_pred[i], y_true[i]])
now = int(time.time() - start)
row_id = chunk_size * chunk_counter
print '\nElapsed test time: %ss - Row: %s' % (now, row_id)
chunk_counter += 1
print 'Elapsed time: %ss' % (time.time() - start)
print 'Test prediction history saved:', predictions_history_path
def _getConfig(configFilePath):
with open(configFilePath, 'r') as ymlFile:
config = yaml.load(ymlFile)
input_dir = config['inputs']['input_dir']
train_file_name = config['inputs']['train_file_name']
test_file_name = config['inputs']['test_file_name']
metric_name = config['inputs']['metric_name']
results_output_dir = config['outputs']['results_output_dir']
model_output_dir = config['outputs']['model_output_dir']
history_file = config['outputs']['history_file']
prediction_file = config['outputs']['prediction_file']
model_name = config['outputs']['model_name']
chunk_size = config['params']['chunk_size']
batch_size = config['params']['batch_size']
num_epochs = config['params']['num_epochs']
ma_window = config['params']['ma_window']
input_dim = config['params']['input_dim']
output_dim = config['params']['output_dim']
labels = config['params']['labels']
lazy = config['params']['lazy']
train = config['params']['train']
return (input_dir,
train_file_name,
test_file_name,
metric_name,
results_output_dir,
model_output_dir,
history_file,
prediction_file,
model_name,
chunk_size,
batch_size,
num_epochs,
ma_window,
input_dim,
output_dim,
labels,
lazy,
train)
def main():
# Get input args
parser = argparse.ArgumentParser()
parser.add_argument('--config', '-c',
dest='config',
type=str,
default='configs/body_acc_x.yml',
help='Name of YAML config file.')
options = parser.parse_args()
configFile = options.config
# Get config options.
(input_dir,
train_file_name,
test_file_name,
metric_name,
results_output_dir,
model_output_dir,
history_file,
prediction_file,
model_name,
chunk_size,
batch_size,
num_epochs,
ma_window,
input_dim,
output_dim,
labels,
lazy,
train) = _getConfig(configFile)
train_file = os.path.join(input_dir, train_file_name)
test_file = os.path.join(input_dir, test_file_name)
# Model dimensions
print 'input_dim', input_dim
print 'output_dim', output_dim
print 'train', train
print ''
# Make sure output directories exist
if not os.path.exists(results_output_dir):
os.makedirs(results_output_dir)
if not os.path.exists(model_output_dir):
os.makedirs(model_output_dir)
model_path = os.path.join(model_output_dir, model_name)
# Clean model history
model_history_path = os.path.join(results_output_dir, history_file)
if os.path.exists(model_history_path):
os.remove(model_history_path)
# Clean predictions history
prediction_history_path = os.path.join(results_output_dir, prediction_file)
if os.path.exists(prediction_history_path):
os.remove(prediction_history_path)
# Train
if train:
model = _train_and_save_model(model_path, model_history_path,
input_dim, output_dim, lazy, train_file,
ma_window, chunk_size, batch_size,
num_epochs, metric_name)
else:
model = load_keras_model(model_path)
# Test
_test_model(model, prediction_history_path, input_dim, output_dim, test_file,
chunk_size, ma_window, metric_name)
if __name__ == "__main__":
main()
| agpl-3.0 |
Tarrasch/luigi | examples/pyspark_wc.py | 17 | 3388 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import luigi
from luigi.contrib.s3 import S3Target
from luigi.contrib.spark import SparkSubmitTask, PySparkTask
class InlinePySparkWordCount(PySparkTask):
"""
This task runs a :py:class:`luigi.contrib.spark.PySparkTask` task
over the target data in :py:meth:`wordcount.input` (a file in S3) and
writes the result into its :py:meth:`wordcount.output` target (a file in S3).
This class uses :py:meth:`luigi.contrib.spark.PySparkTask.main`.
Example luigi configuration::
[spark]
spark-submit: /usr/local/spark/bin/spark-submit
master: spark://spark.example.org:7077
# py-packages: numpy, pandas
"""
driver_memory = '2g'
executor_memory = '3g'
def input(self):
return S3Target("s3n://bucket.example.org/wordcount.input")
def output(self):
return S3Target('s3n://bucket.example.org/wordcount.output')
def main(self, sc, *args):
sc.textFile(self.input().path) \
.flatMap(lambda line: line.split()) \
.map(lambda word: (word, 1)) \
.reduceByKey(lambda a, b: a + b) \
.saveAsTextFile(self.output().path)
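# A minimal sketch of how this task might be kicked off, assuming the module is
# importable as ``pyspark_wc`` and a local scheduler is acceptable (both are
# assumptions, not part of this example):
#
#     import luigi
#     luigi.build([InlinePySparkWordCount()], local_scheduler=True)
#
# or, equivalently, from the command line:
#
#     luigi --module pyspark_wc InlinePySparkWordCount --local-scheduler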
class PySparkWordCount(SparkSubmitTask):
"""
This task is the same as :py:class:`InlinePySparkWordCount` above but uses
an external python driver file specified in :py:meth:`app`
It runs a :py:class:`luigi.contrib.spark.SparkSubmitTask` task
over the target data in :py:meth:`wordcount.input` (a file in S3) and
writes the result into its :py:meth:`wordcount.output` target (a file in S3).
This class uses :py:meth:`luigi.contrib.spark.SparkSubmitTask.run`.
Example luigi configuration::
[spark]
spark-submit: /usr/local/spark/bin/spark-submit
master: spark://spark.example.org:7077
deploy-mode: client
"""
driver_memory = '2g'
executor_memory = '3g'
total_executor_cores = luigi.IntParameter(default=100, significant=False)
name = "PySpark Word Count"
app = 'wordcount.py'
def app_options(self):
# These are passed to the Spark main args in the defined order.
return [self.input().path, self.output().path]
def input(self):
return S3Target("s3n://bucket.example.org/wordcount.input")
def output(self):
return S3Target('s3n://bucket.example.org/wordcount.output')
'''
// Corresponding example Spark Job, running Word count with Spark's Python API
// This file would have to be saved into wordcount.py
import sys
from pyspark import SparkContext
if __name__ == "__main__":
sc = SparkContext()
sc.textFile(sys.argv[1]) \
.flatMap(lambda line: line.split()) \
.map(lambda word: (word, 1)) \
.reduceByKey(lambda a, b: a + b) \
.saveAsTextFile(sys.argv[2])
'''
| apache-2.0 |
butala/pyrsss | pyrsss/util/ellipse.py | 1 | 2736 | import math
import numpy as NP
from numpy.linalg import eig, inv
"""
From http://nicky.vanforeest.com/misc/fitEllipse/fitEllipse.html
Reference:
Fitzgibbon, A.W., Pilu, M., and Fisher, R.B., "Direct least squares
fitting of ellipses," Proc. of the 13th International Conference on
Pattern Recognition, pp. 253-257, Vienna, 1996.
"""
def fit_ellipse(x,y):
x = x[:,NP.newaxis]
y = y[:,NP.newaxis]
D = NP.hstack((x*x, x*y, y*y, x, y, NP.ones_like(x)))
S = NP.dot(D.T,D)
C = NP.zeros([6,6])
    C[0,2] = C[2,0] = 2
    C[1,1] = -1
E, V = eig(NP.dot(inv(S), C))
n = NP.argmax(NP.abs(E))
a = V[:,n]
return a
def ellipse_center(a):
b,c,d,f,g,a = a[1]/2, a[2], a[3]/2, a[4]/2, a[5], a[0]
num = b*b-a*c
x0=(c*d-b*f)/num
y0=(a*f-b*d)/num
return NP.array([x0,y0])
def ellipse_angle_of_rotation(a):
b,c,d,f,g,a = a[1]/2, a[2], a[3]/2, a[4]/2, a[5], a[0]
return 0.5*NP.arctan(2*b/(a-c))
def ellipse_axis_length(a):
b,c,d,f,g,a = a[1]/2, a[2], a[3]/2, a[4]/2, a[5], a[0]
up = 2*(a*f*f+c*d*d+g*b*b-2*b*d*f-a*c*g)
down1=(b*b-a*c)*( (c-a)*NP.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))
down2=(b*b-a*c)*( (a-c)*NP.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))
res1=NP.sqrt(up/down1)
res2=NP.sqrt(up/down2)
return NP.array([res1, res2])
def ellipse_angle_of_rotation2(a):
b,c,d,f,g,a = a[1]/2, a[2], a[3]/2, a[4]/2, a[5], a[0]
if b == 0:
if a > c:
return 0
else:
return NP.pi/2
else:
if a > c:
return NP.arctan(2*b/(a-c))/2
else:
return NP.pi/2 + NP.arctan(2*b/(a-c))/2
if __name__ == '__main__':
arc = 0.8
R = NP.arange(0,arc*NP.pi, 0.01)
x = 1.5*NP.cos(R) + 2 + 0.1*NP.random.rand(len(R))
y = NP.sin(R) + 1. + 0.1*NP.random.rand(len(R))
a = fit_ellipse(x,y)
center = ellipse_center(a)
#phi = ellipse_angle_of_rotation(a)
phi = ellipse_angle_of_rotation2(a)
axes = ellipse_axis_length(a)
print('center = {}'.format(center))
print('angle of rotation = {}'.format(phi))
print('axes = {}'.format(axes))
a, b = axes
xx = center[0] + a*NP.cos(R)*NP.cos(phi) - b*NP.sin(R)*NP.sin(phi)
yy = center[1] + a*NP.cos(R)*NP.sin(phi) + b*NP.sin(R)*NP.cos(phi)
import pylab as PL
from matplotlib.patches import Ellipse
ellipse = Ellipse(center,
2 * axes[0],
2 * axes[1],
math.degrees(phi),
color='g',
alpha=0.2)
fig = PL.figure()
ax = PL.subplot(111)
PL.scatter(x, y, marker='x')
PL.plot(xx,yy, color = 'red')
ax.add_artist(ellipse)
PL.show()
| mit |
rishirajsurti/BuildingMachineLearningSystemsWithPython | ch02/seeds_knn_increasing_k.py | 24 | 1437 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
# Basic imports
from __future__ import print_function
import numpy as np
from matplotlib import pyplot as plt
from load import load_dataset
from sklearn.neighbors import KNeighborsClassifier
from sklearn.cross_validation import cross_val_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
features, labels = load_dataset('seeds')
# Values of k to consider: all in 1 .. 160
ks = np.arange(1,161)
# We build a classifier object here with the default number of neighbors
# (it happens to be 5, but it does not matter, as we will be changing it below)
classifier = KNeighborsClassifier()
classifier = Pipeline([('norm', StandardScaler()), ('knn', classifier)])
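# The StandardScaler step matters here: k-NN is distance based, so without
# normalization, features measured on larger scales would dominate the neighbor search.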
# accuracies will hold our results
accuracies = []
for k in ks:
# set the classifier parameter
classifier.set_params(knn__n_neighbors=k)
crossed = cross_val_score(classifier, features, labels)
# Save only the average
accuracies.append(crossed.mean())
accuracies = np.array(accuracies)
# Scale the accuracies by 100 to plot as a percentage instead of as a fraction
plt.plot(ks, accuracies*100)
plt.xlabel('Value for k (nr. of neighbors)')
plt.ylabel('Accuracy (%)')
plt.savefig('figure6.png')
| mit |
xflin/spark | python/pyspark/sql/tests.py | 2 | 131465 | # -*- encoding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Unit tests for pyspark.sql; additional tests are implemented as doctests in
individual modules.
"""
import os
import sys
import subprocess
import pydoc
import shutil
import tempfile
import pickle
import functools
import time
import datetime
import array
import ctypes
import py4j
try:
import xmlrunner
except ImportError:
xmlrunner = None
if sys.version_info[:2] <= (2, 6):
try:
import unittest2 as unittest
except ImportError:
sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')
sys.exit(1)
else:
import unittest
_have_pandas = False
try:
import pandas
_have_pandas = True
except:
# No Pandas, but that's okay, we'll skip those tests
pass
from pyspark import SparkContext
from pyspark.sql import SparkSession, SQLContext, HiveContext, Column, Row
from pyspark.sql.types import *
from pyspark.sql.types import UserDefinedType, _infer_type, _make_type_verifier
from pyspark.sql.types import _array_signed_int_typecode_ctype_mappings, _array_type_mappings
from pyspark.sql.types import _array_unsigned_int_typecode_ctype_mappings
from pyspark.tests import QuietTest, ReusedPySparkTestCase, SparkSubmitTests
from pyspark.sql.functions import UserDefinedFunction, sha2, lit
from pyspark.sql.window import Window
from pyspark.sql.utils import AnalysisException, ParseException, IllegalArgumentException
_have_arrow = False
try:
import pyarrow
_have_arrow = True
except:
# No Arrow, but that's okay, we'll skip those tests
pass
class UTCOffsetTimezone(datetime.tzinfo):
"""
Specifies timezone in UTC offset
"""
def __init__(self, offset=0):
self.ZERO = datetime.timedelta(hours=offset)
def utcoffset(self, dt):
return self.ZERO
def dst(self, dt):
return self.ZERO
class ExamplePointUDT(UserDefinedType):
"""
User-defined type (UDT) for ExamplePoint.
"""
@classmethod
def sqlType(self):
return ArrayType(DoubleType(), False)
@classmethod
def module(cls):
return 'pyspark.sql.tests'
@classmethod
def scalaUDT(cls):
return 'org.apache.spark.sql.test.ExamplePointUDT'
def serialize(self, obj):
return [obj.x, obj.y]
def deserialize(self, datum):
return ExamplePoint(datum[0], datum[1])
class ExamplePoint:
"""
An example class to demonstrate UDT in Scala, Java, and Python.
"""
__UDT__ = ExamplePointUDT()
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
return "ExamplePoint(%s,%s)" % (self.x, self.y)
def __str__(self):
return "(%s,%s)" % (self.x, self.y)
def __eq__(self, other):
return isinstance(other, self.__class__) and \
other.x == self.x and other.y == self.y
class PythonOnlyUDT(UserDefinedType):
"""
    User-defined type (UDT) for PythonOnlyPoint.
"""
@classmethod
def sqlType(self):
return ArrayType(DoubleType(), False)
@classmethod
def module(cls):
return '__main__'
def serialize(self, obj):
return [obj.x, obj.y]
def deserialize(self, datum):
return PythonOnlyPoint(datum[0], datum[1])
@staticmethod
def foo():
pass
@property
def props(self):
return {}
class PythonOnlyPoint(ExamplePoint):
"""
    An example class to demonstrate a UDT in Python only
"""
__UDT__ = PythonOnlyUDT()
class MyObject(object):
def __init__(self, key, value):
self.key = key
self.value = value
class DataTypeTests(unittest.TestCase):
# regression test for SPARK-6055
def test_data_type_eq(self):
lt = LongType()
lt2 = pickle.loads(pickle.dumps(LongType()))
self.assertEqual(lt, lt2)
# regression test for SPARK-7978
def test_decimal_type(self):
t1 = DecimalType()
t2 = DecimalType(10, 2)
self.assertTrue(t2 is not t1)
self.assertNotEqual(t1, t2)
t3 = DecimalType(8)
self.assertNotEqual(t2, t3)
# regression test for SPARK-10392
def test_datetype_equal_zero(self):
dt = DateType()
self.assertEqual(dt.fromInternal(0), datetime.date(1970, 1, 1))
# regression test for SPARK-17035
def test_timestamp_microsecond(self):
tst = TimestampType()
self.assertEqual(tst.toInternal(datetime.datetime.max) % 1000000, 999999)
def test_empty_row(self):
row = Row()
self.assertEqual(len(row), 0)
class SQLTests(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(cls.tempdir.name)
cls.spark = SparkSession(cls.sc)
cls.testData = [Row(key=i, value=str(i)) for i in range(100)]
cls.df = cls.spark.createDataFrame(cls.testData)
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
cls.spark.stop()
shutil.rmtree(cls.tempdir.name, ignore_errors=True)
def test_sqlcontext_reuses_sparksession(self):
sqlContext1 = SQLContext(self.sc)
sqlContext2 = SQLContext(self.sc)
self.assertTrue(sqlContext1.sparkSession is sqlContext2.sparkSession)
def tearDown(self):
super(SQLTests, self).tearDown()
# tear down test_bucketed_write state
self.spark.sql("DROP TABLE IF EXISTS pyspark_bucket")
def test_row_should_be_read_only(self):
row = Row(a=1, b=2)
self.assertEqual(1, row.a)
def foo():
row.a = 3
self.assertRaises(Exception, foo)
row2 = self.spark.range(10).first()
self.assertEqual(0, row2.id)
def foo2():
row2.id = 2
self.assertRaises(Exception, foo2)
def test_range(self):
self.assertEqual(self.spark.range(1, 1).count(), 0)
self.assertEqual(self.spark.range(1, 0, -1).count(), 1)
self.assertEqual(self.spark.range(0, 1 << 40, 1 << 39).count(), 2)
self.assertEqual(self.spark.range(-2).count(), 0)
self.assertEqual(self.spark.range(3).count(), 3)
def test_duplicated_column_names(self):
df = self.spark.createDataFrame([(1, 2)], ["c", "c"])
row = df.select('*').first()
self.assertEqual(1, row[0])
self.assertEqual(2, row[1])
self.assertEqual("Row(c=1, c=2)", str(row))
# Cannot access columns
self.assertRaises(AnalysisException, lambda: df.select(df[0]).first())
self.assertRaises(AnalysisException, lambda: df.select(df.c).first())
self.assertRaises(AnalysisException, lambda: df.select(df["c"]).first())
def test_column_name_encoding(self):
"""Ensure that created columns has `str` type consistently."""
columns = self.spark.createDataFrame([('Alice', 1)], ['name', u'age']).columns
self.assertEqual(columns, ['name', 'age'])
self.assertTrue(isinstance(columns[0], str))
self.assertTrue(isinstance(columns[1], str))
def test_explode(self):
from pyspark.sql.functions import explode, explode_outer, posexplode_outer
d = [
Row(a=1, intlist=[1, 2, 3], mapfield={"a": "b"}),
Row(a=1, intlist=[], mapfield={}),
Row(a=1, intlist=None, mapfield=None),
]
rdd = self.sc.parallelize(d)
data = self.spark.createDataFrame(rdd)
result = data.select(explode(data.intlist).alias("a")).select("a").collect()
self.assertEqual(result[0][0], 1)
self.assertEqual(result[1][0], 2)
self.assertEqual(result[2][0], 3)
result = data.select(explode(data.mapfield).alias("a", "b")).select("a", "b").collect()
self.assertEqual(result[0][0], "a")
self.assertEqual(result[0][1], "b")
result = [tuple(x) for x in data.select(posexplode_outer("intlist")).collect()]
self.assertEqual(result, [(0, 1), (1, 2), (2, 3), (None, None), (None, None)])
result = [tuple(x) for x in data.select(posexplode_outer("mapfield")).collect()]
self.assertEqual(result, [(0, 'a', 'b'), (None, None, None), (None, None, None)])
result = [x[0] for x in data.select(explode_outer("intlist")).collect()]
self.assertEqual(result, [1, 2, 3, None, None])
result = [tuple(x) for x in data.select(explode_outer("mapfield")).collect()]
self.assertEqual(result, [('a', 'b'), (None, None), (None, None)])
def test_and_in_expression(self):
self.assertEqual(4, self.df.filter((self.df.key <= 10) & (self.df.value <= "2")).count())
self.assertRaises(ValueError, lambda: (self.df.key <= 10) and (self.df.value <= "2"))
self.assertEqual(14, self.df.filter((self.df.key <= 3) | (self.df.value < "2")).count())
self.assertRaises(ValueError, lambda: self.df.key <= 3 or self.df.value < "2")
self.assertEqual(99, self.df.filter(~(self.df.key == 1)).count())
self.assertRaises(ValueError, lambda: not self.df.key == 1)
def test_udf_with_callable(self):
d = [Row(number=i, squared=i**2) for i in range(10)]
rdd = self.sc.parallelize(d)
data = self.spark.createDataFrame(rdd)
class PlusFour:
def __call__(self, col):
if col is not None:
return col + 4
call = PlusFour()
pudf = UserDefinedFunction(call, LongType())
res = data.select(pudf(data['number']).alias('plus_four'))
self.assertEqual(res.agg({'plus_four': 'sum'}).collect()[0][0], 85)
def test_udf_with_partial_function(self):
d = [Row(number=i, squared=i**2) for i in range(10)]
rdd = self.sc.parallelize(d)
data = self.spark.createDataFrame(rdd)
def some_func(col, param):
if col is not None:
return col + param
pfunc = functools.partial(some_func, param=4)
pudf = UserDefinedFunction(pfunc, LongType())
res = data.select(pudf(data['number']).alias('plus_four'))
self.assertEqual(res.agg({'plus_four': 'sum'}).collect()[0][0], 85)
def test_udf(self):
self.spark.catalog.registerFunction("twoArgs", lambda x, y: len(x) + y, IntegerType())
[row] = self.spark.sql("SELECT twoArgs('test', 1)").collect()
self.assertEqual(row[0], 5)
def test_udf2(self):
self.spark.catalog.registerFunction("strlen", lambda string: len(string), IntegerType())
self.spark.createDataFrame(self.sc.parallelize([Row(a="test")]))\
.createOrReplaceTempView("test")
[res] = self.spark.sql("SELECT strlen(a) FROM test WHERE strlen(a) > 1").collect()
self.assertEqual(4, res[0])
def test_chained_udf(self):
self.spark.catalog.registerFunction("double", lambda x: x + x, IntegerType())
[row] = self.spark.sql("SELECT double(1)").collect()
self.assertEqual(row[0], 2)
[row] = self.spark.sql("SELECT double(double(1))").collect()
self.assertEqual(row[0], 4)
[row] = self.spark.sql("SELECT double(double(1) + 1)").collect()
self.assertEqual(row[0], 6)
def test_single_udf_with_repeated_argument(self):
# regression test for SPARK-20685
self.spark.catalog.registerFunction("add", lambda x, y: x + y, IntegerType())
row = self.spark.sql("SELECT add(1, 1)").first()
self.assertEqual(tuple(row), (2, ))
def test_multiple_udfs(self):
self.spark.catalog.registerFunction("double", lambda x: x * 2, IntegerType())
[row] = self.spark.sql("SELECT double(1), double(2)").collect()
self.assertEqual(tuple(row), (2, 4))
[row] = self.spark.sql("SELECT double(double(1)), double(double(2) + 2)").collect()
self.assertEqual(tuple(row), (4, 12))
self.spark.catalog.registerFunction("add", lambda x, y: x + y, IntegerType())
[row] = self.spark.sql("SELECT double(add(1, 2)), add(double(2), 1)").collect()
self.assertEqual(tuple(row), (6, 5))
def test_udf_in_filter_on_top_of_outer_join(self):
from pyspark.sql.functions import udf
left = self.spark.createDataFrame([Row(a=1)])
right = self.spark.createDataFrame([Row(a=1)])
df = left.join(right, on='a', how='left_outer')
df = df.withColumn('b', udf(lambda x: 'x')(df.a))
self.assertEqual(df.filter('b = "x"').collect(), [Row(a=1, b='x')])
def test_udf_in_filter_on_top_of_join(self):
# regression test for SPARK-18589
from pyspark.sql.functions import udf
left = self.spark.createDataFrame([Row(a=1)])
right = self.spark.createDataFrame([Row(b=1)])
f = udf(lambda a, b: a == b, BooleanType())
df = left.crossJoin(right).filter(f("a", "b"))
self.assertEqual(df.collect(), [Row(a=1, b=1)])
def test_udf_without_arguments(self):
self.spark.catalog.registerFunction("foo", lambda: "bar")
[row] = self.spark.sql("SELECT foo()").collect()
self.assertEqual(row[0], "bar")
def test_udf_with_array_type(self):
d = [Row(l=list(range(3)), d={"key": list(range(5))})]
rdd = self.sc.parallelize(d)
self.spark.createDataFrame(rdd).createOrReplaceTempView("test")
self.spark.catalog.registerFunction("copylist", lambda l: list(l), ArrayType(IntegerType()))
self.spark.catalog.registerFunction("maplen", lambda d: len(d), IntegerType())
[(l1, l2)] = self.spark.sql("select copylist(l), maplen(d) from test").collect()
self.assertEqual(list(range(3)), l1)
self.assertEqual(1, l2)
def test_broadcast_in_udf(self):
bar = {"a": "aa", "b": "bb", "c": "abc"}
foo = self.sc.broadcast(bar)
self.spark.catalog.registerFunction("MYUDF", lambda x: foo.value[x] if x else '')
[res] = self.spark.sql("SELECT MYUDF('c')").collect()
self.assertEqual("abc", res[0])
[res] = self.spark.sql("SELECT MYUDF('')").collect()
self.assertEqual("", res[0])
def test_udf_with_filter_function(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
from pyspark.sql.functions import udf, col
from pyspark.sql.types import BooleanType
my_filter = udf(lambda a: a < 2, BooleanType())
sel = df.select(col("key"), col("value")).filter((my_filter(col("key"))) & (df.value < "2"))
self.assertEqual(sel.collect(), [Row(key=1, value='1')])
def test_udf_with_aggregate_function(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
from pyspark.sql.functions import udf, col, sum
from pyspark.sql.types import BooleanType
my_filter = udf(lambda a: a == 1, BooleanType())
sel = df.select(col("key")).distinct().filter(my_filter(col("key")))
self.assertEqual(sel.collect(), [Row(key=1)])
my_copy = udf(lambda x: x, IntegerType())
my_add = udf(lambda a, b: int(a + b), IntegerType())
my_strlen = udf(lambda x: len(x), IntegerType())
sel = df.groupBy(my_copy(col("key")).alias("k"))\
.agg(sum(my_strlen(col("value"))).alias("s"))\
.select(my_add(col("k"), col("s")).alias("t"))
self.assertEqual(sel.collect(), [Row(t=4), Row(t=3)])
def test_udf_in_generate(self):
from pyspark.sql.functions import udf, explode
df = self.spark.range(5)
f = udf(lambda x: list(range(x)), ArrayType(LongType()))
row = df.select(explode(f(*df))).groupBy().sum().first()
self.assertEqual(row[0], 10)
df = self.spark.range(3)
res = df.select("id", explode(f(df.id))).collect()
self.assertEqual(res[0][0], 1)
self.assertEqual(res[0][1], 0)
self.assertEqual(res[1][0], 2)
self.assertEqual(res[1][1], 0)
self.assertEqual(res[2][0], 2)
self.assertEqual(res[2][1], 1)
range_udf = udf(lambda value: list(range(value - 1, value + 1)), ArrayType(IntegerType()))
res = df.select("id", explode(range_udf(df.id))).collect()
self.assertEqual(res[0][0], 0)
self.assertEqual(res[0][1], -1)
self.assertEqual(res[1][0], 0)
self.assertEqual(res[1][1], 0)
self.assertEqual(res[2][0], 1)
self.assertEqual(res[2][1], 0)
self.assertEqual(res[3][0], 1)
self.assertEqual(res[3][1], 1)
def test_udf_with_order_by_and_limit(self):
from pyspark.sql.functions import udf
my_copy = udf(lambda x: x, IntegerType())
df = self.spark.range(10).orderBy("id")
res = df.select(df.id, my_copy(df.id).alias("copy")).limit(1)
res.explain(True)
self.assertEqual(res.collect(), [Row(id=0, copy=0)])
def test_udf_registration_returns_udf(self):
df = self.spark.range(10)
add_three = self.spark.udf.register("add_three", lambda x: x + 3, IntegerType())
self.assertListEqual(
df.selectExpr("add_three(id) AS plus_three").collect(),
df.select(add_three("id").alias("plus_three")).collect()
)
def test_non_existed_udf(self):
spark = self.spark
self.assertRaisesRegexp(AnalysisException, "Can not load class non_existed_udf",
lambda: spark.udf.registerJavaFunction("udf1", "non_existed_udf"))
def test_non_existed_udaf(self):
spark = self.spark
self.assertRaisesRegexp(AnalysisException, "Can not load class non_existed_udaf",
lambda: spark.udf.registerJavaUDAF("udaf1", "non_existed_udaf"))
def test_multiLine_json(self):
people1 = self.spark.read.json("python/test_support/sql/people.json")
people_array = self.spark.read.json("python/test_support/sql/people_array.json",
multiLine=True)
self.assertEqual(people1.collect(), people_array.collect())
def test_multiline_csv(self):
ages_newlines = self.spark.read.csv(
"python/test_support/sql/ages_newlines.csv", multiLine=True)
expected = [Row(_c0=u'Joe', _c1=u'20', _c2=u'Hi,\nI am Jeo'),
Row(_c0=u'Tom', _c1=u'30', _c2=u'My name is Tom'),
Row(_c0=u'Hyukjin', _c1=u'25', _c2=u'I am Hyukjin\n\nI love Spark!')]
self.assertEqual(ages_newlines.collect(), expected)
def test_ignorewhitespace_csv(self):
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.spark.createDataFrame([[" a", "b ", " c "]]).write.csv(
tmpPath,
ignoreLeadingWhiteSpace=False,
ignoreTrailingWhiteSpace=False)
expected = [Row(value=u' a,b , c ')]
readback = self.spark.read.text(tmpPath)
self.assertEqual(readback.collect(), expected)
shutil.rmtree(tmpPath)
def test_read_multiple_orc_file(self):
df = self.spark.read.orc(["python/test_support/sql/orc_partitioned/b=0/c=0",
"python/test_support/sql/orc_partitioned/b=1/c=1"])
self.assertEqual(2, df.count())
def test_udf_with_input_file_name(self):
from pyspark.sql.functions import udf, input_file_name
from pyspark.sql.types import StringType
sourceFile = udf(lambda path: path, StringType())
filePath = "python/test_support/sql/people1.json"
row = self.spark.read.json(filePath).select(sourceFile(input_file_name())).first()
self.assertTrue(row[0].find("people1.json") != -1)
def test_udf_with_input_file_name_for_hadooprdd(self):
from pyspark.sql.functions import udf, input_file_name
from pyspark.sql.types import StringType
def filename(path):
return path
sameText = udf(filename, StringType())
rdd = self.sc.textFile('python/test_support/sql/people.json')
df = self.spark.read.json(rdd).select(input_file_name().alias('file'))
row = df.select(sameText(df['file'])).first()
self.assertTrue(row[0].find("people.json") != -1)
rdd2 = self.sc.newAPIHadoopFile(
'python/test_support/sql/people.json',
'org.apache.hadoop.mapreduce.lib.input.TextInputFormat',
'org.apache.hadoop.io.LongWritable',
'org.apache.hadoop.io.Text')
df2 = self.spark.read.json(rdd2).select(input_file_name().alias('file'))
row2 = df2.select(sameText(df2['file'])).first()
self.assertTrue(row2[0].find("people.json") != -1)
    def test_udf_defers_judf_initialization(self):
        # This is kept separate from UDFInitializationTests
        # to avoid context initialization
        # when the udf is called
from pyspark.sql.functions import UserDefinedFunction
f = UserDefinedFunction(lambda x: x, StringType())
self.assertIsNone(
f._judf_placeholder,
"judf should not be initialized before the first call."
)
self.assertIsInstance(f("foo"), Column, "UDF call should return a Column.")
self.assertIsNotNone(
f._judf_placeholder,
"judf should be initialized after UDF has been called."
)
def test_udf_with_string_return_type(self):
from pyspark.sql.functions import UserDefinedFunction
add_one = UserDefinedFunction(lambda x: x + 1, "integer")
make_pair = UserDefinedFunction(lambda x: (-x, x), "struct<x:integer,y:integer>")
make_array = UserDefinedFunction(
lambda x: [float(x) for x in range(x, x + 3)], "array<double>")
expected = (2, Row(x=-1, y=1), [1.0, 2.0, 3.0])
actual = (self.spark.range(1, 2).toDF("x")
.select(add_one("x"), make_pair("x"), make_array("x"))
.first())
self.assertTupleEqual(expected, actual)
def test_udf_shouldnt_accept_noncallable_object(self):
from pyspark.sql.functions import UserDefinedFunction
from pyspark.sql.types import StringType
non_callable = None
self.assertRaises(TypeError, UserDefinedFunction, non_callable, StringType())
def test_udf_with_decorator(self):
from pyspark.sql.functions import lit, udf
from pyspark.sql.types import IntegerType, DoubleType
@udf(IntegerType())
def add_one(x):
if x is not None:
return x + 1
@udf(returnType=DoubleType())
def add_two(x):
if x is not None:
return float(x + 2)
@udf
def to_upper(x):
if x is not None:
return x.upper()
@udf()
def to_lower(x):
if x is not None:
return x.lower()
@udf
def substr(x, start, end):
if x is not None:
return x[start:end]
@udf("long")
def trunc(x):
return int(x)
@udf(returnType="double")
def as_double(x):
return float(x)
df = (
self.spark
.createDataFrame(
[(1, "Foo", "foobar", 3.0)], ("one", "Foo", "foobar", "float"))
.select(
add_one("one"), add_two("one"),
to_upper("Foo"), to_lower("Foo"),
substr("foobar", lit(0), lit(3)),
trunc("float"), as_double("one")))
self.assertListEqual(
[tpe for _, tpe in df.dtypes],
["int", "double", "string", "string", "string", "bigint", "double"]
)
self.assertListEqual(
list(df.first()),
[2, 3.0, "FOO", "foo", "foo", 3, 1.0]
)
def test_udf_wrapper(self):
from pyspark.sql.functions import udf
from pyspark.sql.types import IntegerType
def f(x):
"""Identity"""
return x
return_type = IntegerType()
f_ = udf(f, return_type)
self.assertTrue(f.__doc__ in f_.__doc__)
self.assertEqual(f, f_.func)
self.assertEqual(return_type, f_.returnType)
class F(object):
"""Identity"""
def __call__(self, x):
return x
f = F()
return_type = IntegerType()
f_ = udf(f, return_type)
self.assertTrue(f.__doc__ in f_.__doc__)
self.assertEqual(f, f_.func)
self.assertEqual(return_type, f_.returnType)
f = functools.partial(f, x=1)
return_type = IntegerType()
f_ = udf(f, return_type)
self.assertTrue(f.__doc__ in f_.__doc__)
self.assertEqual(f, f_.func)
self.assertEqual(return_type, f_.returnType)
def test_basic_functions(self):
rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
df = self.spark.read.json(rdd)
df.count()
df.collect()
df.schema
# cache and checkpoint
self.assertFalse(df.is_cached)
df.persist()
df.unpersist(True)
df.cache()
self.assertTrue(df.is_cached)
self.assertEqual(2, df.count())
df.createOrReplaceTempView("temp")
df = self.spark.sql("select foo from temp")
df.count()
df.collect()
def test_apply_schema_to_row(self):
df = self.spark.read.json(self.sc.parallelize(["""{"a":2}"""]))
df2 = self.spark.createDataFrame(df.rdd.map(lambda x: x), df.schema)
self.assertEqual(df.collect(), df2.collect())
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x))
df3 = self.spark.createDataFrame(rdd, df.schema)
self.assertEqual(10, df3.count())
def test_infer_schema_to_local(self):
input = [{"a": 1}, {"b": "coffee"}]
rdd = self.sc.parallelize(input)
df = self.spark.createDataFrame(input)
df2 = self.spark.createDataFrame(rdd, samplingRatio=1.0)
self.assertEqual(df.schema, df2.schema)
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x, b=None))
df3 = self.spark.createDataFrame(rdd, df.schema)
self.assertEqual(10, df3.count())
def test_apply_schema_to_dict_and_rows(self):
schema = StructType().add("b", StringType()).add("a", IntegerType())
input = [{"a": 1}, {"b": "coffee"}]
rdd = self.sc.parallelize(input)
for verify in [False, True]:
df = self.spark.createDataFrame(input, schema, verifySchema=verify)
df2 = self.spark.createDataFrame(rdd, schema, verifySchema=verify)
self.assertEqual(df.schema, df2.schema)
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x, b=None))
df3 = self.spark.createDataFrame(rdd, schema, verifySchema=verify)
self.assertEqual(10, df3.count())
input = [Row(a=x, b=str(x)) for x in range(10)]
df4 = self.spark.createDataFrame(input, schema, verifySchema=verify)
self.assertEqual(10, df4.count())
def test_create_dataframe_schema_mismatch(self):
input = [Row(a=1)]
rdd = self.sc.parallelize(range(3)).map(lambda i: Row(a=i))
schema = StructType([StructField("a", IntegerType()), StructField("b", StringType())])
df = self.spark.createDataFrame(rdd, schema)
self.assertRaises(Exception, lambda: df.show())
def test_serialize_nested_array_and_map(self):
d = [Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})]
rdd = self.sc.parallelize(d)
df = self.spark.createDataFrame(rdd)
row = df.head()
self.assertEqual(1, len(row.l))
self.assertEqual(1, row.l[0].a)
self.assertEqual("2", row.d["key"].d)
l = df.rdd.map(lambda x: x.l).first()
self.assertEqual(1, len(l))
self.assertEqual('s', l[0].b)
d = df.rdd.map(lambda x: x.d).first()
self.assertEqual(1, len(d))
self.assertEqual(1.0, d["key"].c)
row = df.rdd.map(lambda x: x.d["key"]).first()
self.assertEqual(1.0, row.c)
self.assertEqual("2", row.d)
def test_infer_schema(self):
d = [Row(l=[], d={}, s=None),
Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")}, s="")]
rdd = self.sc.parallelize(d)
df = self.spark.createDataFrame(rdd)
self.assertEqual([], df.rdd.map(lambda r: r.l).first())
self.assertEqual([None, ""], df.rdd.map(lambda r: r.s).collect())
df.createOrReplaceTempView("test")
result = self.spark.sql("SELECT l[0].a from test where d['key'].d = '2'")
self.assertEqual(1, result.head()[0])
df2 = self.spark.createDataFrame(rdd, samplingRatio=1.0)
self.assertEqual(df.schema, df2.schema)
self.assertEqual({}, df2.rdd.map(lambda r: r.d).first())
self.assertEqual([None, ""], df2.rdd.map(lambda r: r.s).collect())
df2.createOrReplaceTempView("test2")
result = self.spark.sql("SELECT l[0].a from test2 where d['key'].d = '2'")
self.assertEqual(1, result.head()[0])
def test_infer_nested_schema(self):
NestedRow = Row("f1", "f2")
nestedRdd1 = self.sc.parallelize([NestedRow([1, 2], {"row1": 1.0}),
NestedRow([2, 3], {"row2": 2.0})])
df = self.spark.createDataFrame(nestedRdd1)
self.assertEqual(Row(f1=[1, 2], f2={u'row1': 1.0}), df.collect()[0])
nestedRdd2 = self.sc.parallelize([NestedRow([[1, 2], [2, 3]], [1, 2]),
NestedRow([[2, 3], [3, 4]], [2, 3])])
df = self.spark.createDataFrame(nestedRdd2)
self.assertEqual(Row(f1=[[1, 2], [2, 3]], f2=[1, 2]), df.collect()[0])
from collections import namedtuple
CustomRow = namedtuple('CustomRow', 'field1 field2')
rdd = self.sc.parallelize([CustomRow(field1=1, field2="row1"),
CustomRow(field1=2, field2="row2"),
CustomRow(field1=3, field2="row3")])
df = self.spark.createDataFrame(rdd)
self.assertEqual(Row(field1=1, field2=u'row1'), df.first())
def test_create_dataframe_from_objects(self):
data = [MyObject(1, "1"), MyObject(2, "2")]
df = self.spark.createDataFrame(data)
self.assertEqual(df.dtypes, [("key", "bigint"), ("value", "string")])
self.assertEqual(df.first(), Row(key=1, value="1"))
def test_select_null_literal(self):
df = self.spark.sql("select null as col")
self.assertEqual(Row(col=None), df.first())
def test_apply_schema(self):
from datetime import date, datetime
rdd = self.sc.parallelize([(127, -128, -32768, 32767, 2147483647, 1.0,
date(2010, 1, 1), datetime(2010, 1, 1, 1, 1, 1),
{"a": 1}, (2,), [1, 2, 3], None)])
schema = StructType([
StructField("byte1", ByteType(), False),
StructField("byte2", ByteType(), False),
StructField("short1", ShortType(), False),
StructField("short2", ShortType(), False),
StructField("int1", IntegerType(), False),
StructField("float1", FloatType(), False),
StructField("date1", DateType(), False),
StructField("time1", TimestampType(), False),
StructField("map1", MapType(StringType(), IntegerType(), False), False),
StructField("struct1", StructType([StructField("b", ShortType(), False)]), False),
StructField("list1", ArrayType(ByteType(), False), False),
StructField("null1", DoubleType(), True)])
df = self.spark.createDataFrame(rdd, schema)
results = df.rdd.map(lambda x: (x.byte1, x.byte2, x.short1, x.short2, x.int1, x.float1,
x.date1, x.time1, x.map1["a"], x.struct1.b, x.list1, x.null1))
r = (127, -128, -32768, 32767, 2147483647, 1.0, date(2010, 1, 1),
datetime(2010, 1, 1, 1, 1, 1), 1, 2, [1, 2, 3], None)
self.assertEqual(r, results.first())
df.createOrReplaceTempView("table2")
r = self.spark.sql("SELECT byte1 - 1 AS byte1, byte2 + 1 AS byte2, " +
"short1 + 1 AS short1, short2 - 1 AS short2, int1 - 1 AS int1, " +
"float1 + 1.5 as float1 FROM table2").first()
self.assertEqual((126, -127, -32767, 32766, 2147483646, 2.5), tuple(r))
from pyspark.sql.types import _parse_schema_abstract, _infer_schema_type
rdd = self.sc.parallelize([(127, -32768, 1.0, datetime(2010, 1, 1, 1, 1, 1),
{"a": 1}, (2,), [1, 2, 3])])
abstract = "byte1 short1 float1 time1 map1{} struct1(b) list1[]"
schema = _parse_schema_abstract(abstract)
typedSchema = _infer_schema_type(rdd.first(), schema)
df = self.spark.createDataFrame(rdd, typedSchema)
r = (127, -32768, 1.0, datetime(2010, 1, 1, 1, 1, 1), {"a": 1}, Row(b=2), [1, 2, 3])
self.assertEqual(r, tuple(df.first()))
def test_struct_in_map(self):
d = [Row(m={Row(i=1): Row(s="")})]
df = self.sc.parallelize(d).toDF()
k, v = list(df.head().m.items())[0]
self.assertEqual(1, k.i)
self.assertEqual("", v.s)
def test_convert_row_to_dict(self):
row = Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})
self.assertEqual(1, row.asDict()['l'][0].a)
df = self.sc.parallelize([row]).toDF()
df.createOrReplaceTempView("test")
row = self.spark.sql("select l, d from test").head()
self.assertEqual(1, row.asDict()["l"][0].a)
self.assertEqual(1.0, row.asDict()['d']['key'].c)
def test_udt(self):
from pyspark.sql.types import _parse_datatype_json_string, _infer_type, _make_type_verifier
from pyspark.sql.tests import ExamplePointUDT, ExamplePoint
def check_datatype(datatype):
pickled = pickle.loads(pickle.dumps(datatype))
assert datatype == pickled
scala_datatype = self.spark._jsparkSession.parseDataType(datatype.json())
python_datatype = _parse_datatype_json_string(scala_datatype.json())
assert datatype == python_datatype
check_datatype(ExamplePointUDT())
structtype_with_udt = StructType([StructField("label", DoubleType(), False),
StructField("point", ExamplePointUDT(), False)])
check_datatype(structtype_with_udt)
p = ExamplePoint(1.0, 2.0)
self.assertEqual(_infer_type(p), ExamplePointUDT())
_make_type_verifier(ExamplePointUDT())(ExamplePoint(1.0, 2.0))
self.assertRaises(ValueError, lambda: _make_type_verifier(ExamplePointUDT())([1.0, 2.0]))
check_datatype(PythonOnlyUDT())
structtype_with_udt = StructType([StructField("label", DoubleType(), False),
StructField("point", PythonOnlyUDT(), False)])
check_datatype(structtype_with_udt)
p = PythonOnlyPoint(1.0, 2.0)
self.assertEqual(_infer_type(p), PythonOnlyUDT())
_make_type_verifier(PythonOnlyUDT())(PythonOnlyPoint(1.0, 2.0))
self.assertRaises(
ValueError,
lambda: _make_type_verifier(PythonOnlyUDT())([1.0, 2.0]))
def test_simple_udt_in_df(self):
schema = StructType().add("key", LongType()).add("val", PythonOnlyUDT())
df = self.spark.createDataFrame(
[(i % 3, PythonOnlyPoint(float(i), float(i))) for i in range(10)],
schema=schema)
df.show()
def test_nested_udt_in_df(self):
schema = StructType().add("key", LongType()).add("val", ArrayType(PythonOnlyUDT()))
df = self.spark.createDataFrame(
[(i % 3, [PythonOnlyPoint(float(i), float(i))]) for i in range(10)],
schema=schema)
df.collect()
schema = StructType().add("key", LongType()).add("val",
MapType(LongType(), PythonOnlyUDT()))
df = self.spark.createDataFrame(
[(i % 3, {i % 3: PythonOnlyPoint(float(i + 1), float(i + 1))}) for i in range(10)],
schema=schema)
df.collect()
def test_complex_nested_udt_in_df(self):
from pyspark.sql.functions import udf
schema = StructType().add("key", LongType()).add("val", PythonOnlyUDT())
df = self.spark.createDataFrame(
[(i % 3, PythonOnlyPoint(float(i), float(i))) for i in range(10)],
schema=schema)
df.collect()
gd = df.groupby("key").agg({"val": "collect_list"})
gd.collect()
udf = udf(lambda k, v: [(k, v[0])], ArrayType(df.schema))
gd.select(udf(*gd)).collect()
def test_udt_with_none(self):
df = self.spark.range(0, 10, 1, 1)
def myudf(x):
if x > 0:
return PythonOnlyPoint(float(x), float(x))
self.spark.catalog.registerFunction("udf", myudf, PythonOnlyUDT())
rows = [r[0] for r in df.selectExpr("udf(id)").take(2)]
self.assertEqual(rows, [None, PythonOnlyPoint(1, 1)])
def test_infer_schema_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
schema = df.schema
field = [f for f in schema.fields if f.name == "point"][0]
self.assertEqual(type(field.dataType), ExamplePointUDT)
df.createOrReplaceTempView("labeled_point")
point = self.spark.sql("SELECT point FROM labeled_point").head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
schema = df.schema
field = [f for f in schema.fields if f.name == "point"][0]
self.assertEqual(type(field.dataType), PythonOnlyUDT)
df.createOrReplaceTempView("labeled_point")
point = self.spark.sql("SELECT point FROM labeled_point").head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_apply_schema_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = (1.0, ExamplePoint(1.0, 2.0))
schema = StructType([StructField("label", DoubleType(), False),
StructField("point", ExamplePointUDT(), False)])
df = self.spark.createDataFrame([row], schema)
point = df.head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
row = (1.0, PythonOnlyPoint(1.0, 2.0))
schema = StructType([StructField("label", DoubleType(), False),
StructField("point", PythonOnlyUDT(), False)])
df = self.spark.createDataFrame([row], schema)
point = df.head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_udf_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
self.assertEqual(1.0, df.rdd.map(lambda r: r.point.x).first())
udf = UserDefinedFunction(lambda p: p.y, DoubleType())
self.assertEqual(2.0, df.select(udf(df.point)).first()[0])
udf2 = UserDefinedFunction(lambda p: ExamplePoint(p.x + 1, p.y + 1), ExamplePointUDT())
self.assertEqual(ExamplePoint(2.0, 3.0), df.select(udf2(df.point)).first()[0])
row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
self.assertEqual(1.0, df.rdd.map(lambda r: r.point.x).first())
udf = UserDefinedFunction(lambda p: p.y, DoubleType())
self.assertEqual(2.0, df.select(udf(df.point)).first()[0])
udf2 = UserDefinedFunction(lambda p: PythonOnlyPoint(p.x + 1, p.y + 1), PythonOnlyUDT())
self.assertEqual(PythonOnlyPoint(2.0, 3.0), df.select(udf2(df.point)).first()[0])
def test_parquet_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
df0 = self.spark.createDataFrame([row])
output_dir = os.path.join(self.tempdir.name, "labeled_point")
df0.write.parquet(output_dir)
df1 = self.spark.read.parquet(output_dir)
point = df1.head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
df0 = self.spark.createDataFrame([row])
df0.write.parquet(output_dir, mode='overwrite')
df1 = self.spark.read.parquet(output_dir)
point = df1.head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_union_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row1 = (1.0, ExamplePoint(1.0, 2.0))
row2 = (2.0, ExamplePoint(3.0, 4.0))
schema = StructType([StructField("label", DoubleType(), False),
StructField("point", ExamplePointUDT(), False)])
df1 = self.spark.createDataFrame([row1], schema)
df2 = self.spark.createDataFrame([row2], schema)
result = df1.union(df2).orderBy("label").collect()
self.assertEqual(
result,
[
Row(label=1.0, point=ExamplePoint(1.0, 2.0)),
Row(label=2.0, point=ExamplePoint(3.0, 4.0))
]
)
def test_column_operators(self):
ci = self.df.key
cs = self.df.value
c = ci == cs
self.assertTrue(isinstance((- ci - 1 - 2) % 3 * 2.5 / 3.5, Column))
rcc = (1 + ci), (1 - ci), (1 * ci), (1 / ci), (1 % ci), (1 ** ci), (ci ** 1)
self.assertTrue(all(isinstance(c, Column) for c in rcc))
cb = [ci == 5, ci != 0, ci > 3, ci < 4, ci >= 0, ci <= 7]
self.assertTrue(all(isinstance(c, Column) for c in cb))
cbool = (ci & ci), (ci | ci), (~ci)
self.assertTrue(all(isinstance(c, Column) for c in cbool))
css = cs.contains('a'), cs.like('a'), cs.rlike('a'), cs.asc(), cs.desc(),\
cs.startswith('a'), cs.endswith('a'), ci.eqNullSafe(cs)
self.assertTrue(all(isinstance(c, Column) for c in css))
self.assertTrue(isinstance(ci.cast(LongType()), Column))
self.assertRaisesRegexp(ValueError,
"Cannot apply 'in' operator against a column",
lambda: 1 in cs)
def test_column_getitem(self):
from pyspark.sql.functions import col
self.assertIsInstance(col("foo")[1:3], Column)
self.assertIsInstance(col("foo")[0], Column)
self.assertIsInstance(col("foo")["bar"], Column)
self.assertRaises(ValueError, lambda: col("foo")[0:10:2])
def test_column_select(self):
df = self.df
self.assertEqual(self.testData, df.select("*").collect())
self.assertEqual(self.testData, df.select(df.key, df.value).collect())
self.assertEqual([Row(value='1')], df.where(df.key == 1).select(df.value).collect())
def test_freqItems(self):
vals = [Row(a=1, b=-2.0) if i % 2 == 0 else Row(a=i, b=i * 1.0) for i in range(100)]
df = self.sc.parallelize(vals).toDF()
items = df.stat.freqItems(("a", "b"), 0.4).collect()[0]
self.assertTrue(1 in items[0])
self.assertTrue(-2.0 in items[1])
def test_aggregator(self):
df = self.df
g = df.groupBy()
self.assertEqual([99, 100], sorted(g.agg({'key': 'max', 'value': 'count'}).collect()[0]))
self.assertEqual([Row(**{"AVG(key#0)": 49.5})], g.mean().collect())
from pyspark.sql import functions
self.assertEqual((0, u'99'),
tuple(g.agg(functions.first(df.key), functions.last(df.value)).first()))
self.assertTrue(95 < g.agg(functions.approxCountDistinct(df.key)).first()[0])
self.assertEqual(100, g.agg(functions.countDistinct(df.value)).first()[0])
def test_first_last_ignorenulls(self):
from pyspark.sql import functions
df = self.spark.range(0, 100)
df2 = df.select(functions.when(df.id % 3 == 0, None).otherwise(df.id).alias("id"))
df3 = df2.select(functions.first(df2.id, False).alias('a'),
functions.first(df2.id, True).alias('b'),
functions.last(df2.id, False).alias('c'),
functions.last(df2.id, True).alias('d'))
self.assertEqual([Row(a=None, b=1, c=None, d=98)], df3.collect())
def test_approxQuantile(self):
df = self.sc.parallelize([Row(a=i, b=i+10) for i in range(10)]).toDF()
aq = df.stat.approxQuantile("a", [0.1, 0.5, 0.9], 0.1)
self.assertTrue(isinstance(aq, list))
self.assertEqual(len(aq), 3)
self.assertTrue(all(isinstance(q, float) for q in aq))
aqs = df.stat.approxQuantile(["a", "b"], [0.1, 0.5, 0.9], 0.1)
self.assertTrue(isinstance(aqs, list))
self.assertEqual(len(aqs), 2)
self.assertTrue(isinstance(aqs[0], list))
self.assertEqual(len(aqs[0]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqs[0]))
self.assertTrue(isinstance(aqs[1], list))
self.assertEqual(len(aqs[1]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqs[1]))
aqt = df.stat.approxQuantile(("a", "b"), [0.1, 0.5, 0.9], 0.1)
self.assertTrue(isinstance(aqt, list))
self.assertEqual(len(aqt), 2)
self.assertTrue(isinstance(aqt[0], list))
self.assertEqual(len(aqt[0]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqt[0]))
self.assertTrue(isinstance(aqt[1], list))
self.assertEqual(len(aqt[1]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqt[1]))
self.assertRaises(ValueError, lambda: df.stat.approxQuantile(123, [0.1, 0.9], 0.1))
self.assertRaises(ValueError, lambda: df.stat.approxQuantile(("a", 123), [0.1, 0.9], 0.1))
self.assertRaises(ValueError, lambda: df.stat.approxQuantile(["a", 123], [0.1, 0.9], 0.1))
def test_corr(self):
import math
df = self.sc.parallelize([Row(a=i, b=math.sqrt(i)) for i in range(10)]).toDF()
corr = df.stat.corr("a", "b")
self.assertTrue(abs(corr - 0.95734012) < 1e-6)
def test_cov(self):
df = self.sc.parallelize([Row(a=i, b=2 * i) for i in range(10)]).toDF()
cov = df.stat.cov("a", "b")
self.assertTrue(abs(cov - 55.0 / 3) < 1e-6)
def test_crosstab(self):
df = self.sc.parallelize([Row(a=i % 3, b=i % 2) for i in range(1, 7)]).toDF()
ct = df.stat.crosstab("a", "b").collect()
ct = sorted(ct, key=lambda x: x[0])
for i, row in enumerate(ct):
self.assertEqual(row[0], str(i))
            self.assertEqual(row[1], 1)
            self.assertEqual(row[2], 1)
def test_math_functions(self):
df = self.sc.parallelize([Row(a=i, b=2 * i) for i in range(10)]).toDF()
from pyspark.sql import functions
import math
def get_values(l):
return [j[0] for j in l]
def assert_close(a, b):
c = get_values(b)
diff = [abs(v - c[k]) < 1e-6 for k, v in enumerate(a)]
            assert sum(diff) == len(a)
assert_close([math.cos(i) for i in range(10)],
df.select(functions.cos(df.a)).collect())
assert_close([math.cos(i) for i in range(10)],
df.select(functions.cos("a")).collect())
assert_close([math.sin(i) for i in range(10)],
df.select(functions.sin(df.a)).collect())
assert_close([math.sin(i) for i in range(10)],
df.select(functions.sin(df['a'])).collect())
assert_close([math.pow(i, 2 * i) for i in range(10)],
df.select(functions.pow(df.a, df.b)).collect())
assert_close([math.pow(i, 2) for i in range(10)],
df.select(functions.pow(df.a, 2)).collect())
assert_close([math.pow(i, 2) for i in range(10)],
df.select(functions.pow(df.a, 2.0)).collect())
assert_close([math.hypot(i, 2 * i) for i in range(10)],
df.select(functions.hypot(df.a, df.b)).collect())
def test_rand_functions(self):
df = self.df
from pyspark.sql import functions
rnd = df.select('key', functions.rand()).collect()
for row in rnd:
assert row[1] >= 0.0 and row[1] <= 1.0, "got: %s" % row[1]
rndn = df.select('key', functions.randn(5)).collect()
for row in rndn:
assert row[1] >= -4.0 and row[1] <= 4.0, "got: %s" % row[1]
# If the specified seed is 0, we should use it.
# https://issues.apache.org/jira/browse/SPARK-9691
rnd1 = df.select('key', functions.rand(0)).collect()
rnd2 = df.select('key', functions.rand(0)).collect()
self.assertEqual(sorted(rnd1), sorted(rnd2))
rndn1 = df.select('key', functions.randn(0)).collect()
rndn2 = df.select('key', functions.randn(0)).collect()
self.assertEqual(sorted(rndn1), sorted(rndn2))
def test_array_contains_function(self):
from pyspark.sql.functions import array_contains
df = self.spark.createDataFrame([(["1", "2", "3"],), ([],)], ['data'])
actual = df.select(array_contains(df.data, 1).alias('b')).collect()
        # The value argument can be implicitly cast to the element type of the array.
self.assertEqual([Row(b=True), Row(b=False)], actual)
def test_between_function(self):
df = self.sc.parallelize([
Row(a=1, b=2, c=3),
Row(a=2, b=1, c=3),
Row(a=4, b=1, c=4)]).toDF()
self.assertEqual([Row(a=2, b=1, c=3), Row(a=4, b=1, c=4)],
df.filter(df.a.between(df.b, df.c)).collect())
def test_struct_type(self):
from pyspark.sql.types import StructType, StringType, StructField
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
struct2 = StructType([StructField("f1", StringType(), True),
StructField("f2", StringType(), True, None)])
self.assertEqual(struct1.fieldNames(), struct2.names)
self.assertEqual(struct1, struct2)
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
struct2 = StructType([StructField("f1", StringType(), True)])
self.assertNotEqual(struct1.fieldNames(), struct2.names)
self.assertNotEqual(struct1, struct2)
struct1 = (StructType().add(StructField("f1", StringType(), True))
.add(StructField("f2", StringType(), True, None)))
struct2 = StructType([StructField("f1", StringType(), True),
StructField("f2", StringType(), True, None)])
self.assertEqual(struct1.fieldNames(), struct2.names)
self.assertEqual(struct1, struct2)
struct1 = (StructType().add(StructField("f1", StringType(), True))
.add(StructField("f2", StringType(), True, None)))
struct2 = StructType([StructField("f1", StringType(), True)])
self.assertNotEqual(struct1.fieldNames(), struct2.names)
self.assertNotEqual(struct1, struct2)
# Catch exception raised during improper construction
self.assertRaises(ValueError, lambda: StructType().add("name"))
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
for field in struct1:
self.assertIsInstance(field, StructField)
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
self.assertEqual(len(struct1), 2)
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
self.assertIs(struct1["f1"], struct1.fields[0])
self.assertIs(struct1[0], struct1.fields[0])
self.assertEqual(struct1[0:1], StructType(struct1.fields[0:1]))
self.assertRaises(KeyError, lambda: struct1["f9"])
self.assertRaises(IndexError, lambda: struct1[9])
self.assertRaises(TypeError, lambda: struct1[9.9])
def test_parse_datatype_string(self):
from pyspark.sql.types import _all_atomic_types, _parse_datatype_string
for k, t in _all_atomic_types.items():
if t != NullType:
self.assertEqual(t(), _parse_datatype_string(k))
self.assertEqual(IntegerType(), _parse_datatype_string("int"))
self.assertEqual(DecimalType(1, 1), _parse_datatype_string("decimal(1 ,1)"))
self.assertEqual(DecimalType(10, 1), _parse_datatype_string("decimal( 10,1 )"))
self.assertEqual(DecimalType(11, 1), _parse_datatype_string("decimal(11,1)"))
self.assertEqual(
ArrayType(IntegerType()),
_parse_datatype_string("array<int >"))
self.assertEqual(
MapType(IntegerType(), DoubleType()),
_parse_datatype_string("map< int, double >"))
self.assertEqual(
StructType([StructField("a", IntegerType()), StructField("c", DoubleType())]),
_parse_datatype_string("struct<a:int, c:double >"))
self.assertEqual(
StructType([StructField("a", IntegerType()), StructField("c", DoubleType())]),
_parse_datatype_string("a:int, c:double"))
self.assertEqual(
StructType([StructField("a", IntegerType()), StructField("c", DoubleType())]),
_parse_datatype_string("a INT, c DOUBLE"))
def test_metadata_null(self):
from pyspark.sql.types import StructType, StringType, StructField
schema = StructType([StructField("f1", StringType(), True, None),
StructField("f2", StringType(), True, {'a': None})])
rdd = self.sc.parallelize([["a", "b"], ["c", "d"]])
self.spark.createDataFrame(rdd, schema)
def test_save_and_load(self):
df = self.df
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.json(tmpPath)
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
schema = StructType([StructField("value", StringType(), True)])
actual = self.spark.read.json(tmpPath, schema)
self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))
df.write.json(tmpPath, "overwrite")
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
df.write.save(format="json", mode="overwrite", path=tmpPath,
noUse="this options will not be used in save.")
actual = self.spark.read.load(format="json", path=tmpPath,
noUse="this options will not be used in load.")
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
defaultDataSourceName = self.spark.conf.get("spark.sql.sources.default",
"org.apache.spark.sql.parquet")
self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
actual = self.spark.read.load(path=tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName)
csvpath = os.path.join(tempfile.mkdtemp(), 'data')
df.write.option('quote', None).format('csv').save(csvpath)
shutil.rmtree(tmpPath)
def test_save_and_load_builder(self):
df = self.df
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.json(tmpPath)
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
schema = StructType([StructField("value", StringType(), True)])
actual = self.spark.read.json(tmpPath, schema)
self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))
df.write.mode("overwrite").json(tmpPath)
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
df.write.mode("overwrite").options(noUse="this options will not be used in save.")\
.option("noUse", "this option will not be used in save.")\
.format("json").save(path=tmpPath)
actual =\
self.spark.read.format("json")\
.load(path=tmpPath, noUse="this options will not be used in load.")
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
defaultDataSourceName = self.spark.conf.get("spark.sql.sources.default",
"org.apache.spark.sql.parquet")
self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
actual = self.spark.read.load(path=tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName)
shutil.rmtree(tmpPath)
def test_stream_trigger(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
# Should take at least one arg
try:
df.writeStream.trigger()
except ValueError:
pass
# Should not take multiple args
try:
df.writeStream.trigger(once=True, processingTime='5 seconds')
except ValueError:
pass
# Should take only keyword args
try:
df.writeStream.trigger('5 seconds')
self.fail("Should have thrown an exception")
except TypeError:
pass
def test_stream_read_options(self):
schema = StructType([StructField("data", StringType(), False)])
df = self.spark.readStream\
.format('text')\
.option('path', 'python/test_support/sql/streaming')\
.schema(schema)\
.load()
self.assertTrue(df.isStreaming)
self.assertEqual(df.schema.simpleString(), "struct<data:string>")
def test_stream_read_options_overwrite(self):
bad_schema = StructType([StructField("test", IntegerType(), False)])
schema = StructType([StructField("data", StringType(), False)])
df = self.spark.readStream.format('csv').option('path', 'python/test_support/sql/fake') \
.schema(bad_schema)\
.load(path='python/test_support/sql/streaming', schema=schema, format='text')
self.assertTrue(df.isStreaming)
self.assertEqual(df.schema.simpleString(), "struct<data:string>")
def test_stream_save_options(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming') \
.withColumn('id', lit(1))
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
q = df.writeStream.option('checkpointLocation', chk).queryName('this_query') \
.format('parquet').partitionBy('id').outputMode('append').option('path', out).start()
try:
self.assertEqual(q.name, 'this_query')
self.assertTrue(q.isActive)
q.processAllAvailable()
output_files = []
for _, _, files in os.walk(out):
output_files.extend([f for f in files if not f.startswith('.')])
self.assertTrue(len(output_files) > 0)
self.assertTrue(len(os.listdir(chk)) > 0)
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_save_options_overwrite(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
fake1 = os.path.join(tmpPath, 'fake1')
fake2 = os.path.join(tmpPath, 'fake2')
q = df.writeStream.option('checkpointLocation', fake1)\
.format('memory').option('path', fake2) \
.queryName('fake_query').outputMode('append') \
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
self.assertEqual(q.name, 'this_query')
self.assertTrue(q.isActive)
q.processAllAvailable()
output_files = []
for _, _, files in os.walk(out):
output_files.extend([f for f in files if not f.startswith('.')])
self.assertTrue(len(output_files) > 0)
self.assertTrue(len(os.listdir(chk)) > 0)
self.assertFalse(os.path.isdir(fake1)) # should not have been created
self.assertFalse(os.path.isdir(fake2)) # should not have been created
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_status_and_progress(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
def func(x):
time.sleep(1)
return x
from pyspark.sql.functions import col, udf
sleep_udf = udf(func)
        # Use "sleep_udf" to delay the progress update so that we can test `lastProgress`
        # when there have been no updates.
q = df.select(sleep_udf(col("value")).alias('value')).writeStream \
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
            # "lastProgress" will return None in most cases. However, as it may be flaky
            # when Jenkins is very slow, we don't assert on it. If something is wrong,
            # "lastProgress" is very likely to throw an error and make this test fail, so
            # we can still detect broken code.
q.lastProgress
q.processAllAvailable()
lastProgress = q.lastProgress
recentProgress = q.recentProgress
status = q.status
self.assertEqual(lastProgress['name'], q.name)
self.assertEqual(lastProgress['id'], q.id)
self.assertTrue(any(p == lastProgress for p in recentProgress))
self.assertTrue(
"message" in status and
"isDataAvailable" in status and
"isTriggerActive" in status)
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_await_termination(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
q = df.writeStream\
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
self.assertTrue(q.isActive)
try:
q.awaitTermination("hello")
                self.fail("Expected a ValueError")
except ValueError:
pass
now = time.time()
# test should take at least 2 seconds
res = q.awaitTermination(2.6)
duration = time.time() - now
self.assertTrue(duration >= 2)
self.assertFalse(res)
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_exception(self):
sdf = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
sq = sdf.writeStream.format('memory').queryName('query_explain').start()
try:
sq.processAllAvailable()
self.assertEqual(sq.exception(), None)
finally:
sq.stop()
from pyspark.sql.functions import col, udf
from pyspark.sql.utils import StreamingQueryException
bad_udf = udf(lambda x: 1 / 0)
sq = sdf.select(bad_udf(col("value")))\
.writeStream\
.format('memory')\
.queryName('this_query')\
.start()
try:
# Process some data to fail the query
sq.processAllAvailable()
self.fail("bad udf should fail the query")
except StreamingQueryException as e:
# This is expected
self.assertTrue("ZeroDivisionError" in e.desc)
finally:
sq.stop()
self.assertTrue(type(sq.exception()) is StreamingQueryException)
self.assertTrue("ZeroDivisionError" in sq.exception().desc)
def test_query_manager_await_termination(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
q = df.writeStream\
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
self.assertTrue(q.isActive)
try:
self.spark._wrapped.streams.awaitAnyTermination("hello")
                self.fail("Expected a ValueError")
except ValueError:
pass
now = time.time()
# test should take at least 2 seconds
res = self.spark._wrapped.streams.awaitAnyTermination(2.6)
duration = time.time() - now
self.assertTrue(duration >= 2)
self.assertFalse(res)
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_help_command(self):
# Regression test for SPARK-5464
rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
df = self.spark.read.json(rdd)
# render_doc() reproduces the help() exception without printing output
pydoc.render_doc(df)
pydoc.render_doc(df.foo)
pydoc.render_doc(df.take(1))
def test_access_column(self):
df = self.df
self.assertTrue(isinstance(df.key, Column))
self.assertTrue(isinstance(df['key'], Column))
self.assertTrue(isinstance(df[0], Column))
self.assertRaises(IndexError, lambda: df[2])
self.assertRaises(AnalysisException, lambda: df["bad_key"])
self.assertRaises(TypeError, lambda: df[{}])
def test_column_name_with_non_ascii(self):
if sys.version >= '3':
columnName = "数量"
self.assertTrue(isinstance(columnName, str))
else:
columnName = unicode("数量", "utf-8")
self.assertTrue(isinstance(columnName, unicode))
schema = StructType([StructField(columnName, LongType(), True)])
df = self.spark.createDataFrame([(1,)], schema)
self.assertEqual(schema, df.schema)
self.assertEqual("DataFrame[数量: bigint]", str(df))
self.assertEqual([("数量", 'bigint')], df.dtypes)
self.assertEqual(1, df.select("数量").first()[0])
self.assertEqual(1, df.select(df["数量"]).first()[0])
def test_access_nested_types(self):
df = self.sc.parallelize([Row(l=[1], r=Row(a=1, b="b"), d={"k": "v"})]).toDF()
self.assertEqual(1, df.select(df.l[0]).first()[0])
self.assertEqual(1, df.select(df.l.getItem(0)).first()[0])
self.assertEqual(1, df.select(df.r.a).first()[0])
self.assertEqual("b", df.select(df.r.getField("b")).first()[0])
self.assertEqual("v", df.select(df.d["k"]).first()[0])
self.assertEqual("v", df.select(df.d.getItem("k")).first()[0])
def test_field_accessor(self):
df = self.sc.parallelize([Row(l=[1], r=Row(a=1, b="b"), d={"k": "v"})]).toDF()
self.assertEqual(1, df.select(df.l[0]).first()[0])
self.assertEqual(1, df.select(df.r["a"]).first()[0])
self.assertEqual(1, df.select(df["r.a"]).first()[0])
self.assertEqual("b", df.select(df.r["b"]).first()[0])
self.assertEqual("b", df.select(df["r.b"]).first()[0])
self.assertEqual("v", df.select(df.d["k"]).first()[0])
def test_infer_long_type(self):
longrow = [Row(f1='a', f2=100000000000000)]
df = self.sc.parallelize(longrow).toDF()
self.assertEqual(df.schema.fields[1].dataType, LongType())
        # Saving this as Parquet caused issues as well.
output_dir = os.path.join(self.tempdir.name, "infer_long_type")
df.write.parquet(output_dir)
df1 = self.spark.read.parquet(output_dir)
self.assertEqual('a', df1.first().f1)
self.assertEqual(100000000000000, df1.first().f2)
self.assertEqual(_infer_type(1), LongType())
self.assertEqual(_infer_type(2**10), LongType())
self.assertEqual(_infer_type(2**20), LongType())
self.assertEqual(_infer_type(2**31 - 1), LongType())
self.assertEqual(_infer_type(2**31), LongType())
self.assertEqual(_infer_type(2**61), LongType())
self.assertEqual(_infer_type(2**71), LongType())
def test_filter_with_datetime(self):
time = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000)
date = time.date()
row = Row(date=date, time=time)
df = self.spark.createDataFrame([row])
self.assertEqual(1, df.filter(df.date == date).count())
self.assertEqual(1, df.filter(df.time == time).count())
self.assertEqual(0, df.filter(df.date > date).count())
self.assertEqual(0, df.filter(df.time > time).count())
def test_filter_with_datetime_timezone(self):
dt1 = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000, tzinfo=UTCOffsetTimezone(0))
dt2 = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000, tzinfo=UTCOffsetTimezone(1))
row = Row(date=dt1)
df = self.spark.createDataFrame([row])
self.assertEqual(0, df.filter(df.date == dt2).count())
self.assertEqual(1, df.filter(df.date > dt2).count())
self.assertEqual(0, df.filter(df.date < dt2).count())
def test_time_with_timezone(self):
day = datetime.date.today()
now = datetime.datetime.now()
ts = time.mktime(now.timetuple())
# class in __main__ is not serializable
from pyspark.sql.tests import UTCOffsetTimezone
utc = UTCOffsetTimezone()
utcnow = datetime.datetime.utcfromtimestamp(ts) # without microseconds
# add microseconds to utcnow (keeping year,month,day,hour,minute,second)
utcnow = datetime.datetime(*(utcnow.timetuple()[:6] + (now.microsecond, utc)))
df = self.spark.createDataFrame([(day, now, utcnow)])
day1, now1, utcnow1 = df.first()
self.assertEqual(day1, day)
self.assertEqual(now, now1)
self.assertEqual(now, utcnow1)
# regression test for SPARK-19561
def test_datetime_at_epoch(self):
epoch = datetime.datetime.fromtimestamp(0)
df = self.spark.createDataFrame([Row(date=epoch)])
first = df.select('date', lit(epoch).alias('lit_date')).first()
self.assertEqual(first['date'], epoch)
self.assertEqual(first['lit_date'], epoch)
def test_decimal(self):
from decimal import Decimal
schema = StructType([StructField("decimal", DecimalType(10, 5))])
df = self.spark.createDataFrame([(Decimal("3.14159"),)], schema)
row = df.select(df.decimal + 1).first()
self.assertEqual(row[0], Decimal("4.14159"))
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.parquet(tmpPath)
df2 = self.spark.read.parquet(tmpPath)
row = df2.first()
self.assertEqual(row[0], Decimal("3.14159"))
def test_dropna(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
# shouldn't drop a non-null row
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, 80.1)], schema).dropna().count(),
1)
# dropping rows with a single null value
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna().count(),
0)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(how='any').count(),
0)
# if how = 'all', only drop rows if all values are null
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(how='all').count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(None, None, None)], schema).dropna(how='all').count(),
0)
# how and subset
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(how='any', subset=['name', 'age']).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, None)], schema).dropna(how='any', subset=['name', 'age']).count(),
0)
# threshold
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(thresh=2).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, None)], schema).dropna(thresh=2).count(),
0)
# threshold and subset
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(thresh=2, subset=['name', 'age']).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 180.9)], schema).dropna(thresh=2, subset=['name', 'age']).count(),
0)
# thresh should take precedence over how
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(
how='any', thresh=2, subset=['name', 'age']).count(),
1)
def test_fillna(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True),
StructField("spy", BooleanType(), True)])
# fillna shouldn't change non-null values
row = self.spark.createDataFrame([(u'Alice', 10, 80.1, True)], schema).fillna(50).first()
self.assertEqual(row.age, 10)
# fillna with int
row = self.spark.createDataFrame([(u'Alice', None, None, None)], schema).fillna(50).first()
self.assertEqual(row.age, 50)
self.assertEqual(row.height, 50.0)
# fillna with double
row = self.spark.createDataFrame(
[(u'Alice', None, None, None)], schema).fillna(50.1).first()
self.assertEqual(row.age, 50)
self.assertEqual(row.height, 50.1)
# fillna with bool
row = self.spark.createDataFrame(
[(u'Alice', None, None, None)], schema).fillna(True).first()
self.assertEqual(row.age, None)
self.assertEqual(row.spy, True)
# fillna with string
row = self.spark.createDataFrame([(None, None, None, None)], schema).fillna("hello").first()
self.assertEqual(row.name, u"hello")
self.assertEqual(row.age, None)
# fillna with subset specified for numeric cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna(50, subset=['name', 'age']).first()
self.assertEqual(row.name, None)
self.assertEqual(row.age, 50)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, None)
# fillna with subset specified for string cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna("haha", subset=['name', 'age']).first()
self.assertEqual(row.name, "haha")
self.assertEqual(row.age, None)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, None)
# fillna with subset specified for bool cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna(True, subset=['name', 'spy']).first()
self.assertEqual(row.name, None)
self.assertEqual(row.age, None)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, True)
# fillna with dictionary for boolean types
row = self.spark.createDataFrame([Row(a=None), Row(a=True)]).fillna({"a": True}).first()
self.assertEqual(row.a, True)
def test_bitwise_operations(self):
from pyspark.sql import functions
row = Row(a=170, b=75)
df = self.spark.createDataFrame([row])
result = df.select(df.a.bitwiseAND(df.b)).collect()[0].asDict()
self.assertEqual(170 & 75, result['(a & b)'])
result = df.select(df.a.bitwiseOR(df.b)).collect()[0].asDict()
self.assertEqual(170 | 75, result['(a | b)'])
result = df.select(df.a.bitwiseXOR(df.b)).collect()[0].asDict()
self.assertEqual(170 ^ 75, result['(a ^ b)'])
result = df.select(functions.bitwiseNOT(df.b)).collect()[0].asDict()
self.assertEqual(~75, result['~b'])
def test_expr(self):
from pyspark.sql import functions
row = Row(a="length string", b=75)
df = self.spark.createDataFrame([row])
result = df.select(functions.expr("length(a)")).collect()[0].asDict()
self.assertEqual(13, result["length(a)"])
def test_replace(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
# replace with int
row = self.spark.createDataFrame([(u'Alice', 10, 10.0)], schema).replace(10, 20).first()
self.assertEqual(row.age, 20)
self.assertEqual(row.height, 20.0)
# replace with double
row = self.spark.createDataFrame(
[(u'Alice', 80, 80.0)], schema).replace(80.0, 82.1).first()
self.assertEqual(row.age, 82)
self.assertEqual(row.height, 82.1)
# replace with string
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(u'Alice', u'Ann').first()
self.assertEqual(row.name, u"Ann")
self.assertEqual(row.age, 10)
# replace with subset specified by a string of a column name w/ actual change
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='age').first()
self.assertEqual(row.age, 20)
# replace with subset specified by a string of a column name w/o actual change
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='height').first()
self.assertEqual(row.age, 10)
# replace with subset specified with one column replaced, another column not in subset
# stays unchanged.
row = self.spark.createDataFrame(
[(u'Alice', 10, 10.0)], schema).replace(10, 20, subset=['name', 'age']).first()
self.assertEqual(row.name, u'Alice')
self.assertEqual(row.age, 20)
self.assertEqual(row.height, 10.0)
# replace with subset specified but no column will be replaced
row = self.spark.createDataFrame(
[(u'Alice', 10, None)], schema).replace(10, 20, subset=['name', 'height']).first()
self.assertEqual(row.name, u'Alice')
self.assertEqual(row.age, 10)
self.assertEqual(row.height, None)
# replace with lists
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace([u'Alice'], [u'Ann']).first()
self.assertTupleEqual(row, (u'Ann', 10, 80.1))
# replace with dict
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: 11}).first()
self.assertTupleEqual(row, (u'Alice', 11, 80.1))
# test backward compatibility with dummy value
dummy_value = 1
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({'Alice': 'Bob'}, dummy_value).first()
self.assertTupleEqual(row, (u'Bob', 10, 80.1))
# test dict with mixed numerics
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: -10, 80.1: 90.5}).first()
self.assertTupleEqual(row, (u'Alice', -10, 90.5))
# replace with tuples
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace((u'Alice', ), (u'Bob', )).first()
self.assertTupleEqual(row, (u'Bob', 10, 80.1))
# replace multiple columns
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace((10, 80.0), (20, 90)).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.0))
# test for mixed numerics
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace((10, 80), (20, 90.5)).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.5))
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace({10: 20, 80: 90.5}).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.5))
# replace with boolean
row = (self
.spark.createDataFrame([(u'Alice', 10, 80.0)], schema)
.selectExpr("name = 'Bob'", 'age <= 15')
.replace(False, True).first())
self.assertTupleEqual(row, (True, True))
# replace list while value is not given (default to None)
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace(["Alice", "Bob"]).first()
self.assertTupleEqual(row, (None, 10, 80.0))
# replace string with None and then drop None rows
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace(u'Alice', None).dropna()
self.assertEqual(row.count(), 0)
# replace with number and None
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace([10, 80], [20, None]).first()
self.assertTupleEqual(row, (u'Alice', 20, None))
# should fail if subset is not list, tuple or None
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: 11}, subset=1).first()
        # should fail if to_replace and value have different lengths
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(["Alice", "Bob"], ["Eve"]).first()
        # should fail when it receives an unexpected type
with self.assertRaises(ValueError):
from datetime import datetime
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(datetime.now(), datetime.now()).first()
# should fail if provided mixed type replacements
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(["Alice", 10], ["Eve", 20]).first()
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({u"Alice": u"Bob", 10: 20}).first()
def test_capture_analysis_exception(self):
self.assertRaises(AnalysisException, lambda: self.spark.sql("select abc"))
self.assertRaises(AnalysisException, lambda: self.df.selectExpr("a + b"))
def test_capture_parse_exception(self):
self.assertRaises(ParseException, lambda: self.spark.sql("abc"))
def test_capture_illegalargument_exception(self):
self.assertRaisesRegexp(IllegalArgumentException, "Setting negative mapred.reduce.tasks",
lambda: self.spark.sql("SET mapred.reduce.tasks=-1"))
df = self.spark.createDataFrame([(1, 2)], ["a", "b"])
self.assertRaisesRegexp(IllegalArgumentException, "1024 is not in the permitted values",
lambda: df.select(sha2(df.a, 1024)).collect())
try:
df.select(sha2(df.a, 1024)).collect()
except IllegalArgumentException as e:
self.assertRegexpMatches(e.desc, "1024 is not in the permitted values")
self.assertRegexpMatches(e.stackTrace,
"org.apache.spark.sql.functions")
def test_with_column_with_existing_name(self):
keys = self.df.withColumn("key", self.df.key).select("key").collect()
self.assertEqual([r.key for r in keys], list(range(100)))
# regression test for SPARK-10417
def test_column_iterator(self):
def foo():
for x in self.df.key:
break
self.assertRaises(TypeError, foo)
# add test for SPARK-10577 (test broadcast join hint)
def test_functions_broadcast(self):
from pyspark.sql.functions import broadcast
df1 = self.spark.createDataFrame([(1, "1"), (2, "2")], ("key", "value"))
df2 = self.spark.createDataFrame([(1, "1"), (2, "2")], ("key", "value"))
# equijoin - should be converted into broadcast join
plan1 = df1.join(broadcast(df2), "key")._jdf.queryExecution().executedPlan()
self.assertEqual(1, plan1.toString().count("BroadcastHashJoin"))
# no join key -- should not be a broadcast join
plan2 = df1.crossJoin(broadcast(df2))._jdf.queryExecution().executedPlan()
self.assertEqual(0, plan2.toString().count("BroadcastHashJoin"))
# planner should not crash without a join
broadcast(df1)._jdf.queryExecution().executedPlan()
def test_generic_hints(self):
from pyspark.sql import DataFrame
df1 = self.spark.range(10e10).toDF("id")
df2 = self.spark.range(10e10).toDF("id")
self.assertIsInstance(df1.hint("broadcast"), DataFrame)
self.assertIsInstance(df1.hint("broadcast", []), DataFrame)
# Dummy rules
self.assertIsInstance(df1.hint("broadcast", "foo", "bar"), DataFrame)
self.assertIsInstance(df1.hint("broadcast", ["foo", "bar"]), DataFrame)
plan = df1.join(df2.hint("broadcast"), "id")._jdf.queryExecution().executedPlan()
self.assertEqual(1, plan.toString().count("BroadcastHashJoin"))
def test_toDF_with_schema_string(self):
data = [Row(key=i, value=str(i)) for i in range(100)]
rdd = self.sc.parallelize(data, 5)
df = rdd.toDF("key: int, value: string")
self.assertEqual(df.schema.simpleString(), "struct<key:int,value:string>")
self.assertEqual(df.collect(), data)
# different but compatible field types can be used.
df = rdd.toDF("key: string, value: string")
self.assertEqual(df.schema.simpleString(), "struct<key:string,value:string>")
self.assertEqual(df.collect(), [Row(key=str(i), value=str(i)) for i in range(100)])
# field names can differ.
df = rdd.toDF(" a: int, b: string ")
self.assertEqual(df.schema.simpleString(), "struct<a:int,b:string>")
self.assertEqual(df.collect(), data)
# number of fields must match.
self.assertRaisesRegexp(Exception, "Length of object",
lambda: rdd.toDF("key: int").collect())
# field types mismatch will cause exception at runtime.
self.assertRaisesRegexp(Exception, "FloatType can not accept",
lambda: rdd.toDF("key: float, value: string").collect())
# flat schema values will be wrapped into row.
df = rdd.map(lambda row: row.key).toDF("int")
self.assertEqual(df.schema.simpleString(), "struct<value:int>")
self.assertEqual(df.collect(), [Row(key=i) for i in range(100)])
# users can use DataType directly instead of data type string.
df = rdd.map(lambda row: row.key).toDF(IntegerType())
self.assertEqual(df.schema.simpleString(), "struct<value:int>")
self.assertEqual(df.collect(), [Row(key=i) for i in range(100)])
def test_join_without_on(self):
df1 = self.spark.range(1).toDF("a")
df2 = self.spark.range(1).toDF("b")
try:
self.spark.conf.set("spark.sql.crossJoin.enabled", "false")
self.assertRaises(AnalysisException, lambda: df1.join(df2, how="inner").collect())
self.spark.conf.set("spark.sql.crossJoin.enabled", "true")
actual = df1.join(df2, how="inner").collect()
expected = [Row(a=0, b=0)]
self.assertEqual(actual, expected)
finally:
# We should unset this. Otherwise, other tests are affected.
self.spark.conf.unset("spark.sql.crossJoin.enabled")
    # Regression test for invalid join methods when on is None, SPARK-14761
def test_invalid_join_method(self):
df1 = self.spark.createDataFrame([("Alice", 5), ("Bob", 8)], ["name", "age"])
df2 = self.spark.createDataFrame([("Alice", 80), ("Bob", 90)], ["name", "height"])
self.assertRaises(IllegalArgumentException, lambda: df1.join(df2, how="invalid-join-type"))
# Cartesian products require cross join syntax
def test_require_cross(self):
from pyspark.sql.functions import broadcast
df1 = self.spark.createDataFrame([(1, "1")], ("key", "value"))
df2 = self.spark.createDataFrame([(1, "1")], ("key", "value"))
# joins without conditions require cross join syntax
self.assertRaises(AnalysisException, lambda: df1.join(df2).collect())
# works with crossJoin
self.assertEqual(1, df1.crossJoin(df2).count())
def test_conf(self):
spark = self.spark
spark.conf.set("bogo", "sipeo")
self.assertEqual(spark.conf.get("bogo"), "sipeo")
spark.conf.set("bogo", "ta")
self.assertEqual(spark.conf.get("bogo"), "ta")
self.assertEqual(spark.conf.get("bogo", "not.read"), "ta")
self.assertEqual(spark.conf.get("not.set", "ta"), "ta")
self.assertRaisesRegexp(Exception, "not.set", lambda: spark.conf.get("not.set"))
spark.conf.unset("bogo")
self.assertEqual(spark.conf.get("bogo", "colombia"), "colombia")
def test_current_database(self):
spark = self.spark
spark.catalog._reset()
self.assertEquals(spark.catalog.currentDatabase(), "default")
spark.sql("CREATE DATABASE some_db")
spark.catalog.setCurrentDatabase("some_db")
self.assertEquals(spark.catalog.currentDatabase(), "some_db")
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.setCurrentDatabase("does_not_exist"))
def test_list_databases(self):
spark = self.spark
spark.catalog._reset()
databases = [db.name for db in spark.catalog.listDatabases()]
self.assertEquals(databases, ["default"])
spark.sql("CREATE DATABASE some_db")
databases = [db.name for db in spark.catalog.listDatabases()]
self.assertEquals(sorted(databases), ["default", "some_db"])
def test_list_tables(self):
from pyspark.sql.catalog import Table
spark = self.spark
spark.catalog._reset()
spark.sql("CREATE DATABASE some_db")
self.assertEquals(spark.catalog.listTables(), [])
self.assertEquals(spark.catalog.listTables("some_db"), [])
spark.createDataFrame([(1, 1)]).createOrReplaceTempView("temp_tab")
spark.sql("CREATE TABLE tab1 (name STRING, age INT) USING parquet")
spark.sql("CREATE TABLE some_db.tab2 (name STRING, age INT) USING parquet")
tables = sorted(spark.catalog.listTables(), key=lambda t: t.name)
tablesDefault = sorted(spark.catalog.listTables("default"), key=lambda t: t.name)
tablesSomeDb = sorted(spark.catalog.listTables("some_db"), key=lambda t: t.name)
self.assertEquals(tables, tablesDefault)
self.assertEquals(len(tables), 2)
self.assertEquals(len(tablesSomeDb), 2)
self.assertEquals(tables[0], Table(
name="tab1",
database="default",
description=None,
tableType="MANAGED",
isTemporary=False))
self.assertEquals(tables[1], Table(
name="temp_tab",
database=None,
description=None,
tableType="TEMPORARY",
isTemporary=True))
self.assertEquals(tablesSomeDb[0], Table(
name="tab2",
database="some_db",
description=None,
tableType="MANAGED",
isTemporary=False))
self.assertEquals(tablesSomeDb[1], Table(
name="temp_tab",
database=None,
description=None,
tableType="TEMPORARY",
isTemporary=True))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.listTables("does_not_exist"))
def test_list_functions(self):
from pyspark.sql.catalog import Function
spark = self.spark
spark.catalog._reset()
spark.sql("CREATE DATABASE some_db")
functions = dict((f.name, f) for f in spark.catalog.listFunctions())
functionsDefault = dict((f.name, f) for f in spark.catalog.listFunctions("default"))
self.assertTrue(len(functions) > 200)
self.assertTrue("+" in functions)
self.assertTrue("like" in functions)
self.assertTrue("month" in functions)
self.assertTrue("to_date" in functions)
self.assertTrue("to_timestamp" in functions)
self.assertTrue("to_unix_timestamp" in functions)
self.assertTrue("current_database" in functions)
self.assertEquals(functions["+"], Function(
name="+",
description=None,
className="org.apache.spark.sql.catalyst.expressions.Add",
isTemporary=True))
self.assertEquals(functions, functionsDefault)
spark.catalog.registerFunction("temp_func", lambda x: str(x))
spark.sql("CREATE FUNCTION func1 AS 'org.apache.spark.data.bricks'")
spark.sql("CREATE FUNCTION some_db.func2 AS 'org.apache.spark.data.bricks'")
newFunctions = dict((f.name, f) for f in spark.catalog.listFunctions())
newFunctionsSomeDb = dict((f.name, f) for f in spark.catalog.listFunctions("some_db"))
self.assertTrue(set(functions).issubset(set(newFunctions)))
self.assertTrue(set(functions).issubset(set(newFunctionsSomeDb)))
self.assertTrue("temp_func" in newFunctions)
self.assertTrue("func1" in newFunctions)
self.assertTrue("func2" not in newFunctions)
self.assertTrue("temp_func" in newFunctionsSomeDb)
self.assertTrue("func1" not in newFunctionsSomeDb)
self.assertTrue("func2" in newFunctionsSomeDb)
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.listFunctions("does_not_exist"))
def test_list_columns(self):
from pyspark.sql.catalog import Column
spark = self.spark
spark.catalog._reset()
spark.sql("CREATE DATABASE some_db")
spark.sql("CREATE TABLE tab1 (name STRING, age INT) USING parquet")
spark.sql("CREATE TABLE some_db.tab2 (nickname STRING, tolerance FLOAT) USING parquet")
columns = sorted(spark.catalog.listColumns("tab1"), key=lambda c: c.name)
columnsDefault = sorted(spark.catalog.listColumns("tab1", "default"), key=lambda c: c.name)
self.assertEquals(columns, columnsDefault)
self.assertEquals(len(columns), 2)
self.assertEquals(columns[0], Column(
name="age",
description=None,
dataType="int",
nullable=True,
isPartition=False,
isBucket=False))
self.assertEquals(columns[1], Column(
name="name",
description=None,
dataType="string",
nullable=True,
isPartition=False,
isBucket=False))
columns2 = sorted(spark.catalog.listColumns("tab2", "some_db"), key=lambda c: c.name)
self.assertEquals(len(columns2), 2)
self.assertEquals(columns2[0], Column(
name="nickname",
description=None,
dataType="string",
nullable=True,
isPartition=False,
isBucket=False))
self.assertEquals(columns2[1], Column(
name="tolerance",
description=None,
dataType="float",
nullable=True,
isPartition=False,
isBucket=False))
self.assertRaisesRegexp(
AnalysisException,
"tab2",
lambda: spark.catalog.listColumns("tab2"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.listColumns("does_not_exist"))
def test_cache(self):
spark = self.spark
spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab1")
spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab2")
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
spark.catalog.cacheTable("tab1")
self.assertTrue(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
spark.catalog.cacheTable("tab2")
spark.catalog.uncacheTable("tab1")
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertTrue(spark.catalog.isCached("tab2"))
spark.catalog.clearCache()
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.isCached("does_not_exist"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.cacheTable("does_not_exist"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.uncacheTable("does_not_exist"))
def test_read_text_file_list(self):
df = self.spark.read.text(['python/test_support/sql/text-test.txt',
'python/test_support/sql/text-test.txt'])
count = df.count()
self.assertEquals(count, 4)
def test_BinaryType_serialization(self):
# Pyrolite version <= 4.9 could not serialize BinaryType with Python3 SPARK-17808
schema = StructType([StructField('mybytes', BinaryType())])
data = [[bytearray(b'here is my data')],
[bytearray(b'and here is some more')]]
df = self.spark.createDataFrame(data, schema=schema)
df.collect()
# test for SPARK-16542
def test_array_types(self):
        # This test needs to make sure that the Scala type selected is at least
        # as large as the corresponding Python type. This is necessary because
        # Python's array types depend on the C implementation on the machine.
        # Therefore there is no machine-independent correspondence between
        # Python's array types and Scala types.
        # See: https://docs.python.org/2/library/array.html
def assertCollectSuccess(typecode, value):
row = Row(myarray=array.array(typecode, [value]))
df = self.spark.createDataFrame([row])
self.assertEqual(df.first()["myarray"][0], value)
# supported string types
#
# String types in python's array are "u" for Py_UNICODE and "c" for char.
# "u" will be removed in python 4, and "c" is not supported in python 3.
supported_string_types = []
if sys.version_info[0] < 4:
supported_string_types += ['u']
# test unicode
assertCollectSuccess('u', u'a')
if sys.version_info[0] < 3:
supported_string_types += ['c']
# test string
assertCollectSuccess('c', 'a')
# supported float and double
#
# Test max, min, and precision for float and double, assuming IEEE 754
# floating-point format.
supported_fractional_types = ['f', 'd']
assertCollectSuccess('f', ctypes.c_float(1e+38).value)
assertCollectSuccess('f', ctypes.c_float(1e-38).value)
assertCollectSuccess('f', ctypes.c_float(1.123456).value)
assertCollectSuccess('d', sys.float_info.max)
assertCollectSuccess('d', sys.float_info.min)
assertCollectSuccess('d', sys.float_info.epsilon)
# supported signed int types
#
        # The size of C types changes with the implementation, so we need to make sure
        # that there is no overflow error on the platform running this test.
supported_signed_int_types = list(
set(_array_signed_int_typecode_ctype_mappings.keys())
.intersection(set(_array_type_mappings.keys())))
for t in supported_signed_int_types:
ctype = _array_signed_int_typecode_ctype_mappings[t]
max_val = 2 ** (ctypes.sizeof(ctype) * 8 - 1)
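            # A signed C type of n bytes spans [-2**(8*n - 1), 2**(8*n - 1) - 1], so the
            # two calls below exercise both extremes of the representable range.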
assertCollectSuccess(t, max_val - 1)
assertCollectSuccess(t, -max_val)
# supported unsigned int types
#
# JVM does not have unsigned types. We need to be very careful to make
# sure that there is no overflow error.
supported_unsigned_int_types = list(
set(_array_unsigned_int_typecode_ctype_mappings.keys())
.intersection(set(_array_type_mappings.keys())))
for t in supported_unsigned_int_types:
ctype = _array_unsigned_int_typecode_ctype_mappings[t]
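            # 2**(8 * sizeof) - 1 is the largest representable unsigned value, so
            # collecting it checks that the Scala type chosen for this typecode is wide
            # enough to hold it.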
assertCollectSuccess(t, 2 ** (ctypes.sizeof(ctype) * 8) - 1)
# all supported types
#
# Make sure the types tested above:
# 1. are all supported types
# 2. cover all supported types
supported_types = (supported_string_types +
supported_fractional_types +
supported_signed_int_types +
supported_unsigned_int_types)
self.assertEqual(set(supported_types), set(_array_type_mappings.keys()))
# all unsupported types
#
        # The keys of _array_type_mappings form a complete list of all supported types,
        # and any type not in _array_type_mappings is considered unsupported.
        # `array.typecodes` is not available in Python 2.
if sys.version_info[0] < 3:
all_types = set(['c', 'b', 'B', 'u', 'h', 'H', 'i', 'I', 'l', 'L', 'f', 'd'])
else:
all_types = set(array.typecodes)
unsupported_types = all_types - set(supported_types)
# test unsupported types
for t in unsupported_types:
with self.assertRaises(TypeError):
a = array.array(t)
self.spark.createDataFrame([Row(myarray=a)]).collect()
def test_bucketed_write(self):
data = [
(1, "foo", 3.0), (2, "foo", 5.0),
(3, "bar", -1.0), (4, "bar", 6.0),
]
df = self.spark.createDataFrame(data, ["x", "y", "z"])
        def count_bucketed_cols(names, table="pyspark_bucket"):
            """Given a sequence of column names and a table name, query the
            catalog and return the number of columns which are used for
            bucketing.
            """
cols = self.spark.catalog.listColumns(table)
num = len([c for c in cols if c.name in names and c.isBucket])
return num
# Test write with one bucketing column
df.write.bucketBy(3, "x").mode("overwrite").saveAsTable("pyspark_bucket")
self.assertEqual(count_bucketed_cols(["x"]), 1)
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write two bucketing columns
df.write.bucketBy(3, "x", "y").mode("overwrite").saveAsTable("pyspark_bucket")
self.assertEqual(count_bucketed_cols(["x", "y"]), 2)
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write with bucket and sort
df.write.bucketBy(2, "x").sortBy("z").mode("overwrite").saveAsTable("pyspark_bucket")
self.assertEqual(count_bucketed_cols(["x"]), 1)
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write with a list of columns
df.write.bucketBy(3, ["x", "y"]).mode("overwrite").saveAsTable("pyspark_bucket")
self.assertEqual(count_bucketed_cols(["x", "y"]), 2)
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write with bucket and sort with a list of columns
(df.write.bucketBy(2, "x")
.sortBy(["y", "z"])
.mode("overwrite").saveAsTable("pyspark_bucket"))
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write with bucket and sort with multiple columns
(df.write.bucketBy(2, "x")
.sortBy("y", "z")
.mode("overwrite").saveAsTable("pyspark_bucket"))
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
@unittest.skipIf(not _have_pandas, "Pandas not installed")
def test_to_pandas(self):
import numpy as np
schema = StructType().add("a", IntegerType()).add("b", StringType())\
.add("c", BooleanType()).add("d", FloatType())
data = [
(1, "foo", True, 3.0), (2, "foo", True, 5.0),
(3, "bar", False, -1.0), (4, "bar", False, 6.0),
]
df = self.spark.createDataFrame(data, schema)
types = df.toPandas().dtypes
self.assertEquals(types[0], np.int32)
self.assertEquals(types[1], np.object)
self.assertEquals(types[2], np.bool)
self.assertEquals(types[3], np.float32)
def test_create_dataframe_from_array_of_long(self):
import array
data = [Row(longarray=array.array('l', [-9223372036854775808, 0, 9223372036854775807]))]
df = self.spark.createDataFrame(data)
self.assertEqual(df.first(), Row(longarray=[-9223372036854775808, 0, 9223372036854775807]))
class HiveSparkSubmitTests(SparkSubmitTests):
def test_hivecontext(self):
        # This test checks that HiveContext is using the Hive metastore (SPARK-16224).
        # It sets a metastore URL and checks whether a Derby directory is created by the
        # Hive metastore; if that directory exists, HiveContext is using the Hive metastore.
metastore_path = os.path.join(tempfile.mkdtemp(), "spark16224_metastore_db")
metastore_URL = "jdbc:derby:;databaseName=" + metastore_path + ";create=true"
hive_site_dir = os.path.join(self.programDir, "conf")
hive_site_file = self.createTempFile("hive-site.xml", ("""
|<configuration>
| <property>
| <name>javax.jdo.option.ConnectionURL</name>
| <value>%s</value>
| </property>
|</configuration>
""" % metastore_URL).lstrip(), "conf")
script = self.createTempFile("test.py", """
|import os
|
|from pyspark.conf import SparkConf
|from pyspark.context import SparkContext
|from pyspark.sql import HiveContext
|
|conf = SparkConf()
|sc = SparkContext(conf=conf)
|hive_context = HiveContext(sc)
|print(hive_context.sql("show databases").collect())
""")
proc = subprocess.Popen(
[self.sparkSubmit, "--master", "local-cluster[1,1,1024]",
"--driver-class-path", hive_site_dir, script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("default", out.decode('utf-8'))
self.assertTrue(os.path.exists(metastore_path))
class SQLTests2(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.spark = SparkSession(cls.sc)
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
cls.spark.stop()
    # We can't include this test in SQLTests because we would stop the class's
    # SparkContext and cause other tests to fail.
def test_sparksession_with_stopped_sparkcontext(self):
self.sc.stop()
sc = SparkContext('local[4]', self.sc.appName)
spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame([(1, 2)], ["c", "c"])
df.collect()
class UDFInitializationTests(unittest.TestCase):
def tearDown(self):
if SparkSession._instantiatedSession is not None:
SparkSession._instantiatedSession.stop()
if SparkContext._active_spark_context is not None:
            SparkContext._active_spark_context.stop()
def test_udf_init_shouldnt_initalize_context(self):
from pyspark.sql.functions import UserDefinedFunction
UserDefinedFunction(lambda x: x, StringType())
self.assertIsNone(
SparkContext._active_spark_context,
"SparkContext shouldn't be initialized when UserDefinedFunction is created."
)
self.assertIsNone(
SparkSession._instantiatedSession,
"SparkSession shouldn't be initialized when UserDefinedFunction is created."
)
class HiveContextSQLTests(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
try:
cls.sc._jvm.org.apache.hadoop.hive.conf.HiveConf()
except py4j.protocol.Py4JError:
cls.tearDownClass()
raise unittest.SkipTest("Hive is not available")
except TypeError:
cls.tearDownClass()
raise unittest.SkipTest("Hive is not available")
os.unlink(cls.tempdir.name)
cls.spark = HiveContext._createForTesting(cls.sc)
cls.testData = [Row(key=i, value=str(i)) for i in range(100)]
cls.df = cls.sc.parallelize(cls.testData).toDF()
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
shutil.rmtree(cls.tempdir.name, ignore_errors=True)
def test_save_and_load_table(self):
df = self.df
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.saveAsTable("savedJsonTable", "json", "append", path=tmpPath)
actual = self.spark.createExternalTable("externalJsonTable", tmpPath, "json")
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("DROP TABLE externalJsonTable")
df.write.saveAsTable("savedJsonTable", "json", "overwrite", path=tmpPath)
schema = StructType([StructField("value", StringType(), True)])
actual = self.spark.createExternalTable("externalJsonTable", source="json",
schema=schema, path=tmpPath,
noUse="this options will not be used")
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
self.assertEqual(sorted(df.select("value").collect()),
sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))
self.spark.sql("DROP TABLE savedJsonTable")
self.spark.sql("DROP TABLE externalJsonTable")
defaultDataSourceName = self.spark.getConf("spark.sql.sources.default",
"org.apache.spark.sql.parquet")
self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
df.write.saveAsTable("savedJsonTable", path=tmpPath, mode="overwrite")
actual = self.spark.createExternalTable("externalJsonTable", path=tmpPath)
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("DROP TABLE savedJsonTable")
self.spark.sql("DROP TABLE externalJsonTable")
self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName)
shutil.rmtree(tmpPath)
def test_window_functions(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
w = Window.partitionBy("value").orderBy("key")
from pyspark.sql import functions as F
sel = df.select(df.value, df.key,
F.max("key").over(w.rowsBetween(0, 1)),
F.min("key").over(w.rowsBetween(0, 1)),
F.count("key").over(w.rowsBetween(float('-inf'), float('inf'))),
F.row_number().over(w),
F.rank().over(w),
F.dense_rank().over(w),
F.ntile(2).over(w))
rs = sorted(sel.collect())
expected = [
("1", 1, 1, 1, 1, 1, 1, 1, 1),
("2", 1, 1, 1, 3, 1, 1, 1, 1),
("2", 1, 2, 1, 3, 2, 1, 1, 1),
("2", 2, 2, 2, 3, 3, 3, 2, 2)
]
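        # Each expected tuple is (value, key, max, min, count, row_number, rank,
        # dense_rank, ntile), matching the column order selected above.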
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
def test_window_functions_without_partitionBy(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
w = Window.orderBy("key", df.value)
from pyspark.sql import functions as F
sel = df.select(df.value, df.key,
F.max("key").over(w.rowsBetween(0, 1)),
F.min("key").over(w.rowsBetween(0, 1)),
F.count("key").over(w.rowsBetween(float('-inf'), float('inf'))),
F.row_number().over(w),
F.rank().over(w),
F.dense_rank().over(w),
F.ntile(2).over(w))
rs = sorted(sel.collect())
expected = [
("1", 1, 1, 1, 4, 1, 1, 1, 1),
("2", 1, 1, 1, 4, 2, 2, 2, 1),
("2", 1, 2, 1, 4, 3, 2, 2, 2),
("2", 2, 2, 2, 4, 4, 4, 3, 2)
]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
def test_window_functions_cumulative_sum(self):
df = self.spark.createDataFrame([("one", 1), ("two", 2)], ["key", "value"])
from pyspark.sql import functions as F
# Test cumulative sum
sel = df.select(
df.key,
F.sum(df.value).over(Window.rowsBetween(Window.unboundedPreceding, 0)))
rs = sorted(sel.collect())
expected = [("one", 1), ("two", 3)]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
# Test boundary values less than JVM's Long.MinValue and make sure we don't overflow
sel = df.select(
df.key,
F.sum(df.value).over(Window.rowsBetween(Window.unboundedPreceding - 1, 0)))
rs = sorted(sel.collect())
expected = [("one", 1), ("two", 3)]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
# Test boundary values greater than JVM's Long.MaxValue and make sure we don't overflow
frame_end = Window.unboundedFollowing + 1
sel = df.select(
df.key,
F.sum(df.value).over(Window.rowsBetween(Window.currentRow, frame_end)))
rs = sorted(sel.collect())
expected = [("one", 3), ("two", 2)]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
def test_collect_functions(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
from pyspark.sql import functions
self.assertEqual(
sorted(df.select(functions.collect_set(df.key).alias('r')).collect()[0].r),
[1, 2])
self.assertEqual(
sorted(df.select(functions.collect_list(df.key).alias('r')).collect()[0].r),
[1, 1, 1, 2])
self.assertEqual(
sorted(df.select(functions.collect_set(df.value).alias('r')).collect()[0].r),
["1", "2"])
self.assertEqual(
sorted(df.select(functions.collect_list(df.value).alias('r')).collect()[0].r),
["1", "2", "2", "2"])
def test_limit_and_take(self):
df = self.spark.range(1, 1000, numPartitions=10)
def assert_runs_only_one_job_stage_and_task(job_group_name, f):
tracker = self.sc.statusTracker()
self.sc.setJobGroup(job_group_name, description="")
f()
jobs = tracker.getJobIdsForGroup(job_group_name)
self.assertEqual(1, len(jobs))
stages = tracker.getJobInfo(jobs[0]).stageIds
self.assertEqual(1, len(stages))
self.assertEqual(1, tracker.getStageInfo(stages[0]).numTasks)
# Regression test for SPARK-10731: take should delegate to Scala implementation
assert_runs_only_one_job_stage_and_task("take", lambda: df.take(1))
        # Regression test for SPARK-17514: limit(n).collect() should perform the same as take(n)
assert_runs_only_one_job_stage_and_task("collect_limit", lambda: df.limit(1).collect())
def test_datetime_functions(self):
from pyspark.sql import functions
from datetime import date, datetime
df = self.spark.range(1).selectExpr("'2017-01-22' as dateCol")
parse_result = df.select(functions.to_date(functions.col("dateCol"))).first()
self.assertEquals(date(2017, 1, 22), parse_result['to_date(`dateCol`)'])
@unittest.skipIf(sys.version_info < (3, 3), "Unittest < 3.3 doesn't support mocking")
def test_unbounded_frames(self):
from unittest.mock import patch
from pyspark.sql import functions as F
from pyspark.sql import window
import importlib
df = self.spark.range(0, 3)
def rows_frame_match():
return "ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING" in df.select(
F.count("*").over(window.Window.rowsBetween(-sys.maxsize, sys.maxsize))
).columns[0]
def range_frame_match():
return "RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING" in df.select(
F.count("*").over(window.Window.rangeBetween(-sys.maxsize, sys.maxsize))
).columns[0]
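        # Patching sys.maxsize and reloading pyspark.sql.window presumably re-evaluates
        # the module-level frame-boundary constants, so frames given as +/- sys.maxsize
        # should still render as UNBOUNDED PRECEDING/FOLLOWING at every patched size.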
with patch("sys.maxsize", 2 ** 31 - 1):
importlib.reload(window)
self.assertTrue(rows_frame_match())
self.assertTrue(range_frame_match())
with patch("sys.maxsize", 2 ** 63 - 1):
importlib.reload(window)
self.assertTrue(rows_frame_match())
self.assertTrue(range_frame_match())
with patch("sys.maxsize", 2 ** 127 - 1):
importlib.reload(window)
self.assertTrue(rows_frame_match())
self.assertTrue(range_frame_match())
importlib.reload(window)
class DataTypeVerificationTests(unittest.TestCase):
def test_verify_type_exception_msg(self):
self.assertRaisesRegexp(
ValueError,
"test_name",
lambda: _make_type_verifier(StringType(), nullable=False, name="test_name")(None))
schema = StructType([StructField('a', StructType([StructField('b', IntegerType())]))])
self.assertRaisesRegexp(
TypeError,
"field b in field a",
lambda: _make_type_verifier(schema)([["data"]]))
def test_verify_type_ok_nullable(self):
obj = None
types = [IntegerType(), FloatType(), StringType(), StructType([])]
for data_type in types:
try:
_make_type_verifier(data_type, nullable=True)(obj)
except Exception:
self.fail("verify_type(%s, %s, nullable=True)" % (obj, data_type))
def test_verify_type_not_nullable(self):
import array
import datetime
import decimal
schema = StructType([
StructField('s', StringType(), nullable=False),
StructField('i', IntegerType(), nullable=True)])
class MyObj:
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
# obj, data_type
success_spec = [
# String
("", StringType()),
(u"", StringType()),
(1, StringType()),
(1.0, StringType()),
([], StringType()),
({}, StringType()),
# UDT
(ExamplePoint(1.0, 2.0), ExamplePointUDT()),
# Boolean
(True, BooleanType()),
# Byte
(-(2**7), ByteType()),
(2**7 - 1, ByteType()),
# Short
(-(2**15), ShortType()),
(2**15 - 1, ShortType()),
# Integer
(-(2**31), IntegerType()),
(2**31 - 1, IntegerType()),
# Long
(2**64, LongType()),
# Float & Double
(1.0, FloatType()),
(1.0, DoubleType()),
# Decimal
(decimal.Decimal("1.0"), DecimalType()),
# Binary
(bytearray([1, 2]), BinaryType()),
# Date/Timestamp
(datetime.date(2000, 1, 2), DateType()),
(datetime.datetime(2000, 1, 2, 3, 4), DateType()),
(datetime.datetime(2000, 1, 2, 3, 4), TimestampType()),
# Array
([], ArrayType(IntegerType())),
(["1", None], ArrayType(StringType(), containsNull=True)),
([1, 2], ArrayType(IntegerType())),
((1, 2), ArrayType(IntegerType())),
(array.array('h', [1, 2]), ArrayType(IntegerType())),
# Map
({}, MapType(StringType(), IntegerType())),
({"a": 1}, MapType(StringType(), IntegerType())),
({"a": None}, MapType(StringType(), IntegerType(), valueContainsNull=True)),
# Struct
({"s": "a", "i": 1}, schema),
({"s": "a", "i": None}, schema),
({"s": "a"}, schema),
({"s": "a", "f": 1.0}, schema),
(Row(s="a", i=1), schema),
(Row(s="a", i=None), schema),
(Row(s="a", i=1, f=1.0), schema),
(["a", 1], schema),
(["a", None], schema),
(("a", 1), schema),
(MyObj(s="a", i=1), schema),
(MyObj(s="a", i=None), schema),
(MyObj(s="a"), schema),
]
# obj, data_type, exception class
failure_spec = [
# String (match anything but None)
(None, StringType(), ValueError),
# UDT
(ExamplePoint(1.0, 2.0), PythonOnlyUDT(), ValueError),
# Boolean
(1, BooleanType(), TypeError),
("True", BooleanType(), TypeError),
([1], BooleanType(), TypeError),
# Byte
(-(2**7) - 1, ByteType(), ValueError),
(2**7, ByteType(), ValueError),
("1", ByteType(), TypeError),
(1.0, ByteType(), TypeError),
# Short
(-(2**15) - 1, ShortType(), ValueError),
(2**15, ShortType(), ValueError),
# Integer
(-(2**31) - 1, IntegerType(), ValueError),
(2**31, IntegerType(), ValueError),
# Float & Double
(1, FloatType(), TypeError),
(1, DoubleType(), TypeError),
# Decimal
(1.0, DecimalType(), TypeError),
(1, DecimalType(), TypeError),
("1.0", DecimalType(), TypeError),
# Binary
(1, BinaryType(), TypeError),
# Date/Timestamp
("2000-01-02", DateType(), TypeError),
(946811040, TimestampType(), TypeError),
# Array
(["1", None], ArrayType(StringType(), containsNull=False), ValueError),
([1, "2"], ArrayType(IntegerType()), TypeError),
# Map
({"a": 1}, MapType(IntegerType(), IntegerType()), TypeError),
({"a": "1"}, MapType(StringType(), IntegerType()), TypeError),
({"a": None}, MapType(StringType(), IntegerType(), valueContainsNull=False),
ValueError),
# Struct
({"s": "a", "i": "1"}, schema, TypeError),
(Row(s="a"), schema, ValueError), # Row can't have missing field
(Row(s="a", i="1"), schema, TypeError),
(["a"], schema, ValueError),
(["a", "1"], schema, TypeError),
(MyObj(s="a", i="1"), schema, TypeError),
(MyObj(s=None, i="1"), schema, ValueError),
]
# Check success cases
for obj, data_type in success_spec:
try:
_make_type_verifier(data_type, nullable=False)(obj)
except Exception:
self.fail("verify_type(%s, %s, nullable=False)" % (obj, data_type))
# Check failure cases
for obj, data_type, exp in failure_spec:
msg = "verify_type(%s, %s, nullable=False) == %s" % (obj, data_type, exp)
with self.assertRaises(exp, msg=msg):
_make_type_verifier(data_type, nullable=False)(obj)
@unittest.skipIf(not _have_arrow, "Arrow not installed")
class ArrowTests(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.spark = SparkSession(cls.sc)
cls.spark.conf.set("spark.sql.execution.arrow.enable", "true")
cls.schema = StructType([
StructField("1_str_t", StringType(), True),
StructField("2_int_t", IntegerType(), True),
StructField("3_long_t", LongType(), True),
StructField("4_float_t", FloatType(), True),
StructField("5_double_t", DoubleType(), True)])
cls.data = [("a", 1, 10, 0.2, 2.0),
("b", 2, 20, 0.4, 4.0),
("c", 3, 30, 0.8, 6.0)]
def assertFramesEqual(self, df_with_arrow, df_without):
msg = ("DataFrame from Arrow is not equal" +
("\n\nWith Arrow:\n%s\n%s" % (df_with_arrow, df_with_arrow.dtypes)) +
("\n\nWithout:\n%s\n%s" % (df_without, df_without.dtypes)))
self.assertTrue(df_without.equals(df_with_arrow), msg=msg)
def test_unsupported_datatype(self):
schema = StructType([StructField("dt", DateType(), True)])
df = self.spark.createDataFrame([(datetime.date(1970, 1, 1),)], schema=schema)
with QuietTest(self.sc):
self.assertRaises(Exception, lambda: df.toPandas())
def test_null_conversion(self):
df_null = self.spark.createDataFrame([tuple([None for _ in range(len(self.data[0]))])] +
self.data)
pdf = df_null.toPandas()
null_counts = pdf.isnull().sum().tolist()
self.assertTrue(all([c == 1 for c in null_counts]))
def test_toPandas_arrow_toggle(self):
df = self.spark.createDataFrame(self.data, schema=self.schema)
self.spark.conf.set("spark.sql.execution.arrow.enable", "false")
pdf = df.toPandas()
self.spark.conf.set("spark.sql.execution.arrow.enable", "true")
pdf_arrow = df.toPandas()
self.assertFramesEqual(pdf_arrow, pdf)
def test_pandas_round_trip(self):
import pandas as pd
import numpy as np
data_dict = {}
for j, name in enumerate(self.schema.names):
data_dict[name] = [self.data[i][j] for i in range(len(self.data))]
# need to convert these to numpy types first
data_dict["2_int_t"] = np.int32(data_dict["2_int_t"])
data_dict["4_float_t"] = np.float32(data_dict["4_float_t"])
pdf = pd.DataFrame(data=data_dict)
df = self.spark.createDataFrame(self.data, schema=self.schema)
pdf_arrow = df.toPandas()
self.assertFramesEqual(pdf_arrow, pdf)
def test_filtered_frame(self):
df = self.spark.range(3).toDF("i")
pdf = df.filter("i < 0").toPandas()
self.assertEqual(len(pdf.columns), 1)
self.assertEqual(pdf.columns[0], "i")
self.assertTrue(pdf.empty)
if __name__ == "__main__":
from pyspark.sql.tests import *
if xmlrunner:
unittest.main(testRunner=xmlrunner.XMLTestRunner(output='target/test-reports'))
else:
unittest.main()
| apache-2.0 |
gjsun/MPack | MPack_Core/utils/make_masks.py | 1 | 3066 | import pdb
import numpy as np
import pandas as pd
import os
import math
import matplotlib.pyplot as plt
from astropy.wcs import WCS
from astropy.io import fits
from parameter_file import *
# def CreateMask(wavelength):
# Approximate RMS noise levels for each band, estimated by eye from the maps:
# 24um:
# 70um: 0.0018
# 250um: 0.0017
# 350um: 0.0023
# 450um: 4.1
# 500um: 0.0040
# 850um: 0.8
wavelength = [24,100,160,250,350,500,850,1100]
# Flags selecting which of the above wavelengths to make masks for (1 = make a mask)
wv0 = np.array([1,1,1,1,1,1,1,1])
for (ii,jj) in enumerate(wv0):
if jj==1:
print '\nCreating mask for %dum...\n' % wavelength[ii]
map_pf = UDSMapsParameterFile()
map_name = map_pf[str(wavelength[ii])+'_img']
noise_name = map_pf[str(wavelength[ii])+'_rms']
mask_name = map_pf[str(wavelength[ii])+'_msk']
#print [map_name, noise_name, mask_name]
cmap, chd = fits.getdata(map_name, 0, header = True)
nmap, nhd = fits.getdata(noise_name, 0, header = True)
cms = np.shape(cmap)
rms = np.median(nmap[cms[0]/4:cms[0]*3/4, cms[1]/4:cms[1]*3/4].flatten())
#print 'rms is %.8f' % rms
new_map = np.ones_like(cmap)
#new_map = np.zeros_like(cmap)
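        # Mask out pixels whose noise exceeds 3x the median rms, is NaN, or is exactly
        # zero (no coverage); the boolean arrays are combined with + as a logical OR.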
new_map[np.where((nmap>3.*rms) + np.isnan(nmap) + (nmap==0))] = 0
#new_map[cms[0]/2-cms[0]/4:cms[0]/2+cms[0]/4, cms[1]/2-cms[1]/4:cms[1]/2+cms[1]/4] = 1
# Filter isolated ones
for i in range(cms[0]):
row_mean = (new_map[i,0:-2]+new_map[i,1:-1]+new_map[i,2::])/3.
ind_0s = np.where(row_mean<0.5)[0] + 1
new_map[i,ind_0s] = 0
for j in range(cms[1]):
col_mean = (new_map[0:-2,j]+new_map[1:-1,j]+new_map[2::,j])/3.
ind_0s = np.where(col_mean<0.5)[0] + 1
new_map[ind_0s,j] = 0
fits.writeto(mask_name, new_map, nhd, clobber=True)
print 'Created the mask of %dum map to .fits file.\n' % wavelength[ii]
print '----- DONE -----'
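# Minimal sketch of the isolated-pixel filter applied above (illustration
# only, not called anywhere): the input is assumed to be a 1-D array of 0s and
# 1s; a pixel survives only if the 3-pixel window centred on it has a mean
# >= 0.5, i.e. at least two of the three pixels are set. The first and last
# pixels are left untouched, exactly as in the row/column loops above.
# Example: [0, 1, 0, 1, 1, 1, 0] -> [0, 0, 0, 1, 1, 1, 0]
def _filter_isolated_pixels_1d(row):
    row = row.copy()
    win_mean = (row[0:-2] + row[1:-1] + row[2::]) / 3.
    row[np.where(win_mean < 0.5)[0] + 1] = 0
    return row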
#for i in enumerate(wv0):
# UDSMapsParameterFile()['%s' % str(wavelength)]
'''
map_path = '/Users/guochaosun/Desktop/Caltech_OBSCOS/DataCollection/simstack_maps/uds_maps_to_use/'
mask_path = map_path + 'masks/'
pixsize_suffix = '4.0_arcsec_pixels'
map_name = map_path+'sxdf_aste_kscott20100924_map.fits'
noise_name = map_path+'sxdf_aste_kscott20100924_map_noise.fits'
mask_name = mask_path+'uds_aztec_mask.fits'
cmap, chd = fits.getdata(map_name, 0, header = True)
nmap, nhd = fits.getdata(noise_name, 0, header = True)
cms = np.shape(cmap)
rms = np.median(nmap[cms[0]/4:cms[0]*3/4, cms[1]/4:cms[1]*3/4].flatten())
print 'rms is %.8f' % rms
new_map = np.ones_like(cmap)
#new_map = np.zeros_like(cmap)
new_map[np.where((nmap>3.*rms) + np.isnan(nmap) + (nmap==0))] = 0
#new_map[cms[0]/2-cms[0]/4:cms[0]/2+cms[0]/4, cms[1]/2-cms[1]/4:cms[1]/2+cms[1]/4] = 1
# Filter isolated ones
for i in range(cms[0]):
row_mean = (new_map[i,0:-2]+new_map[i,1:-1]+new_map[i,2::])/3.
ind_0s = np.where(row_mean<0.5)[0] + 1
new_map[i,ind_0s] = 0
for j in range(cms[1]):
col_mean = (new_map[0:-2,j]+new_map[1:-1,j]+new_map[2::,j])/3.
ind_0s = np.where(col_mean<0.5)[0] + 1
new_map[ind_0s,j] = 0
fits.writeto(mask_name, new_map, nhd, clobber=True)
print '\nDONE!\n'
''' | mit |
DGrady/pandas | pandas/tests/groupby/test_filters.py | 15 | 24350 | # -*- coding: utf-8 -*-
from __future__ import print_function
from numpy import nan
import pytest
from pandas import Timestamp
from pandas.core.index import MultiIndex
from pandas.core.api import DataFrame
from pandas.core.series import Series
from pandas.util.testing import (assert_frame_equal, assert_series_equal
)
from pandas.compat import (lmap)
from pandas import compat
import pandas.core.common as com
import numpy as np
import pandas.util.testing as tm
import pandas as pd
class TestGroupByFilter(object):
def setup_method(self, method):
self.ts = tm.makeTimeSeries()
self.seriesd = tm.getSeriesData()
self.tsd = tm.getTimeSeriesData()
self.frame = DataFrame(self.seriesd)
self.tsframe = DataFrame(self.tsd)
self.df = DataFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
self.df_mixed_floats = DataFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.array(
np.random.randn(8), dtype='float32')})
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',
'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.mframe = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
self.three_group = DataFrame(
{'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two', 'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny', 'shiny',
'dull', 'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
def test_filter_series(self):
s = pd.Series([1, 3, 20, 5, 22, 24, 7])
expected_odd = pd.Series([1, 3, 5, 7], index=[0, 1, 3, 6])
expected_even = pd.Series([20, 22, 24], index=[2, 4, 5])
grouper = s.apply(lambda x: x % 2)
grouped = s.groupby(grouper)
assert_series_equal(
grouped.filter(lambda x: x.mean() < 10), expected_odd)
assert_series_equal(
grouped.filter(lambda x: x.mean() > 10), expected_even)
# Test dropna=False.
assert_series_equal(
grouped.filter(lambda x: x.mean() < 10, dropna=False),
expected_odd.reindex(s.index))
assert_series_equal(
grouped.filter(lambda x: x.mean() > 10, dropna=False),
expected_even.reindex(s.index))
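        # With dropna=False, filter() keeps the original index and leaves NaN
        # where a group was rejected (hence the reindex against s.index above).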
def test_filter_single_column_df(self):
df = pd.DataFrame([1, 3, 20, 5, 22, 24, 7])
expected_odd = pd.DataFrame([1, 3, 5, 7], index=[0, 1, 3, 6])
expected_even = pd.DataFrame([20, 22, 24], index=[2, 4, 5])
grouper = df[0].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
assert_frame_equal(
grouped.filter(lambda x: x.mean() < 10), expected_odd)
assert_frame_equal(
grouped.filter(lambda x: x.mean() > 10), expected_even)
# Test dropna=False.
assert_frame_equal(
grouped.filter(lambda x: x.mean() < 10, dropna=False),
expected_odd.reindex(df.index))
assert_frame_equal(
grouped.filter(lambda x: x.mean() > 10, dropna=False),
expected_even.reindex(df.index))
def test_filter_multi_column_df(self):
df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': [1, 1, 1, 1]})
grouper = df['A'].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
expected = pd.DataFrame({'A': [12, 12], 'B': [1, 1]}, index=[1, 2])
assert_frame_equal(
grouped.filter(lambda x: x['A'].sum() - x['B'].sum() > 10),
expected)
def test_filter_mixed_df(self):
df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
grouper = df['A'].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
expected = pd.DataFrame({'A': [12, 12], 'B': ['b', 'c']}, index=[1, 2])
assert_frame_equal(
grouped.filter(lambda x: x['A'].sum() > 10), expected)
def test_filter_out_all_groups(self):
s = pd.Series([1, 3, 20, 5, 22, 24, 7])
grouper = s.apply(lambda x: x % 2)
grouped = s.groupby(grouper)
assert_series_equal(grouped.filter(lambda x: x.mean() > 1000), s[[]])
df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
grouper = df['A'].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
assert_frame_equal(
grouped.filter(lambda x: x['A'].sum() > 1000), df.loc[[]])
def test_filter_out_no_groups(self):
s = pd.Series([1, 3, 20, 5, 22, 24, 7])
grouper = s.apply(lambda x: x % 2)
grouped = s.groupby(grouper)
filtered = grouped.filter(lambda x: x.mean() > 0)
assert_series_equal(filtered, s)
df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
grouper = df['A'].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
filtered = grouped.filter(lambda x: x['A'].mean() > 0)
assert_frame_equal(filtered, df)
def test_filter_out_all_groups_in_df(self):
# GH12768
df = pd.DataFrame({'a': [1, 1, 2], 'b': [1, 2, 0]})
res = df.groupby('a')
res = res.filter(lambda x: x['b'].sum() > 5, dropna=False)
expected = pd.DataFrame({'a': [nan] * 3, 'b': [nan] * 3})
assert_frame_equal(expected, res)
df = pd.DataFrame({'a': [1, 1, 2], 'b': [1, 2, 0]})
res = df.groupby('a')
res = res.filter(lambda x: x['b'].sum() > 5, dropna=True)
expected = pd.DataFrame({'a': [], 'b': []}, dtype="int64")
assert_frame_equal(expected, res)
def test_filter_condition_raises(self):
def raise_if_sum_is_zero(x):
if x.sum() == 0:
raise ValueError
else:
return x.sum() > 0
s = pd.Series([-1, 0, 1, 2])
grouper = s.apply(lambda x: x % 2)
grouped = s.groupby(grouper)
pytest.raises(TypeError,
lambda: grouped.filter(raise_if_sum_is_zero))
def test_filter_with_axis_in_groupby(self):
# issue 11041
index = pd.MultiIndex.from_product([range(10), [0, 1]])
data = pd.DataFrame(
np.arange(100).reshape(-1, 20), columns=index, dtype='int64')
result = data.groupby(level=0,
axis=1).filter(lambda x: x.iloc[0, 0] > 10)
expected = data.iloc[:, 12:20]
assert_frame_equal(result, expected)
def test_filter_bad_shapes(self):
df = DataFrame({'A': np.arange(8),
'B': list('aabbbbcc'),
'C': np.arange(8)})
s = df['B']
g_df = df.groupby('B')
g_s = s.groupby(s)
f = lambda x: x
pytest.raises(TypeError, lambda: g_df.filter(f))
pytest.raises(TypeError, lambda: g_s.filter(f))
f = lambda x: x == 1
pytest.raises(TypeError, lambda: g_df.filter(f))
pytest.raises(TypeError, lambda: g_s.filter(f))
f = lambda x: np.outer(x, x)
pytest.raises(TypeError, lambda: g_df.filter(f))
pytest.raises(TypeError, lambda: g_s.filter(f))
def test_filter_nan_is_false(self):
df = DataFrame({'A': np.arange(8),
'B': list('aabbbbcc'),
'C': np.arange(8)})
s = df['B']
g_df = df.groupby(df['B'])
g_s = s.groupby(s)
f = lambda x: np.nan
assert_frame_equal(g_df.filter(f), df.loc[[]])
assert_series_equal(g_s.filter(f), s[[]])
def test_filter_against_workaround(self):
np.random.seed(0)
# Series of ints
s = Series(np.random.randint(0, 100, 1000))
grouper = s.apply(lambda x: np.round(x, -1))
grouped = s.groupby(grouper)
f = lambda x: x.mean() > 10
old_way = s[grouped.transform(f).astype('bool')]
new_way = grouped.filter(f)
assert_series_equal(new_way.sort_values(), old_way.sort_values())
# Series of floats
s = 100 * Series(np.random.random(1000))
grouper = s.apply(lambda x: np.round(x, -1))
grouped = s.groupby(grouper)
f = lambda x: x.mean() > 10
old_way = s[grouped.transform(f).astype('bool')]
new_way = grouped.filter(f)
assert_series_equal(new_way.sort_values(), old_way.sort_values())
# Set up DataFrame of ints, floats, strings.
from string import ascii_lowercase
letters = np.array(list(ascii_lowercase))
N = 1000
random_letters = letters.take(np.random.randint(0, 26, N))
df = DataFrame({'ints': Series(np.random.randint(0, 100, N)),
'floats': N / 10 * Series(np.random.random(N)),
'letters': Series(random_letters)})
# Group by ints; filter on floats.
grouped = df.groupby('ints')
old_way = df[grouped.floats.
transform(lambda x: x.mean() > N / 20).astype('bool')]
new_way = grouped.filter(lambda x: x['floats'].mean() > N / 20)
assert_frame_equal(new_way, old_way)
# Group by floats (rounded); filter on strings.
grouper = df.floats.apply(lambda x: np.round(x, -1))
grouped = df.groupby(grouper)
old_way = df[grouped.letters.
transform(lambda x: len(x) < N / 10).astype('bool')]
new_way = grouped.filter(lambda x: len(x.letters) < N / 10)
assert_frame_equal(new_way, old_way)
# Group by strings; filter on ints.
grouped = df.groupby('letters')
old_way = df[grouped.ints.
transform(lambda x: x.mean() > N / 20).astype('bool')]
new_way = grouped.filter(lambda x: x['ints'].mean() > N / 20)
assert_frame_equal(new_way, old_way)
def test_filter_using_len(self):
# BUG GH4447
df = DataFrame({'A': np.arange(8),
'B': list('aabbbbcc'),
'C': np.arange(8)})
grouped = df.groupby('B')
actual = grouped.filter(lambda x: len(x) > 2)
expected = DataFrame(
{'A': np.arange(2, 6),
'B': list('bbbb'),
'C': np.arange(2, 6)}, index=np.arange(2, 6))
assert_frame_equal(actual, expected)
actual = grouped.filter(lambda x: len(x) > 4)
expected = df.loc[[]]
assert_frame_equal(actual, expected)
# Series have always worked properly, but we'll test anyway.
s = df['B']
grouped = s.groupby(s)
actual = grouped.filter(lambda x: len(x) > 2)
expected = Series(4 * ['b'], index=np.arange(2, 6), name='B')
assert_series_equal(actual, expected)
actual = grouped.filter(lambda x: len(x) > 4)
expected = s[[]]
assert_series_equal(actual, expected)
def test_filter_maintains_ordering(self):
# Simple case: index is sequential. #4621
df = DataFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
'tag': [23, 45, 62, 24, 45, 34, 25, 62]})
s = df['pid']
grouped = df.groupby('tag')
actual = grouped.filter(lambda x: len(x) > 1)
expected = df.iloc[[1, 2, 4, 7]]
assert_frame_equal(actual, expected)
grouped = s.groupby(df['tag'])
actual = grouped.filter(lambda x: len(x) > 1)
expected = s.iloc[[1, 2, 4, 7]]
assert_series_equal(actual, expected)
# Now index is sequentially decreasing.
df.index = np.arange(len(df) - 1, -1, -1)
s = df['pid']
grouped = df.groupby('tag')
actual = grouped.filter(lambda x: len(x) > 1)
expected = df.iloc[[1, 2, 4, 7]]
assert_frame_equal(actual, expected)
grouped = s.groupby(df['tag'])
actual = grouped.filter(lambda x: len(x) > 1)
expected = s.iloc[[1, 2, 4, 7]]
assert_series_equal(actual, expected)
# Index is shuffled.
SHUFFLED = [4, 6, 7, 2, 1, 0, 5, 3]
df.index = df.index[SHUFFLED]
s = df['pid']
grouped = df.groupby('tag')
actual = grouped.filter(lambda x: len(x) > 1)
expected = df.iloc[[1, 2, 4, 7]]
assert_frame_equal(actual, expected)
grouped = s.groupby(df['tag'])
actual = grouped.filter(lambda x: len(x) > 1)
expected = s.iloc[[1, 2, 4, 7]]
assert_series_equal(actual, expected)
def test_filter_multiple_timestamp(self):
# GH 10114
df = DataFrame({'A': np.arange(5, dtype='int64'),
'B': ['foo', 'bar', 'foo', 'bar', 'bar'],
'C': Timestamp('20130101')})
grouped = df.groupby(['B', 'C'])
result = grouped['A'].filter(lambda x: True)
assert_series_equal(df['A'], result)
result = grouped['A'].transform(len)
expected = Series([2, 3, 2, 3, 3], name='A')
assert_series_equal(result, expected)
result = grouped.filter(lambda x: True)
assert_frame_equal(df, result)
result = grouped.transform('sum')
expected = DataFrame({'A': [2, 8, 2, 8, 8]})
assert_frame_equal(result, expected)
result = grouped.transform(len)
expected = DataFrame({'A': [2, 3, 2, 3, 3]})
assert_frame_equal(result, expected)
def test_filter_and_transform_with_non_unique_int_index(self):
# GH4620
index = [1, 1, 1, 2, 1, 1, 0, 1]
df = DataFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
grouped_df = df.groupby('tag')
ser = df['pid']
grouped_ser = ser.groupby(df['tag'])
expected_indexes = [1, 2, 4, 7]
# Filter DataFrame
actual = grouped_df.filter(lambda x: len(x) > 1)
expected = df.iloc[expected_indexes]
assert_frame_equal(actual, expected)
actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
expected = df.copy()
expected.iloc[[0, 3, 5, 6]] = np.nan
assert_frame_equal(actual, expected)
# Filter Series
actual = grouped_ser.filter(lambda x: len(x) > 1)
expected = ser.take(expected_indexes)
assert_series_equal(actual, expected)
actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
NA = np.nan
expected = Series([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
# ^ made manually because this can get confusing!
assert_series_equal(actual, expected)
# Transform Series
actual = grouped_ser.transform(len)
expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
assert_series_equal(actual, expected)
# Transform (a column from) DataFrameGroupBy
actual = grouped_df.pid.transform(len)
assert_series_equal(actual, expected)
def test_filter_and_transform_with_multiple_non_unique_int_index(self):
# GH4620
index = [1, 1, 1, 2, 0, 0, 0, 1]
df = DataFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
grouped_df = df.groupby('tag')
ser = df['pid']
grouped_ser = ser.groupby(df['tag'])
expected_indexes = [1, 2, 4, 7]
# Filter DataFrame
actual = grouped_df.filter(lambda x: len(x) > 1)
expected = df.iloc[expected_indexes]
assert_frame_equal(actual, expected)
actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
expected = df.copy()
expected.iloc[[0, 3, 5, 6]] = np.nan
assert_frame_equal(actual, expected)
# Filter Series
actual = grouped_ser.filter(lambda x: len(x) > 1)
expected = ser.take(expected_indexes)
assert_series_equal(actual, expected)
actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
NA = np.nan
expected = Series([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
# ^ made manually because this can get confusing!
assert_series_equal(actual, expected)
# Transform Series
actual = grouped_ser.transform(len)
expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
assert_series_equal(actual, expected)
# Transform (a column from) DataFrameGroupBy
actual = grouped_df.pid.transform(len)
assert_series_equal(actual, expected)
def test_filter_and_transform_with_non_unique_float_index(self):
# GH4620
index = np.array([1, 1, 1, 2, 1, 1, 0, 1], dtype=float)
df = DataFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
grouped_df = df.groupby('tag')
ser = df['pid']
grouped_ser = ser.groupby(df['tag'])
expected_indexes = [1, 2, 4, 7]
# Filter DataFrame
actual = grouped_df.filter(lambda x: len(x) > 1)
expected = df.iloc[expected_indexes]
assert_frame_equal(actual, expected)
actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
expected = df.copy()
expected.iloc[[0, 3, 5, 6]] = np.nan
assert_frame_equal(actual, expected)
# Filter Series
actual = grouped_ser.filter(lambda x: len(x) > 1)
expected = ser.take(expected_indexes)
assert_series_equal(actual, expected)
actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
NA = np.nan
expected = Series([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
# ^ made manually because this can get confusing!
assert_series_equal(actual, expected)
# Transform Series
actual = grouped_ser.transform(len)
expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
assert_series_equal(actual, expected)
# Transform (a column from) DataFrameGroupBy
actual = grouped_df.pid.transform(len)
assert_series_equal(actual, expected)
def test_filter_and_transform_with_non_unique_timestamp_index(self):
# GH4620
t0 = Timestamp('2013-09-30 00:05:00')
t1 = Timestamp('2013-10-30 00:05:00')
t2 = Timestamp('2013-11-30 00:05:00')
index = [t1, t1, t1, t2, t1, t1, t0, t1]
df = DataFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
grouped_df = df.groupby('tag')
ser = df['pid']
grouped_ser = ser.groupby(df['tag'])
expected_indexes = [1, 2, 4, 7]
# Filter DataFrame
actual = grouped_df.filter(lambda x: len(x) > 1)
expected = df.iloc[expected_indexes]
assert_frame_equal(actual, expected)
actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
expected = df.copy()
expected.iloc[[0, 3, 5, 6]] = np.nan
assert_frame_equal(actual, expected)
# Filter Series
actual = grouped_ser.filter(lambda x: len(x) > 1)
expected = ser.take(expected_indexes)
assert_series_equal(actual, expected)
actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
NA = np.nan
expected = Series([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
# ^ made manually because this can get confusing!
assert_series_equal(actual, expected)
# Transform Series
actual = grouped_ser.transform(len)
expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
assert_series_equal(actual, expected)
# Transform (a column from) DataFrameGroupBy
actual = grouped_df.pid.transform(len)
assert_series_equal(actual, expected)
def test_filter_and_transform_with_non_unique_string_index(self):
# GH4620
index = list('bbbcbbab')
df = DataFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
grouped_df = df.groupby('tag')
ser = df['pid']
grouped_ser = ser.groupby(df['tag'])
expected_indexes = [1, 2, 4, 7]
# Filter DataFrame
actual = grouped_df.filter(lambda x: len(x) > 1)
expected = df.iloc[expected_indexes]
assert_frame_equal(actual, expected)
actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
expected = df.copy()
expected.iloc[[0, 3, 5, 6]] = np.nan
assert_frame_equal(actual, expected)
# Filter Series
actual = grouped_ser.filter(lambda x: len(x) > 1)
expected = ser.take(expected_indexes)
assert_series_equal(actual, expected)
actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
NA = np.nan
expected = Series([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
# ^ made manually because this can get confusing!
assert_series_equal(actual, expected)
# Transform Series
actual = grouped_ser.transform(len)
expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
assert_series_equal(actual, expected)
# Transform (a column from) DataFrameGroupBy
actual = grouped_df.pid.transform(len)
assert_series_equal(actual, expected)
def test_filter_has_access_to_grouped_cols(self):
df = DataFrame([[1, 2], [1, 3], [5, 6]], columns=['A', 'B'])
g = df.groupby('A')
# previously didn't have access to col A #????
filt = g.filter(lambda x: x['A'].sum() == 2)
assert_frame_equal(filt, df.iloc[[0, 1]])
def test_filter_enforces_scalarness(self):
df = pd.DataFrame([
['best', 'a', 'x'],
['worst', 'b', 'y'],
['best', 'c', 'x'],
['best', 'd', 'y'],
['worst', 'd', 'y'],
['worst', 'd', 'y'],
['best', 'd', 'z'],
], columns=['a', 'b', 'c'])
with tm.assert_raises_regex(TypeError,
'filter function returned a.*'):
df.groupby('c').filter(lambda g: g['a'] == 'best')
def test_filter_non_bool_raises(self):
df = pd.DataFrame([
['best', 'a', 1],
['worst', 'b', 1],
['best', 'c', 1],
['best', 'd', 1],
['worst', 'd', 1],
['worst', 'd', 1],
['best', 'd', 1],
], columns=['a', 'b', 'c'])
with tm.assert_raises_regex(TypeError,
'filter function returned a.*'):
df.groupby('a').filter(lambda g: g.c.mean())
def test_filter_dropna_with_empty_groups(self):
# GH 10780
data = pd.Series(np.random.rand(9), index=np.repeat([1, 2, 3], 3))
groupped = data.groupby(level=0)
result_false = groupped.filter(lambda x: x.mean() > 1, dropna=False)
expected_false = pd.Series([np.nan] * 9,
index=np.repeat([1, 2, 3], 3))
tm.assert_series_equal(result_false, expected_false)
result_true = groupped.filter(lambda x: x.mean() > 1, dropna=True)
expected_true = pd.Series(index=pd.Index([], dtype=int))
tm.assert_series_equal(result_true, expected_true)
def assert_fp_equal(a, b):
assert (np.abs(a - b) < 1e-12).all()
def _check_groupby(df, result, keys, field, f=lambda x: x.sum()):
tups = lmap(tuple, df[keys].values)
tups = com._asarray_tuplesafe(tups)
expected = f(df.groupby(tups)[field])
for k, v in compat.iteritems(expected):
assert (result[k] == v)
| bsd-3-clause |
dreuven/SampleSparse | SampleSparse/tests/classicLAHMCSampling/testing_np_vs_tflow_dynamics.py | 1 | 3761 | import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from LAHMC.python import LAHMC
import ipdb
## Set the seed from which life springs deterministically###
np.random.seed(1234)
#####
np_energy_vals = []
np_deriv_vals = []
np_energy_arg = []
np_deriv_arg = []
def E(X, sigma = 1.):
# print("Called np energy function!!!!!!!")
np_energy_arg.append(X)
energy = np.sum(X**2, axis = 0).reshape(1,-1)/2./sigma**2
np_energy_vals.append(energy)
return energy
def dedx(X, sigma = 1.):
np_deriv_arg.append(X)
deriv = X/sigma ** 2
np_deriv_vals.append(deriv)
return deriv
### Tensorflow implementation
sess = tf.Session()
input = tf.placeholder("float64",shape = (2,None))
energy = tf.reduce_sum(input**2, reduction_indices = 0)/2.
e_grad = tf.gradients(energy,[input])[0]
sess.run(tf.initialize_all_variables())
tflow_energy_vals = []
tflow_deriv_vals = []
tflow_energy_arg = []
tflow_deriv_arg = []
def E_tflow(X, sigma = 1):
# print("Called tflow energy func")
# print("In E_tflow and arg is {0}".format(X))
tflow_energy_arg.append(X)
energy_ = sess.run(energy, feed_dict = {input:X}).reshape(1,-1)
# print("In tensorflow energy func and tflow energy is {0}".format(energy_))
tflow_energy_vals.append(energy_)
return energy_
def dedx_flow(X, sigma = 1):
tflow_deriv_arg.append(X)
deriv = sess.run(e_grad, feed_dict = {input: X})
# print("In tensorflow derivative and it is {0}".format(deriv))
tflow_deriv_vals.append(deriv)
return deriv
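# Note: for E(X) = sum(X**2, axis=0)/2 the analytic gradient is simply X, so
# the TensorFlow energy/gradient pair above should agree with the NumPy
# E/dedx pair (both effectively use sigma = 1).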
#Lahmc params and implementation
epsilon = 0.1
beta = 0.2
num_look_ahead_steps = 1
num_leapfrog_steps = 1
#Arrays for comparison
np_sample_array = []
tflow_sample_array = []
for i in range(1):
Ainit = np.random.random((2,100))
# x = Ainit.copy()
# print(Ainit)
#Numpy sample
# print("Now handling Numpy Version: \n")
print("Calling np sampler")
print("\n\n")
sampler_np = LAHMC.LAHMC(Ainit.copy(),E, dedx, epsilon=epsilon, beta = beta, num_look_ahead_steps=10, num_leapfrog_steps = 10)
A_final_np = sampler_np.sample(100)
np_sample_array.append(A_final_np)
#tflow sample
# print("\n\nNow handling Tflow sample: \n\n")
print("Calling tflow sampler")
# print(Ainit)
np.random.seed(1234)
Ainit = np.random.random((2,100))
# print(x == Ainit)
sampler_tflow = LAHMC.LAHMC(Ainit.copy(),E_tflow, dedx_flow, epsilon=epsilon, beta=beta, num_look_ahead_steps=10, num_leapfrog_steps = 10)
# sampler_tflow = LAHMC.LAHMC(Ainit.copy(),E, dedx, epsilon=epsilon, beta = beta, num_look_ahead_steps=num_look_ahead_steps, num_leapfrog_steps = 10)
A_final_tflow = sampler_tflow.sample(100)
tflow_sample_array.append(A_final_tflow)
np_pairs = zip(np_energy_vals,np_deriv_vals)
tflow_pairs = zip(tflow_energy_vals, tflow_deriv_vals)
# print("\n\n Beginning analysis\n\n")
# for np_arg,tflow_arg in zip(np_energy_arg,tflow_energy_arg):
# print("Np argument is {0} \n tflow arg is {1} \n\n--------------------------\n".format(np_arg, tflow_arg))
count = 0
for a,b in zip(np_pairs,tflow_pairs):
np_e = a[0]
np_d = a[1]
t_e = b[0]
t_d = b[1]
print("Energy comparison")
print(np_e == t_e, "\n\n\n")
print("Derivative comparison\n\n")
print(np_d == t_d)
print()
print("On iteration {0} np energy value is {1} \n tflow_energy_value is {2} \n np_deriv is {3} \n tflow_deriv {4}\n-------------------\n".format(count, np_e,t_e,np_d,t_d))
count += 1
##Shittily compare by inspection
# count = 0
# for a,b in zip(np_sample_array,tflow_sample_array):
# print("\n-----------------------------/n")
# print("For count {0} np_sample is {1}\n\n while tflow_sample is {2} \n\n".format(count, a, b))
# count += 1
| gpl-3.0 |
guschmue/tensorflow | tensorflow/python/estimator/inputs/queues/feeding_queue_runner_test.py | 116 | 5164 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests `FeedingQueueRunner` using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.estimator.inputs.queues import feeding_functions as ff
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def get_rows(array, row_indices):
rows = [array[i] for i in row_indices]
return np.vstack(rows)
class FeedingQueueRunnerTestCase(test.TestCase):
"""Tests for `FeedingQueueRunner`."""
def testArrayFeeding(self):
with ops.Graph().as_default():
array = np.arange(32).reshape([16, 2])
q = ff._enqueue_data(array, capacity=100)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_dq = get_rows(array, indices)
dq = sess.run(dq_op)
np.testing.assert_array_equal(indices, dq[0])
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testArrayFeedingMultiThread(self):
with ops.Graph().as_default():
array = np.arange(256).reshape([128, 2])
q = ff._enqueue_data(array, capacity=128, num_threads=8, shuffle=True)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_dq = get_rows(array, indices)
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testPandasFeeding(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(32)
array2 = np.arange(32, 64)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(64, 96))
q = ff._enqueue_data(df, capacity=100)
batch_size = 5
dq_op = q.dequeue_many(5)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array1.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_df_indices = df.index[indices]
expected_rows = df.iloc[indices]
dq = sess.run(dq_op)
np.testing.assert_array_equal(expected_df_indices, dq[0])
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
def testPandasFeedingMultiThread(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(128, 256)
array2 = 2 * array1
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(128))
q = ff._enqueue_data(df, capacity=128, num_threads=8, shuffle=True)
batch_size = 5
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_rows = df.iloc[indices]
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
if __name__ == "__main__":
test.main()
| apache-2.0 |
crichardson17/starburst_atlas | Low_resolution_sims/DustFree_LowRes/Geneva_noRot_inst/Geneva_noRot_inst_age6/peaks_reader.py | 33 | 2761 | import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ---------------------------------------------------
#input files
for file in os.listdir('.'):
if file.endswith(".grd"):
inputfile = file
for file in os.listdir('.'):
if file.endswith(".txt"):
inputfile2 = file
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid = [];
with open(inputfile, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid.append(row);
grid = asarray(grid)
# ---------------------------------------------------
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines = [];
with open(inputfile2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines.append(row);
dataEmissionlines = asarray(dataEmissionlines)
print "import files complete"
# ---------------------------------------------------
#for grid
phi_values = grid[1:len(dataEmissionlines)+1,6]
hdens_values = grid[1:len(dataEmissionlines)+1,7]
#for lines
headers = headers[1:]
Emissionlines = dataEmissionlines[:, 1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(Emissionlines[0]),4))
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = Emissionlines[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
for j in range(len(Emissionlines[0])):
if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
else:
			concatenated_data[i,j] = 0
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
savetxt('peaks', max_values, delimiter='\t')
| gpl-2.0 |
dingocuster/scikit-learn | sklearn/mixture/tests/test_gmm.py | 200 | 17427 | import unittest
import copy
import sys
from nose.tools import assert_true
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_raises)
from scipy import stats
from sklearn import mixture
from sklearn.datasets.samples_generator import make_spd_matrix
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raise_message
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.externals.six.moves import cStringIO as StringIO
rng = np.random.RandomState(0)
def test_sample_gaussian():
# Test sample generation from mixture.sample_gaussian where covariance
# is diagonal, spherical and full
n_features, n_samples = 2, 300
axis = 1
mu = rng.randint(10) * rng.rand(n_features)
cv = (rng.rand(n_features) + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='diag', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(samples.var(axis), cv, atol=1.5))
# the same for spherical covariances
cv = (rng.rand() + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='spherical', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.5))
assert_true(np.allclose(
samples.var(axis), np.repeat(cv, n_features), atol=1.5))
# and for full covariances
A = rng.randn(n_features, n_features)
cv = np.dot(A.T, A) + np.eye(n_features)
samples = mixture.sample_gaussian(
mu, cv, covariance_type='full', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(np.cov(samples), cv, atol=2.5))
# Numerical stability check: in SciPy 0.12.0 at least, eigh may return
# tiny negative values in its second return value.
from sklearn.mixture import sample_gaussian
x = sample_gaussian([0, 0], [[4, 3], [1, .1]],
covariance_type='full', random_state=42)
print(x)
assert_true(np.isfinite(x).all())
def _naive_lmvnpdf_diag(X, mu, cv):
# slow and naive implementation of lmvnpdf
ref = np.empty((len(X), len(mu)))
stds = np.sqrt(cv)
for i, (m, std) in enumerate(zip(mu, stds)):
ref[:, i] = np.log(stats.norm.pdf(X, m, std)).sum(axis=1)
return ref
def test_lmvnpdf_diag():
# test a slow and naive implementation of lmvnpdf and
# compare it to the vectorized version (mixture.lmvnpdf) to test
# for correctness
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
ref = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, cv, 'diag')
assert_array_almost_equal(lpr, ref)
def test_lmvnpdf_spherical():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
spherecv = rng.rand(n_components, 1) ** 2 + 1
X = rng.randint(10) * rng.rand(n_samples, n_features)
cv = np.tile(spherecv, (n_features, 1))
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, spherecv,
'spherical')
assert_array_almost_equal(lpr, reference)
def test_lmvnpdf_full():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
fullcv = np.array([np.diag(x) for x in cv])
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, fullcv, 'full')
assert_array_almost_equal(lpr, reference)
def test_lvmpdf_full_cv_non_positive_definite():
n_features, n_samples = 2, 10
rng = np.random.RandomState(0)
X = rng.randint(10) * rng.rand(n_samples, n_features)
mu = np.mean(X, 0)
cv = np.array([[[-1, 0], [0, 1]]])
expected_message = "'covars' must be symmetric, positive-definite"
assert_raise_message(ValueError, expected_message,
mixture.log_multivariate_normal_density,
X, mu, cv, 'full')
def test_GMM_attributes():
n_components, n_features = 10, 4
covariance_type = 'diag'
g = mixture.GMM(n_components, covariance_type, random_state=rng)
weights = rng.rand(n_components)
weights = weights / weights.sum()
means = rng.randint(-20, 20, (n_components, n_features))
assert_true(g.n_components == n_components)
assert_true(g.covariance_type == covariance_type)
g.weights_ = weights
assert_array_almost_equal(g.weights_, weights)
g.means_ = means
assert_array_almost_equal(g.means_, means)
covars = (0.1 + 2 * rng.rand(n_components, n_features)) ** 2
g.covars_ = covars
assert_array_almost_equal(g.covars_, covars)
assert_raises(ValueError, g._set_covars, [])
assert_raises(ValueError, g._set_covars,
np.zeros((n_components - 2, n_features)))
assert_raises(ValueError, mixture.GMM, n_components=20,
covariance_type='badcovariance_type')
class GMMTester():
do_test_eval = True
def _setUp(self):
self.n_components = 10
self.n_features = 4
self.weights = rng.rand(self.n_components)
self.weights = self.weights / self.weights.sum()
self.means = rng.randint(-20, 20, (self.n_components, self.n_features))
self.threshold = -0.5
self.I = np.eye(self.n_features)
self.covars = {
'spherical': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'tied': (make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I),
'diag': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'full': np.array([make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I for x in range(self.n_components)])}
def test_eval(self):
if not self.do_test_eval:
return # DPGMM does not support setting the means and
        # covariances before fitting. There is no way of fixing this
# due to the variational parameters being more expressive than
# covariance matrices
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = self.covars[self.covariance_type]
g.weights_ = self.weights
gaussidx = np.repeat(np.arange(self.n_components), 5)
n_samples = len(gaussidx)
X = rng.randn(n_samples, self.n_features) + g.means_[gaussidx]
ll, responsibilities = g.score_samples(X)
self.assertEqual(len(ll), n_samples)
self.assertEqual(responsibilities.shape,
(n_samples, self.n_components))
assert_array_almost_equal(responsibilities.sum(axis=1),
np.ones(n_samples))
assert_array_equal(responsibilities.argmax(axis=1), gaussidx)
def test_sample(self, n=100):
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
g.weights_ = self.weights
samples = g.sample(n)
self.assertEqual(samples.shape, (n, self.n_features))
def test_train(self, params='wmc'):
g = mixture.GMM(n_components=self.n_components,
covariance_type=self.covariance_type)
g.weights_ = self.weights
g.means_ = self.means
g.covars_ = 20 * self.covars[self.covariance_type]
# Create a training set by sampling from the predefined distribution.
X = g.sample(n_samples=100)
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-1,
n_iter=1, init_params=params)
g.fit(X)
# Do one training iteration at a time so we can keep track of
# the log likelihood to make sure that it increases after each
# iteration.
trainll = []
for _ in range(5):
g.params = params
g.init_params = ''
g.fit(X)
trainll.append(self.score(g, X))
g.n_iter = 10
g.init_params = ''
g.params = params
g.fit(X) # finish fitting
# Note that the log likelihood will sometimes decrease by a
# very small amount after it has more or less converged due to
# the addition of min_covar to the covariance (to prevent
# underflow). This is why the threshold is set to -0.5
# instead of 0.
delta_min = np.diff(trainll).min()
self.assertTrue(
delta_min > self.threshold,
"The min nll increase is %f which is lower than the admissible"
" threshold of %f, for model %s. The likelihoods are %s."
% (delta_min, self.threshold, self.covariance_type, trainll))
def test_train_degenerate(self, params='wmc'):
# Train on degenerate data with 0 in some dimensions
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, self.n_features)
X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-3, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
self.assertTrue(np.sum(np.abs(trainll / 100 / X.shape[1])) < 5)
def test_train_1d(self, params='wmc'):
# Train on 1-D data
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, 1)
# X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-7, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
if isinstance(g, mixture.DPGMM):
self.assertTrue(np.sum(np.abs(trainll / 100)) < 5)
else:
self.assertTrue(np.sum(np.abs(trainll / 100)) < 2)
def score(self, g, X):
return g.score(X).sum()
class TestGMMWithSphericalCovars(unittest.TestCase, GMMTester):
covariance_type = 'spherical'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithDiagonalCovars(unittest.TestCase, GMMTester):
covariance_type = 'diag'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithTiedCovars(unittest.TestCase, GMMTester):
covariance_type = 'tied'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithFullCovars(unittest.TestCase, GMMTester):
covariance_type = 'full'
model = mixture.GMM
setUp = GMMTester._setUp
def test_multiple_init():
# Test that multiple inits does not much worse than a single one
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, covariance_type='spherical',
random_state=rng, min_covar=1e-7, n_iter=5)
train1 = g.fit(X).score(X).sum()
g.n_init = 5
train2 = g.fit(X).score(X).sum()
assert_true(train2 >= train1 - 1.e-2)
def test_n_parameters():
# Test that the right number of parameters is estimated
n_samples, n_dim, n_components = 7, 5, 2
X = rng.randn(n_samples, n_dim)
n_params = {'spherical': 13, 'diag': 21, 'tied': 26, 'full': 41}
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_true(g._n_parameters() == n_params[cv_type])
def test_1d_1component():
# Test all of the covariance_types return the same BIC score for
# 1-dimensional, 1 component fits.
n_samples, n_dim, n_components = 100, 1, 1
X = rng.randn(n_samples, n_dim)
g_full = mixture.GMM(n_components=n_components, covariance_type='full',
random_state=rng, min_covar=1e-7, n_iter=1)
g_full.fit(X)
g_full_bic = g_full.bic(X)
for cv_type in ['tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_array_almost_equal(g.bic(X), g_full_bic)
def assert_fit_predict_correct(model, X):
model2 = copy.deepcopy(model)
predictions_1 = model.fit(X).predict(X)
predictions_2 = model2.fit_predict(X)
assert adjusted_rand_score(predictions_1, predictions_2) == 1.0
def test_fit_predict():
"""
test that gmm.fit_predict is equivalent to gmm.fit + gmm.predict
"""
lrng = np.random.RandomState(101)
n_samples, n_dim, n_comps = 100, 2, 2
mu = np.array([[8, 8]])
component_0 = lrng.randn(n_samples, n_dim)
component_1 = lrng.randn(n_samples, n_dim) + mu
X = np.vstack((component_0, component_1))
for m_constructor in (mixture.GMM, mixture.VBGMM, mixture.DPGMM):
model = m_constructor(n_components=n_comps, covariance_type='full',
min_covar=1e-7, n_iter=5,
random_state=np.random.RandomState(0))
assert_fit_predict_correct(model, X)
model = mixture.GMM(n_components=n_comps, n_iter=0)
z = model.fit_predict(X)
assert np.all(z == 0), "Quick Initialization Failed!"
def test_aic():
# Test the aic and bic criteria
n_samples, n_dim, n_components = 50, 3, 2
X = rng.randn(n_samples, n_dim)
SGH = 0.5 * (X.var() + np.log(2 * np.pi)) # standard gaussian entropy
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7)
g.fit(X)
aic = 2 * n_samples * SGH * n_dim + 2 * g._n_parameters()
bic = (2 * n_samples * SGH * n_dim +
np.log(n_samples) * g._n_parameters())
bound = n_dim * 3. / np.sqrt(n_samples)
assert_true(np.abs(g.aic(X) - aic) / n_samples < bound)
assert_true(np.abs(g.bic(X) - bic) / n_samples < bound)
def check_positive_definite_covars(covariance_type):
r"""Test that covariance matrices do not become non positive definite
Due to the accumulation of round-off errors, the computation of the
covariance matrices during the learning phase could lead to non-positive
definite covariance matrices. Namely the use of the formula:
.. math:: C = (\sum_i w_i x_i x_i^T) - \mu \mu^T
instead of:
.. math:: C = \sum_i w_i (x_i - \mu)(x_i - \mu)^T
    while mathematically equivalent, was observed to raise a ``LinAlgError``
    exception when computing a ``GMM`` with full covariance matrices and fixed mean.
This function ensures that some later optimization will not introduce the
problem again.
"""
rng = np.random.RandomState(1)
# we build a dataset with 2 2d component. The components are unbalanced
# (respective weights 0.9 and 0.1)
X = rng.randn(100, 2)
X[-10:] += (3, 3) # Shift the 10 last points
gmm = mixture.GMM(2, params="wc", covariance_type=covariance_type,
min_covar=1e-3)
# This is a non-regression test for issue #2640. The following call used
# to trigger:
# numpy.linalg.linalg.LinAlgError: 2-th leading minor not positive definite
gmm.fit(X)
if covariance_type == "diag" or covariance_type == "spherical":
assert_greater(gmm.covars_.min(), 0)
else:
if covariance_type == "tied":
covs = [gmm.covars_]
else:
covs = gmm.covars_
for c in covs:
assert_greater(np.linalg.det(c), 0)
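# Illustrative helper (not used by the tests above): the two covariance
# formulas from the docstring of check_positive_definite_covars, written out
# with NumPy under the assumption that the weights sum to one and that mu is
# the weighted mean of X. With exact arithmetic both forms agree; in floating
# point the first form can lose positive definiteness, which is what the test
# above guards against.
def _weighted_covariance_two_ways(X, w, mu):
    # E[x x^T] - mu mu^T  (numerically fragile)
    c_fragile = np.dot(w * X.T, X) - np.outer(mu, mu)
    # sum_i w_i (x_i - mu)(x_i - mu)^T  (numerically stable)
    centered = X - mu
    c_stable = np.dot(w * centered.T, centered)
    return c_fragile, c_stable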
def test_positive_definite_covars():
# Check positive definiteness for all covariance types
for covariance_type in ["full", "tied", "diag", "spherical"]:
yield check_positive_definite_covars, covariance_type
def test_verbose_first_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
def test_verbose_second_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
| bsd-3-clause |
nvoron23/statsmodels | statsmodels/datasets/statecrime/data.py | 25 | 3128 | #! /usr/bin/env python
"""Statewide Crime Data"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """Public domain."""
TITLE = """Statewide Crime Data 2009"""
SOURCE = """
All data is for 2009 and was obtained from the American Statistical Abstracts except as indicated below.
"""
DESCRSHORT = """State crime data 2009"""
DESCRLONG = DESCRSHORT
#suggested notes
NOTE = """::
Number of observations: 51
Number of variables: 8
Variable name definitions:
state
All 50 states plus DC.
violent
Rate of violent crimes / 100,000 population. Includes murder, forcible
rape, robbery, and aggravated assault. Numbers for Illinois and
Minnesota do not include forcible rapes. Footnote included with the
American Statistical Abstract table reads:
"The data collection methodology for the offense of forcible
rape used by the Illinois and the Minnesota state Uniform Crime
Reporting (UCR) Programs (with the exception of Rockford, Illinois,
and Minneapolis and St. Paul, Minnesota) does not comply with
national UCR guidelines. Consequently, their state figures for
forcible rape and violent crime (of which forcible rape is a part)
are not published in this table."
murder
Rate of murders / 100,000 population.
hs_grad
        Percent of population having graduated from high school or higher.
poverty
% of individuals below the poverty line
white
Percent of population that is one race - white only. From 2009 American
Community Survey
single
        Calculated from the 2009 1-year American Community Survey obtained
        from Census. Variable is Male householder, no wife present, family
        household combined with Female householder, no husband present, family
        household, divided by the total number of Family households.
urban
% of population in Urbanized Areas as of 2010 Census. Urbanized
        Areas are areas of 50,000 or more people."""
import numpy as np
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
"""
Load the statecrime data and return a Dataset class instance.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
##### SET THE INDICES #####
#NOTE: None for exog_idx is the complement of endog_idx
return du.process_recarray(data, endog_idx=2, exog_idx=[7, 4, 3, 5],
dtype=float)
def load_pandas():
data = _get_data()
##### SET THE INDICES #####
#NOTE: None for exog_idx is the complement of endog_idx
return du.process_recarray_pandas(data, endog_idx=2, exog_idx=[7,4,3,5],
dtype=float, index_idx=0)
def _get_data():
filepath = dirname(abspath(__file__))
##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####
data = np.recfromtxt(open(filepath + '/statecrime.csv', 'rb'),
delimiter=",", names=True, dtype=None)
return data
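# Hypothetical usage sketch (column names inferred from the NOTE above; the
# lines below are not executed as part of this module):
#
# >>> from statsmodels.datasets import statecrime
# >>> dataset = statecrime.load_pandas()
# >>> dataset.endog.head()    # murder rate (endog_idx=2)
# >>> dataset.exog.columns    # urban, poverty, hs_grad, white (exog_idx=[7, 4, 3, 5])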
| bsd-3-clause |
croxis/SpaceDrive | spacedrive/renderpipeline/rpcore/mount_manager.py | 1 | 12750 | """
RenderPipeline
Copyright (c) 2014-2016 tobspr <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import os
import atexit
from panda3d.core import Filename, VirtualFileSystem, get_model_path
from panda3d.core import VirtualFileMountRamdisk
from direct.stdpy.file import join, isdir, isfile
from rpcore.rpobject import RPObject
class MountManager(RPObject):
""" This classes mounts the required directories for the pipeline to run.
This is important if the pipeline is in a subdirectory for example. The
mount manager also handles the lock, storing the current PID into a file
named instance.pid and ensuring that there is only 1 instance of the
pipeline running at one time. """
def __init__(self, pipeline):
""" Creates a new mount manager """
RPObject.__init__(self)
self._pipeline = pipeline
self._base_path = self._find_basepath()
self._lock_file = "instance.pid"
self._model_paths = []
self._write_path = None
self._mounted = False
self._do_cleanup = True
self._config_dir = None
self.debug("Auto-Detected base path to", self._base_path)
atexit.register(self._on_exit_cleanup)
@property
def write_path(self):
""" Returns the write path previously set with set_write_path, or None
if no write path has been set yet. """
return self._write_path
@write_path.setter
def write_path(self, pth):
""" Set a writable directory for generated files. This can be a string
path name or a multifile with openReadWrite(). If no pathname is set
then the root directory is used.
This feature is usually only used for debugging, the pipeline will dump
all generated shaders and other temporary files to that directory.
If you don't need this, you can use set_virtual_write_path(), which
will create the temporary path in the VirtualFileSystem, thus not
writing any files to disk. """
if pth is None:
self._write_path = None
self._lock_file = "instance.pid"
else:
self._write_path = Filename.from_os_specific(pth).get_fullpath()
self._lock_file = join(self._write_path, "instance.pid")
@property
def base_path(self):
""" Returns the base path of the pipeline. This returns the path previously
set with set_base_path, or the auto detected base path if no path was
set yet """
return self._base_path
@base_path.setter
def base_path(self, pth):
""" Sets the path where the base shaders and models on are contained. This
is usually the root of the rendering pipeline folder """
self.debug("Set base path to '" + pth + "'")
self._base_path = Filename.from_os_specific(pth).get_fullpath()
@property
def config_dir(self):
""" Returns the config directory previously set with set_config_dir, or
        None if no directory was set yet """
        return self._config_dir
@config_dir.setter
def config_dir(self, pth):
""" Sets the path to the config directory. Usually this is the config/
directory located in the pipeline root directory. However, if you want
to load your own configuration files, you can specify a custom config
directory here. Your configuration directory should contain the
pipeline.yaml, plugins.yaml, daytime.yaml and configuration.prc.
It is highly recommended you use the pipeline provided config files, modify
them to your needs, and as soon as you think they are in a final version,
copy them over. Please also notice that you should keep your config files
up-to-date, e.g. when new configuration variables are added.
Also, specifying a custom configuration_dir disables the functionality
of the PluginConfigurator and DayTime editor, since they operate on the
        pipeline's default config files.
Set the directory to None to use the default directory. """
self._config_dir = Filename.from_os_specific(pth).get_fullpath()
@property
def do_cleanup(self):
""" Returns whether the mount manager will attempt to cleanup the
generated files after the application stopped running """
return self._do_cleanup
@do_cleanup.setter
def do_cleanup(self, cleanup):
""" Sets whether to cleanup the tempfolder after the application stopped.
This is mostly useful for debugging, to analyze the generated tempfiles
even after the pipeline stopped running """
self._do_cleanup = cleanup
def get_lock(self):
""" Checks if we are the only instance running. If there is no instance
running, write the current PID to the instance.pid file. If the
instance file exists, checks if the specified process still runs. This
way only 1 instance of the pipeline can be run at one time. """
# Check if there is a lockfile at all
if isfile(self._lock_file):
# Read process id from lockfile
try:
with open(self._lock_file, "r") as handle:
pid = int(handle.readline())
except IOError as msg:
self.error("Failed to read lockfile:", msg)
return False
# Check if the process is still running
if self._is_pid_running(pid):
self.error("Found running instance")
return False
# Process is not running anymore, we can write the lockfile
self._write_lock()
return True
else:
# When there is no lockfile, just create it and continue
self._write_lock()
return True
def _find_basepath(self):
""" Attempts to find the pipeline base path by looking at the location
of this file """
pth = os.path.abspath(join(os.path.dirname(os.path.realpath(__file__)), ".."))
return Filename.from_os_specific(pth).get_fullpath()
def _is_pid_running(self, pid):
""" Checks if a pid is still running """
# Code snippet from:
# http://stackoverflow.com/questions/568271/how-to-check-if-there-exists-a-process-with-a-given-pid
if os.name == 'posix':
import errno
if pid < 0:
return False
try:
os.kill(pid, 0)
except OSError as err:
return err.errno == errno.EPERM
else:
return True
else:
import ctypes
kernel32 = ctypes.windll.kernel32
process = kernel32.OpenProcess(0x100000, 0, pid)
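            # 0x100000 is the SYNCHRONIZE access right; OpenProcess returns a
            # non-zero handle only if a process with this pid still exists.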
if process != 0:
kernel32.CloseHandle(process)
return True
else:
return False
def _write_lock(self):
""" Internal method to write the current process id to the instance.pid
lockfile. This is used to ensure no second instance of the pipeline is
running. """
with open(self._lock_file, "w") as handle:
handle.write(str(os.getpid()))
def _try_remove(self, fname):
""" Tries to remove the specified filename, returns either True or False
depending if we had success or not """
try:
os.remove(fname)
return True
except IOError:
pass
except WindowsError:
pass
return False
def _on_exit_cleanup(self):
""" Gets called when the application exists """
if self._do_cleanup:
self.debug("Cleaning up ..")
if self._write_path is not None:
# Try removing the lockfile
self._try_remove(self._lock_file)
# Check for further tempfiles in the write path
# We explicitly use os.listdir here instead of Panda3D's listdir,
# to work with actual paths
for fname in os.listdir(self._write_path):
pth = join(self._write_path, fname)
# Tempfiles from the pipeline start with "$$" to distinguish
# them from user created files
if isfile(pth) and fname.startswith("$$"):
self._try_remove(pth)
# Delete the write path if no files are left
if len(os.listdir(self._write_path)) < 1:
try:
os.removedirs(self._write_path)
except IOError:
pass
@property
def is_mounted(self):
""" Returns whether the MountManager was already mounted by calling
mount() """
return self._mounted
def mount(self):
""" Inits the VFS Mounts. This creates the following virtual directory
structure, from which all files can be located:
/$$rp/ (Mounted from the render pipeline base directory)
+ rpcore/
+ shader/
+ ...
/$rpconfig/ (Mounted from config/, may be set by user)
+ pipeline.yaml
+ ...
/$$rptemp/ (Either ramdisk or user specified)
+ day_time_config
+ shader_auto_config
+ ...
/$$rpshader/ (Link to /$$rp/rpcore/shader)
"""
self.debug("Setting up virtual filesystem")
self._mounted = True
convert_path = lambda pth: Filename.from_os_specific(pth).get_fullpath()
vfs = VirtualFileSystem.get_global_ptr()
# Mount config dir as /$$rpconfig
if self._config_dir is None:
config_dir = convert_path(join(self._base_path, "config/"))
self.debug("Mounting auto-detected config dir:", config_dir)
vfs.mount(config_dir, "/$$rpconfig", 0)
else:
self.debug("Mounting custom config dir:", self._config_dir)
vfs.mount(convert_path(self._config_dir), "/$$rpconfig", 0)
# Mount directory structure
vfs.mount(convert_path(self._base_path), "/$$rp", 0)
vfs.mount(convert_path(join(self._base_path, "rpcore/shader")), "/$$rp/shader", 0)
vfs.mount(convert_path(join(self._base_path, "effects")), "effects", 0)
# Mount the pipeline temp path:
# If no write path is specified, use a virtual ramdisk
if self._write_path is None:
self.debug("Mounting ramdisk as /$$rptemp")
vfs.mount(VirtualFileMountRamdisk(), "/$$rptemp", 0)
else:
# In case an actual write path is specified:
# Ensure the pipeline write path exists, and if not, create it
if not isdir(self._write_path):
self.debug("Creating temporary path, since it does not exist yet")
try:
os.makedirs(self._write_path)
except IOError as msg:
self.fatal("Failed to create temporary path:", msg)
self.debug("Mounting", self._write_path, "as /$$rptemp")
vfs.mount(convert_path(self._write_path), '/$$rptemp', 0)
get_model_path().prepend_directory("/$$rp")
get_model_path().prepend_directory("/$$rp/shader")
get_model_path().prepend_directory("/$$rptemp")
def unmount(self):
""" Unmounts the VFS """
raise NotImplementedError("TODO")
| mit |
StevePny/NOAA-GFDL-MOM6-examples | tools/analysis/MLD_003.py | 2 | 4099 | #!/usr/bin/env python
import netCDF4
import numpy
import m6plot
import matplotlib.pyplot as plt
try: import argparse
except: raise Exception('This version of python is not new enough. python 2.7 or newer is required.')
parser = argparse.ArgumentParser(description='''Script for plotting annual-min/max mixed layer depth.''')
parser.add_argument('monthly_file', type=str, help='''Monthly-averaged file containing at least 12 months of MLD_003.''')
parser.add_argument('-l','--label', type=str, default='', help='''Label to add to the plot.''')
parser.add_argument('-o','--outdir', type=str, default='.', help='''Directory in which to place plots.''')
parser.add_argument('-od','--obsdata', type=str,
default='/archive/gold/datasets/obs/Hosada2010_MLD_climatology.v20140515.nc',
help='''File containing the observational MLD data (Hosoda et al., 2010).''')
parser.add_argument('-g','--gridspecdir', type=str, required=True,
help='''Directory containing mosaic/grid-spec files (ocean_hgrid.nc and ocean_mask.nc).''')
cmdLineArgs = parser.parse_args()
rootGroup = netCDF4.Dataset( cmdLineArgs.monthly_file )
if 'MLD_003' not in rootGroup.variables: raise Exception('Could not find "MLD_003" in file "%s"'%(cmdLineArgs.monthly_file))
x = netCDF4.Dataset(cmdLineArgs.gridspecdir+'/ocean_hgrid.nc').variables['x'][::2,::2]
y = netCDF4.Dataset(cmdLineArgs.gridspecdir+'/ocean_hgrid.nc').variables['y'][::2,::2]
msk = netCDF4.Dataset(cmdLineArgs.gridspecdir+'/ocean_mask.nc').variables['mask'][:]
area = msk*netCDF4.Dataset(cmdLineArgs.gridspecdir+'/ocean_hgrid.nc').variables['area'][:,:].reshape([msk.shape[0], 2, msk.shape[1], 2]).sum(axis=-3).sum(axis=-1)
variable = rootGroup.variables['MLD_003']
shape = variable.shape
MLD = variable[:].reshape(shape[0]/12,12,shape[1],shape[2]).mean(axis=0)
MLD_obs = netCDF4.Dataset(cmdLineArgs.obsdata).variables['MLD'][:]
x_obs = netCDF4.Dataset(cmdLineArgs.obsdata).variables['LONGITUDE'][:]
y_obs = netCDF4.Dataset(cmdLineArgs.obsdata).variables['LATITUDE'][:]
ciMin = m6plot.linCI(0,95,5)
ciMax = m6plot.linCI(0,680,20)
# Plot of shallowest model MLD (summer)
m6plot.xyplot( MLD.min(axis=0), x, y, area=area,
suptitle=rootGroup.title+' '+cmdLineArgs.label, title='Annual-minimum MLD$_{0.03}$ [m]',
clim=ciMin, extend='max', colormap='dunneRainbow',
save=cmdLineArgs.outdir+'/MLD_003_minimum.png')
# 2-panel plot of shallowest model MLD + obs (summer)
m6plot.setFigureSize(aspect=[3,3], verticalresolution=976, npanels=0)
ax1 = plt.subplot(2,1,1)
m6plot.xyplot( numpy.roll(MLD_obs.min(axis=0),300,axis=-1), x_obs-300, y_obs,
suptitle=rootGroup.title+' '+cmdLineArgs.label, title='Hosoda et al., 2010, annual-minimum MLD$_{0.03}$ [m]',
clim=ciMin, extend='max', colormap='dunneRainbow',
axis=ax1)
ax2 = plt.subplot(2,1,2)
m6plot.xyplot( MLD.min(axis=0), x, y, area=area,
suptitle=rootGroup.title+' '+cmdLineArgs.label, title='Annual-minimum MLD$_{0.03}$ [m]',
clim=ciMin, extend='max', colormap='dunneRainbow',
axis=ax2,
save=cmdLineArgs.outdir+'/MLD_003_minimum.2_panel.png')
# Plot of deepest model MLD (winter)
m6plot.xyplot( MLD.max(axis=0), x, y, area=area,
suptitle=rootGroup.title+' '+cmdLineArgs.label, title='Annual-maximum MLD$_{0.03}$ [m]',
clim=ciMax, extend='max', colormap='dunneRainbow',
save=cmdLineArgs.outdir+'/MLD_003_maximum.png')
# 2-panel plot of deepest model MLD + obs (winter)
m6plot.setFigureSize(aspect=[3,3], verticalresolution=976, npanels=0)
ax1 = plt.subplot(2,1,1)
m6plot.xyplot( numpy.roll(MLD_obs.max(axis=0),300,axis=-1), x_obs-300, y_obs,
suptitle=rootGroup.title+' '+cmdLineArgs.label, title='Hosoda et al., 2010, annual-maximum MLD$_{0.03}$ [m]',
clim=ciMax, extend='max', colormap='dunneRainbow',
axis=ax1)
ax2 = plt.subplot(2,1,2)
m6plot.xyplot( MLD.max(axis=0), x, y, area=area,
suptitle=rootGroup.title+' '+cmdLineArgs.label, title='Annual-maximum MLD$_{0.03}$ [m]',
clim=ciMax, extend='max', colormap='dunneRainbow',
axis=ax2,
save=cmdLineArgs.outdir+'/MLD_003_maximum.2_panel.png')
| gpl-3.0 |
nlpub/mnogoznal | eval/measure.py | 1 | 3588 | #!/usr/bin/env python3
import argparse
import csv
from collections import defaultdict, OrderedDict
from concurrent.futures import ProcessPoolExecutor
from sklearn.metrics import adjusted_rand_score
from sklearn.metrics import homogeneity_score, completeness_score, v_measure_score
parser = argparse.ArgumentParser(description='SemEval 2010 WSI&D Task V-Measure & ARI')
parser.add_argument('--gold', required=True)
parser.add_argument('--measure', choices=('vmeasure', 'ari'), default='vmeasure', type=str)
parser.add_argument('--average', choices=('instances', 'words'), default='instances', type=str)
parser.add_argument('path', nargs='+')
args = parser.parse_args()
def parse(filename):
dataset = defaultdict(dict)
with open(filename, 'r', encoding='UTF-8') as f:
reader = csv.reader(f, delimiter=' ', quoting=csv.QUOTE_NONE)
for lemma, instance, sense in reader:
dataset[lemma][instance] = sense
return dataset
with ProcessPoolExecutor() as executor:
paths = args.path + [args.gold]
systems = {path: wsd for path, wsd in zip(paths, executor.map(parse, paths))}
gold = systems.pop(args.gold)
lemmas = sorted(gold.keys())
total = sum(len(values) for values in gold.values())
def evaluate(path):
system = systems[path]
measure, scores, clusters_gold, clusters_system = 0., OrderedDict(), [], []
for lemma in lemmas:
instances = sorted(gold[lemma].keys())
senses_gold = {sid: i for i, sid in enumerate(sorted(set(gold[lemma].values())))}
senses_system = {sid: i for i, sid in enumerate(sorted(set(system[lemma].values())))}
clusters_gold = [senses_gold[gold[lemma][instance]] for instance in instances]
clusters_system = [senses_system[system[lemma][instance]] for instance in instances]
if 'vmeasure' == args.measure:
if 'instances' == args.average:
measure += v_measure_score(clusters_gold, clusters_system) * len(instances) / total
else:
measure += v_measure_score(clusters_gold, clusters_system)
scores[lemma] = (
homogeneity_score(clusters_gold, clusters_system),
completeness_score(clusters_gold, clusters_system),
v_measure_score(clusters_gold, clusters_system)
)
else:
scores[lemma] = adjusted_rand_score(clusters_gold, clusters_system)
if 'instances' == args.average:
measure += scores[lemma] * len(instances) / total
else:
measure += scores[lemma]
if 'words' == args.average:
measure /= len(lemmas)
return measure, scores
with ProcessPoolExecutor() as executor:
results = {path: result for path, result in zip(systems, executor.map(evaluate, systems))}
if 'vmeasure' == args.measure:
print('\t'.join(('path', 'lemma', 'homogeneity', 'completeness', 'vmeasure')))
for path, (v_measure, scores) in results.items():
print('\t'.join((path, '', '', '', '%.6f' % v_measure)))
for lemma, (homogeneity, completeness, v_measure) in scores.items():
print('\t'.join((
path,
lemma,
'%.6f' % homogeneity,
'%.6f' % completeness,
'%.6f' % v_measure
)))
else:
print('\t'.join(('path', 'lemma', 'ari')))
for path, (ari, scores) in results.items():
print('\t'.join((path, '', '%.6f' % ari)))
for lemma, ari in scores.items():
print('\t'.join((path, lemma, '%.6f' % ari)))
| mit |
nitin-cherian/LifeLongLearning | Python/PythonProgrammingLanguage/Encapsulation/encap_env/lib/python3.5/site-packages/IPython/core/magics/pylab.py | 4 | 6517 | """Implementation of magic functions for matplotlib/pylab support.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 The IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Our own packages
from traitlets.config.application import Application
from IPython.core import magic_arguments
from IPython.core.magic import Magics, magics_class, line_magic
from IPython.testing.skipdoctest import skip_doctest
from warnings import warn
from IPython.core.pylabtools import backends
#-----------------------------------------------------------------------------
# Magic implementation classes
#-----------------------------------------------------------------------------
magic_gui_arg = magic_arguments.argument(
'gui', nargs='?',
help="""Name of the matplotlib backend to use %s.
If given, the corresponding matplotlib backend is used,
otherwise it will be matplotlib's default
(which you can set in your matplotlib config file).
""" % str(tuple(sorted(backends.keys())))
)
@magics_class
class PylabMagics(Magics):
"""Magics related to matplotlib's pylab support"""
@skip_doctest
@line_magic
@magic_arguments.magic_arguments()
@magic_arguments.argument('-l', '--list', action='store_true',
help='Show available matplotlib backends')
@magic_gui_arg
def matplotlib(self, line=''):
"""Set up matplotlib to work interactively.
This function lets you activate matplotlib interactive support
at any point during an IPython session. It does not import anything
into the interactive namespace.
If you are using the inline matplotlib backend in the IPython Notebook
you can set which figure formats are enabled using the following::
In [1]: from IPython.display import set_matplotlib_formats
In [2]: set_matplotlib_formats('pdf', 'svg')
The default for inline figures sets `bbox_inches` to 'tight'. This can
cause discrepancies between the displayed image and the identical
image created using `savefig`. This behavior can be disabled using the
`%config` magic::
In [3]: %config InlineBackend.print_figure_kwargs = {'bbox_inches':None}
In addition, see the docstring of
`IPython.display.set_matplotlib_formats` and
`IPython.display.set_matplotlib_close` for more information on
changing additional behaviors of the inline backend.
Examples
--------
To enable the inline backend for usage with the IPython Notebook::
In [1]: %matplotlib inline
In this case, where the matplotlib default is TkAgg::
In [2]: %matplotlib
Using matplotlib backend: TkAgg
But you can explicitly request a different GUI backend::
In [3]: %matplotlib qt
You can list the available backends using the -l/--list option::
In [4]: %matplotlib --list
Available matplotlib backends: ['osx', 'qt4', 'qt5', 'gtk3', 'notebook', 'wx', 'qt', 'nbagg',
'gtk', 'tk', 'inline']
"""
args = magic_arguments.parse_argstring(self.matplotlib, line)
if args.list:
backends_list = list(backends.keys())
print("Available matplotlib backends: %s" % backends_list)
else:
gui, backend = self.shell.enable_matplotlib(args.gui)
self._show_matplotlib_backend(args.gui, backend)
@skip_doctest
@line_magic
@magic_arguments.magic_arguments()
@magic_arguments.argument(
'--no-import-all', action='store_true', default=None,
help="""Prevent IPython from performing ``import *`` into the interactive namespace.
You can govern the default behavior of this flag with the
InteractiveShellApp.pylab_import_all configurable.
"""
)
@magic_gui_arg
def pylab(self, line=''):
"""Load numpy and matplotlib to work interactively.
This function lets you activate pylab (matplotlib, numpy and
interactive support) at any point during an IPython session.
%pylab makes the following imports::
import numpy
import matplotlib
from matplotlib import pylab, mlab, pyplot
np = numpy
plt = pyplot
from IPython.display import display
from IPython.core.pylabtools import figsize, getfigs
from pylab import *
from numpy import *
If you pass `--no-import-all`, the last two `*` imports will be excluded.
See the %matplotlib magic for more details about activating matplotlib
without affecting the interactive namespace.
"""
args = magic_arguments.parse_argstring(self.pylab, line)
if args.no_import_all is None:
# get default from Application
if Application.initialized():
app = Application.instance()
try:
import_all = app.pylab_import_all
except AttributeError:
import_all = True
else:
# nothing specified, no app - default True
import_all = True
else:
# invert no-import flag
import_all = not args.no_import_all
gui, backend, clobbered = self.shell.enable_pylab(args.gui, import_all=import_all)
self._show_matplotlib_backend(args.gui, backend)
print ("Populating the interactive namespace from numpy and matplotlib")
if clobbered:
warn("pylab import has clobbered these variables: %s" % clobbered +
"\n`%matplotlib` prevents importing * from pylab and numpy"
)
def _show_matplotlib_backend(self, gui, backend):
"""show matplotlib message backend message"""
if not gui or gui == 'auto':
print("Using matplotlib backend: %s" % backend)
| mit |
ravenshooter/BA_Analysis | Figures.py | 1 | 10091 | import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.cm
from scipy.signal.windows import gaussian
import sklearn.metrics
from DataSet import createDataSetFromFile
from Utils import getProjectPath
from Evaluation import getSpecificColorMap, plotMinErrors, plotAlongAxisErrors,\
plotMinErrorsSqueezed
def createTargetShapeDelayFigure():
gestureLen = 20
gestureSig = np.concatenate([np.zeros((10,3)),np.random.normal(size=(gestureLen,3))*np.atleast_2d(gaussian(20, 3, 0)*2).T,np.zeros((10,3))],0)
target = np.concatenate([np.zeros((10,1)),np.ones((gestureLen,1)),np.zeros((10,1))],0)
target_gaus = np.concatenate([np.zeros((5,1)),np.atleast_2d(gaussian(gestureLen+10,5)).T,np.zeros((5,1))],0)
target_delayed = np.concatenate([np.zeros((28,1)),np.ones((5,1)),np.zeros((7,1))],0)
fig, ax = plt.subplots(1, 3, sharey=True, sharex=True, figsize=(20,5))
plt.ylim(-5,5)
for axn in ax:
axn.plot(gestureSig,label='input signal')
axn.plot([0,40],[0,0],c='black',linewidth=1)
ax[0].plot(target,label='target',c='red',linewidth=2)
ax[0].fill_between(np.arange(0,40),0,target.squeeze(),facecolor='red',alpha=0.5)
ax[0].set_title('(a)')
ax[0].set_xlabel('timestep')
ax[1].plot(target_gaus,label='target',c='red',linewidth=2)
ax[1].fill_between(np.arange(0,40),0,target_gaus.squeeze(),facecolor='red',alpha=0.5)
ax[1].set_title('(b)')
ax[1].set_xlabel('timestep')
ax[2].plot(target_delayed,label='target',c='red',linewidth=2)
ax[2].fill_between(np.arange(0,40),0,target_delayed.squeeze(),facecolor='red',alpha=0.5)
ax[2].set_title('(c)')
ax[2].set_xlabel('timestep')
#plt.legend(bbox_to_anchor=(1., 1.05), loc=1, borderaxespad=0.)
plt.tight_layout()
projectPath = 'C:\Users\Steve\Documents\Uni\BAThesis\\src\\targetShapeDelay2.pdf'
pp = PdfPages(projectPath)
pp.savefig()
pp.close()
def createEvaluationProblem():
gestureLen = 20
target = np.concatenate([np.ones((gestureLen+1,1)),np.zeros((9,1)),np.ones((gestureLen,1)),np.zeros((40,1))],0)
target2 = np.concatenate([np.zeros((70,1)),np.ones((gestureLen,1))],0)
pred1 = np.concatenate([np.ones((8,1)),np.zeros((5,1)),np.ones((8,1)),np.zeros((69,1))],0)
pred2 = np.concatenate([np.zeros((7,1)),np.ones((7,1)),np.zeros((66,1)),np.ones((10,1))],0)
zero = np.zeros((100,1))
plt.figure(figsize=(20,5))
#plt.plot(target, label='Target Gesture 1', color='red', linewidth=2, linestyle='--')
#plt.plot(pred1, label='Pred. Gesture 1', color='red', linewidth=2, linestyle='-')
#plt.plot(pred2, label='Pred. Gesture 2', color='blue', linewidth=2, linestyle='-')
#plt.fill_between(np.arange(0,70), 0, 1, label='Target Gesture 1', facecolor='red', alpha=0.2, where=np.squeeze(target>0))
#plt.fill_between(np.arange(0,70), 0, np.squeeze(pred1), label='Pred. Gesture 1', facecolor='red', where=np.squeeze(pred1>=pred2))
#plt.fill_between(np.arange(0,70), 0, np.squeeze(pred2), label='Pred. Gesture 2', facecolor='blue', where=np.squeeze(pred2>=pred1))
plt.plot(np.ones((90,1))*0.5,color='black')
plt.plot(np.ones((90,1))*1,color='black')
plt.plot(np.ones((90,1))*-0.5,color='black')
plt.plot(np.ones((90,1))*-1,color='black')
plt.fill_between(np.arange(0,90), 0.5, 1, label='no gesture', facecolor='grey', alpha=0.4)
plt.fill_between(np.arange(0,90), 0.5, 1, facecolor='red', alpha=0.8, where=np.squeeze(target>0))
plt.fill_between(np.arange(0,90), 0.5, 1, facecolor='blue', alpha=0.8, where=np.squeeze(target2>0))
plt.fill_between(np.arange(0,90), -0.5, -1, facecolor='grey', alpha=0.4)
plt.fill_between(np.arange(0,90), -0.5, -1, label='Gesture 1', facecolor='red', where=np.squeeze(pred1==1))
plt.fill_between(np.arange(0,90), -0.50, -1, label='Gesture 2', facecolor='blue', where=np.squeeze(pred2==1))
plt.fill_between(np.arange(0,90), -0.2, 0.2, facecolor='yellow', alpha=0.2)
plt.annotate('TP',xy=(3.5,-0.1))
plt.plot([3,10],[-0.75,0.75],linewidth=3, color='black')
plt.annotate('WG',xy=(8,-0.1))
plt.plot([10,10],[-0.75,0.75],linewidth=3, color='black')
plt.annotate('FP',xy=(14,-0.1))
plt.plot([17,10],[-0.75,0.75],linewidth=3, color='black')
plt.annotate('TP',xy=(34,-0.1))
plt.plot([50,25],[-0.75,0.75],linewidth=3, color='black')
plt.annotate('FN',xy=(46,-0.1))
plt.plot([50,40],[-0.75,0.75],linewidth=3, color='black')
plt.annotate('TP',xy=(55.5,-0.1))
plt.plot([50,60],[-0.75,0.75],linewidth=3, color='black')
plt.annotate('TP',xy=(83.5,-0.1))
plt.plot([85,80],[-0.75,0.75],linewidth=3, color='black')
ax = plt.gca()
ax.text( 2.5, -1.3,str(1),bbox=dict(facecolor='none', edgecolor='black', boxstyle='circle,pad=0.5'))
ax.text( 9.5, -1.3,str(2),bbox=dict(facecolor='none', edgecolor='black', boxstyle='circle,pad=0.5'))
ax.text(15 , -1.3,str(3),bbox=dict(facecolor='none', edgecolor='black', boxstyle='circle,pad=0.5'))
ax.text(50 , -1.3,str(4),bbox=dict(facecolor='none', edgecolor='black', boxstyle='circle,pad=0.5'))
ax.text(84.5, -1.3,str(5),bbox=dict(facecolor='none', edgecolor='black', boxstyle='circle,pad=0.5'))
ax.text(39.5, 1.2,str(6),bbox=dict(facecolor='none', edgecolor='black', boxstyle='circle,pad=0.5'))
ax.text(59.5, 1.2,str(7),bbox=dict(facecolor='none', edgecolor='black', boxstyle='circle,pad=0.5'))
plt.xlabel('time step')
plt.yticks([-0.75,0,0.75])
plt.setp(plt.gca(), 'yticklabels', ['Prediction','Mapping','Target'])
plt.ylim(-1.5,1.5)
plt.xlim(0,120)
plt.legend()
plt.tight_layout()
projectPath = 'C:\Users\Steve\Documents\Uni\BAThesis\\src\\classificationProb.pdf'
pp = PdfPages(projectPath)
pp.savefig()
pp.close()
true = [1,1,1,2,3,3,3]
pred = [1,2,3,2,1,3,3]
print sklearn.metrics.f1_score(true,pred,average=None)
print np.mean(sklearn.metrics.f1_score(true,pred,average=None))
def createInputSignalFigure():
errors = [0.272813277233,0.233033147087,0.217966453407,0.139282580674,0.0953774246893,0.0898370698925,0.0551168200035]
labels = ['F','G','A','FG','FA','GA','FGA']
ax = plt.subplot()
#ax.bar(np.arange(0,7), errors, alpha=0.5)
cmap = matplotlib.cm.brg_r
for i, error in enumerate(errors):
ax.bar([i], errors[i], facecolor=cmap(error/0.5), alpha=1)
ax.set_xticks(np.arange(0.5,7.5,1))
ax.set_xticklabels(labels)
plt.ylabel('Validation Error')
plt.xlabel('Input signal')
plt.xlim(-0.5,7.5)
plt.ylim(0,0.5)
projectPath = 'C:\Users\Steve\Documents\Uni\BAThesis\\src\\errorByInput.pdf'
pp = PdfPages(projectPath)
pp.savefig()
pp.close()
return ax
def createGroundTruthCreation():
ds = createDataSetFromFile('julian_0_fullSet.npz')
def bla():
vals = np.array([0.8867924528301887,
0.85238095238095235,
0.89047619047619042,
0.8418604651162791,
0.89622641509433965,
0.875,
0.86301369863013699,
0.82027649769585254,
0.83783783783783783,
0.90094339622641506,
0.75,
0.74568965517241381,
0.76855895196506552,
0.78240740740740744,
0.76923076923076927,
0.85308056872037918,
0.85915492957746475,
0.87019230769230771,
0.86976744186046506,
0.82938388625592419,
0.90047393364928907,
0.83257918552036203,
0.80888888888888888,
0.89671361502347413,
0.86915887850467288,
0.78026905829596416,
0.76211453744493396,
0.76956521739130435,
0.73931623931623935,
0.75107296137339052,
0.90476190476190477,
0.84931506849315064,
0.89099526066350709,
0.83486238532110091,
0.84722222222222221,
0.86098654708520184,
0.87441860465116283,
0.8545454545454545,
0.85849056603773588,
0.88732394366197187,
0.74889867841409696,
0.79824561403508776,
0.82949308755760365,
0.77253218884120167,
0.77876106194690264])
np.set_printoptions(precision=3)
for i in range(9):
print i
print str( "{0:.3f}".format(np.mean(vals[i*5:i*5+5]) )) + " (" + str("{0:.2f}".format(np.std(vals[i*5:i*5+5]))) + ")"
print
def evaluateNPZ(npzFile):
pp = PdfPages(getProjectPath()+"error_space_"+npzFile+".pdf")
a = np.load(getProjectPath()+npzFile)
plotMinErrors(a['errors'], a['params'], a['paraRanges'], pp, getSpecificColorMap())
i = 0
inputSignalAxis = -1
inputScalingAxis = -1
normAxis = -1
for node, param in a['params']:
if param == 'spectral_radius':
inputSignalAxis = i
elif param == 'output_dim':
inputScalingAxis = i
elif param == 'ridge_param':
normAxis = i
i =i+1
plotAlongAxisErrors(a['errors'], a['params'], a['paraRanges'], normAxis, inputSignalAxis, inputScalingAxis, pp, getSpecificColorMap())
pp.close()
#plt.close('all')
def plotErrorResSize():
matplotlib.rcParams.update({'font.size': 25})
npzFile = '2016-04-28-09-57_bigRunOnlySnap.npz'
npz2 = '2016-04-28-15-18_bigRunOnlySnap.npz'
projectPath = 'C:\Users\Steve\Documents\Uni\BAThesis\\src\\errorResSize.pdf'
pp = PdfPages(projectPath)
a = np.load(getProjectPath()+npzFile)
errors = a['errors']
errors = np.mean(errors,2).squeeze()
b = np.load(getProjectPath()+npz2)
errors2 = b['errors']
errors2 = np.mean(errors2,2).squeeze()
plt.figure(figsize=(10,7.5))
plt.plot(errors, 'o', linestyle='-', linewidth=3, label='ridge para = 0.01')
#plt.plot(errors2, 'o', linestyle='-', linewidth=3, label='ridge para = 0.1')
plt.grid()
plt.minorticks_on()
plt.grid(which='minor', axis='y')
plt.xlabel('Reservoir size')
ticks = np.arange(0, 8)
labels = [25,50,100,200,400,800,1600,3200]
plt.xticks(ticks, labels)
plt.ylabel('Validation error')
plt.ylim(0,1)
plt.tight_layout()
pp.savefig()
pp.close()
#plt.close('all')
if __name__ == '__main__':
matplotlib.rcParams.update({'font.size': 20})
createGroundTruthCreation()
| mit |
jseabold/scikit-learn | benchmarks/bench_isolation_forest.py | 40 | 3136 | """
==========================================
IsolationForest benchmark
==========================================
A test of IsolationForest on classical anomaly detection datasets.
"""
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import IsolationForest
from sklearn.metrics import roc_curve, auc
from sklearn.datasets import fetch_kddcup99, fetch_covtype, fetch_mldata
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import shuffle as sh
np.random.seed(1)
datasets = ['http']#, 'smtp', 'SA', 'SF', 'shuttle', 'forestcover']
for dat in datasets:
# loading and vectorization
print('loading data')
if dat in ['http', 'smtp', 'SA', 'SF']:
dataset = fetch_kddcup99(subset=dat, shuffle=True, percent10=True)
X = dataset.data
y = dataset.target
if dat == 'shuttle':
dataset = fetch_mldata('shuttle')
X = dataset.data
y = dataset.target
sh(X, y)
# we remove data with label 4
# normal data are then those of class 1
s = (y != 4)
X = X[s, :]
y = y[s]
y = (y != 1).astype(int)
if dat == 'forestcover':
dataset = fetch_covtype(shuffle=True)
X = dataset.data
y = dataset.target
# normal data are those with attribute 2
# abnormal those with attribute 4
s = (y == 2) + (y == 4)
X = X[s, :]
y = y[s]
y = (y != 2).astype(int)
print('vectorizing data')
if dat == 'SF':
lb = LabelBinarizer()
lb.fit(X[:, 1])
x1 = lb.transform(X[:, 1])
X = np.c_[X[:, :1], x1, X[:, 2:]]
y = (y != 'normal.').astype(int)
if dat == 'SA':
lb = LabelBinarizer()
lb.fit(X[:, 1])
x1 = lb.transform(X[:, 1])
lb.fit(X[:, 2])
x2 = lb.transform(X[:, 2])
lb.fit(X[:, 3])
x3 = lb.transform(X[:, 3])
X = np.c_[X[:, :1], x1, x2, x3, X[:, 4:]]
y = (y != 'normal.').astype(int)
if dat == 'http' or dat == 'smtp':
y = (y != 'normal.').astype(int)
n_samples, n_features = np.shape(X)
n_samples_train = n_samples // 2
n_samples_test = n_samples - n_samples_train
X = X.astype(float)
X_train = X[:n_samples_train, :]
X_test = X[n_samples_train:, :]
y_train = y[:n_samples_train]
y_test = y[n_samples_train:]
print('IsolationForest processing...')
model = IsolationForest(bootstrap=True, n_jobs=-1)
tstart = time()
model.fit(X_train)
fit_time = time() - tstart
tstart = time()
scoring = model.predict(X_test) # the lower, the more normal
predict_time = time() - tstart
fpr, tpr, thresholds = roc_curve(y_test, scoring)
AUC = auc(fpr, tpr)
plt.plot(fpr, tpr, lw=1, label='ROC for %s (area = %0.3f, train-time: %0.2fs, test-time: %0.2fs)' % (dat, AUC, fit_time, predict_time))
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
appapantula/scikit-learn | benchmarks/bench_covertype.py | 154 | 7296 | """
===========================
Covertype dataset benchmark
===========================
Benchmark stochastic gradient descent (SGD), Liblinear, and Naive Bayes, CART
(decision tree), RandomForest and Extra-Trees on the forest covertype dataset
of Blackard, Jock, and Dean [1]. The dataset comprises 581,012 samples. It is
low dimensional with 54 features and a sparsity of approx. 23%. Here, we
consider the task of predicting class 1 (spruce/fir). The classification
performance of SGD is competitive with Liblinear while being two orders of
magnitude faster to train::
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
--------------------------------------------
liblinear 15.9744s 0.0705s 0.2305
GaussianNB 3.0666s 0.3884s 0.4841
SGD 1.0558s 0.1152s 0.2300
CART 79.4296s 0.0523s 0.0469
RandomForest 1190.1620s 0.5881s 0.0243
ExtraTrees 640.3194s 0.6495s 0.0198
The same task has been used in a number of papers including:
* `"SVM Optimization: Inverse Dependence on Training Set Size"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.139.2112>`_
S. Shalev-Shwartz, N. Srebro - In Proceedings of ICML '08.
* `"Pegasos: Primal estimated sub-gradient solver for svm"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.74.8513>`_
S. Shalev-Shwartz, Y. Singer, N. Srebro - In Proceedings of ICML '07.
* `"Training Linear SVMs in Linear Time"
<www.cs.cornell.edu/People/tj/publications/joachims_06a.pdf>`_
T. Joachims - In SIGKDD '06
[1] http://archive.ics.uci.edu/ml/datasets/Covertype
"""
from __future__ import division, print_function
# Author: Peter Prettenhofer <[email protected]>
# Arnaud Joly <[email protected]>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_covtype, get_data_home
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import zero_one_loss
from sklearn.externals.joblib import Memory
from sklearn.utils import check_array
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'covertype_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='C', random_state=13):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
## Load dataset
print("Loading dataset...")
data = fetch_covtype(download_if_missing=True, shuffle=True,
random_state=random_state)
X = check_array(data['data'], dtype=dtype, order=order)
y = (data['target'] != 1).astype(np.int)
## Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 522911
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
## Standardize first 10 features (the numerical ones)
mean = X_train.mean(axis=0)
std = X_train.std(axis=0)
mean[10:] = 0.0
std[10:] = 1.0
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
return X_train, X_test, y_train, y_test
ESTIMATORS = {
'GBRT': GradientBoostingClassifier(n_estimators=250),
'ExtraTrees': ExtraTreesClassifier(n_estimators=20),
'RandomForest': RandomForestClassifier(n_estimators=20),
'CART': DecisionTreeClassifier(min_samples_split=5),
'SGD': SGDClassifier(alpha=0.001, n_iter=2),
'GaussianNB': GaussianNB(),
'liblinear': LinearSVC(loss="l2", penalty="l2", C=1000, dual=False,
tol=1e-3)
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['liblinear', 'GaussianNB', 'SGD', 'CART'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=13, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(
order=args["order"], random_state=args["random_seed"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of train samples:".ljust(25),
X_train.shape[0], np.sum(y_train == 1),
np.sum(y_train == 0), int(X_train.nbytes / 1e6)))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of test samples:".ljust(25),
X_test.shape[0], np.sum(y_test == 1),
np.sum(y_test == 0), int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("%s %s %s %s"
% ("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 44)
for name in sorted(args["classifiers"], key=error.get):
print("%s %s %s %s" % (name.ljust(12),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % error[name]).center(10)))
print()
| bsd-3-clause |
florentchandelier/zipline | zipline/protocol.py | 2 | 8390 | #
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from warnings import warn
import pandas as pd
from zipline.assets import Asset
from zipline.utils.input_validation import expect_types
from .utils.enum import enum
from zipline._protocol import BarData # noqa
# Datasource type should completely determine the other fields of a
# message with its type.
DATASOURCE_TYPE = enum(
'AS_TRADED_EQUITY',
'MERGER',
'SPLIT',
'DIVIDEND',
'TRADE',
'TRANSACTION',
'ORDER',
'EMPTY',
'DONE',
'CUSTOM',
'BENCHMARK',
'COMMISSION',
'CLOSE_POSITION'
)
# Expected fields/index values for a dividend Series.
DIVIDEND_FIELDS = [
'declared_date',
'ex_date',
'gross_amount',
'net_amount',
'pay_date',
'payment_sid',
'ratio',
'sid',
]
# Expected fields/index values for a dividend payment Series.
DIVIDEND_PAYMENT_FIELDS = [
'id',
'payment_sid',
'cash_amount',
'share_count',
]
class Event(object):
def __init__(self, initial_values=None):
if initial_values:
self.__dict__.update(initial_values)
def keys(self):
return self.__dict__.keys()
def __eq__(self, other):
return hasattr(other, '__dict__') and self.__dict__ == other.__dict__
def __contains__(self, name):
return name in self.__dict__
def __repr__(self):
return "Event({0})".format(self.__dict__)
def to_series(self, index=None):
return pd.Series(self.__dict__, index=index)
def _deprecated_getitem_method(name, attrs):
"""Create a deprecated ``__getitem__`` method that tells users to use
getattr instead.
Parameters
----------
name : str
The name of the object in the warning message.
attrs : iterable[str]
The set of allowed attributes.
Returns
-------
__getitem__ : callable[any, str]
The ``__getitem__`` method to put in the class dict.
"""
attrs = frozenset(attrs)
msg = (
"'{name}[{attr!r}]' is deprecated, please use"
" '{name}.{attr}' instead"
)
def __getitem__(self, key):
"""``__getitem__`` is deprecated, please use attribute access instead.
"""
warn(msg.format(name=name, attr=key), DeprecationWarning, stacklevel=2)
if key in attrs:
return self.__dict__[key]
raise KeyError(key)
return __getitem__
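# Illustrative example: the classes below install this factory as their
# __getitem__, so dict-style access warns and then falls back to the
# attribute, e.g.
#
#   p = Portfolio()
#   p['cash']        # DeprecationWarning; equivalent to p.cash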
class Order(Event):
# If you are adding new attributes, don't update this set. This method
# is deprecated to normal attribute access so we don't want to encourage
# new usages.
__getitem__ = _deprecated_getitem_method(
'order', {
'dt',
'sid',
'amount',
'stop',
'limit',
'id',
'filled',
'commission',
'stop_reached',
'limit_reached',
'created',
},
)
class Portfolio(object):
def __init__(self):
self.capital_used = 0.0
self.starting_cash = 0.0
self.portfolio_value = 0.0
self.pnl = 0.0
self.returns = 0.0
self.cash = 0.0
self.positions = Positions()
self.start_date = None
self.positions_value = 0.0
def __repr__(self):
return "Portfolio({0})".format(self.__dict__)
# If you are adding new attributes, don't update this set. This method
# is deprecated to normal attribute access so we don't want to encourage
# new usages.
__getitem__ = _deprecated_getitem_method(
'portfolio', {
'capital_used',
'starting_cash',
'portfolio_value',
'pnl',
'returns',
'cash',
'positions',
'start_date',
'positions_value',
},
)
class Account(object):
'''
The account object tracks information about the trading account. The
values are updated as the algorithm runs and its keys remain unchanged.
If connected to a broker, one can update these values with the trading
account values as reported by the broker.
'''
def __init__(self):
self.settled_cash = 0.0
self.accrued_interest = 0.0
self.buying_power = float('inf')
self.equity_with_loan = 0.0
self.total_positions_value = 0.0
self.total_positions_exposure = 0.0
self.regt_equity = 0.0
self.regt_margin = float('inf')
self.initial_margin_requirement = 0.0
self.maintenance_margin_requirement = 0.0
self.available_funds = 0.0
self.excess_liquidity = 0.0
self.cushion = 0.0
self.day_trades_remaining = float('inf')
self.leverage = 0.0
self.net_leverage = 0.0
self.net_liquidation = 0.0
def __repr__(self):
return "Account({0})".format(self.__dict__)
# If you are adding new attributes, don't update this set. This method
# is deprecated to normal attribute access so we don't want to encourage
# new usages.
__getitem__ = _deprecated_getitem_method(
'account', {
'settled_cash',
'accrued_interest',
'buying_power',
'equity_with_loan',
'total_positions_value',
'total_positions_exposure',
'regt_equity',
'regt_margin',
'initial_margin_requirement',
'maintenance_margin_requirement',
'available_funds',
'excess_liquidity',
'cushion',
'day_trades_remaining',
'leverage',
'net_leverage',
'net_liquidation',
},
)
class Position(object):
@expect_types(asset=Asset)
def __init__(self, asset):
self.asset = asset
self.amount = 0
self.cost_basis = 0.0 # per share
self.last_sale_price = 0.0
self.last_sale_date = None
@property
def sid(self):
# for backwards compatibility
return self.asset
def __repr__(self):
return "Position({0})".format(self.__dict__)
# If you are adding new attributes, don't update this set. This method
# is deprecated to normal attribute access so we don't want to encourage
# new usages.
__getitem__ = _deprecated_getitem_method(
'position', {
'sid',
'amount',
'cost_basis',
'last_sale_price',
'last_sale_date',
},
)
# Copied from Position and renamed. This is used to handle cases where a user
# does something like `context.portfolio.positions[100]` instead of
# `context.portfolio.positions[sid(100)]`.
class _DeprecatedSidLookupPosition(object):
def __init__(self, sid):
self.sid = sid
self.amount = 0
self.cost_basis = 0.0 # per share
self.last_sale_price = 0.0
self.last_sale_date = None
def __repr__(self):
return "_DeprecatedSidLookupPosition({0})".format(self.__dict__)
# If you are adding new attributes, don't update this set. This method
# is deprecated to normal attribute access so we don't want to encourage
# new usages.
__getitem__ = _deprecated_getitem_method(
'position', {
'sid',
'amount',
'cost_basis',
'last_sale_price',
'last_sale_date',
},
)
class Positions(dict):
def __missing__(self, key):
if isinstance(key, Asset):
return Position(key)
elif isinstance(key, int):
warn("Referencing positions by integer is deprecated."
" Use an asset instead.")
else:
warn("Position lookup expected a value of type Asset but got {0}"
" instead.".format(type(key).__name__))
return _DeprecatedSidLookupPosition(key)
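# Illustrative example (some_asset is a hypothetical Asset instance): missing
# keys yield an empty position for that asset,
#
#   positions = Positions()
#   positions[some_asset].amount   # -> 0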
| apache-2.0 |
francesco-mannella/dmp-esn | DMP/stulp/src/dynamicalsystems/plotting/plotDynamicalSystemComparison.py | 2 | 1772 | import numpy
import os
import sys
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter
import subprocess
from plotDynamicalSystem import plotDynamicalSystem
def plotDynamicalSystemComparison(data1,data2,name1,name2,axs,axs_diff):
lines1 = plotDynamicalSystem(data1,axs)
lines2 = plotDynamicalSystem(data2,axs)
plt.setp(lines1,linestyle='-', linewidth=4, color=(0.8,0.8,0.8), label=name1)
plt.setp(lines2,linestyle='--', linewidth=2, color=(0.0,0.0,0.5), label=name2)
plt.legend()
data_diff = data1-data2;
data_diff[:,-1] = data1[:,-1] # Don't subtract time...
lines_diff = plotDynamicalSystem(data_diff,axs_diff)
plt.setp(lines_diff,linestyle= '-', linewidth=2, color=(0.50,0.00,0.00), label='diff')
plt.legend()
if __name__=='__main__':
# Process arguments
if ( (len(sys.argv)<3)):
print '\nUsage: '+sys.argv[0]+' <filename1> <filename2> [system order]\n';
sys.exit()
filename1 = str(sys.argv[1])
filename2 = str(sys.argv[2])
system_order = 1
if ( (len(sys.argv)>3)):
system_order = int(sys.argv[3])
try:
data1 = numpy.loadtxt(filename1);
except IOError:
print "File '"+filename1+ "' does not exist. ABORT."
sys.exit(-1)
try:
data2 = numpy.loadtxt(filename2);
except IOError:
print "File '"+filename2+ "' does not exist. ABORT."
sys.exit(-1)
fig = plt.figure(1,figsize=(12, 4))
axs = [];
axs_diff = [];
for sp in range(2*system_order):
axs.append(fig.add_subplot(2,2*system_order,sp+1));
axs_diff.append(fig.add_subplot(2,2*system_order,sp+1+2*system_order));
plotDynamicalSystemComparison(data1,data2,filename1,filename2,axs,axs_diff)
#fig.tight_layout()
plt.show() | gpl-2.0 |
zhenv5/scikit-learn | sklearn/decomposition/tests/test_dict_learning.py | 69 | 8605 | import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import TempMemmap
from sklearn.decomposition import DictionaryLearning
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.decomposition import SparseCoder
from sklearn.decomposition import dict_learning_online
from sklearn.decomposition import sparse_encode
rng_global = np.random.RandomState(0)
n_samples, n_features = 10, 8
X = rng_global.randn(n_samples, n_features)
def test_dict_learning_shapes():
n_components = 5
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_overcomplete():
n_components = 12
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_reconstruction():
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
# used to test lars here too, but there's no guarantee the number of
# nonzero atoms is right.
def test_dict_learning_reconstruction_parallel():
# regression test that parallel reconstruction works with n_jobs=-1
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
def test_dict_learning_lassocd_readonly_data():
n_components = 12
with TempMemmap(X) as X_read_only:
dico = DictionaryLearning(n_components, transform_algorithm='lasso_cd',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X_read_only).transform(X_read_only)
assert_array_almost_equal(np.dot(code, dico.components_), X_read_only, decimal=2)
def test_dict_learning_nonzero_coefs():
n_components = 4
dico = DictionaryLearning(n_components, transform_algorithm='lars',
transform_n_nonzero_coefs=3, random_state=0)
code = dico.fit(X).transform(X[np.newaxis, 1])
assert_true(len(np.flatnonzero(code)) == 3)
dico.set_params(transform_algorithm='omp')
code = dico.transform(X[np.newaxis, 1])
assert_equal(len(np.flatnonzero(code)), 3)
def test_dict_learning_unknown_fit_algorithm():
n_components = 5
dico = DictionaryLearning(n_components, fit_algorithm='<unknown>')
assert_raises(ValueError, dico.fit, X)
def test_dict_learning_split():
n_components = 5
dico = DictionaryLearning(n_components, transform_algorithm='threshold',
random_state=0)
code = dico.fit(X).transform(X)
dico.split_sign = True
split_code = dico.transform(X)
assert_array_equal(split_code[:, :n_components] -
split_code[:, n_components:], code)
def test_dict_learning_online_shapes():
rng = np.random.RandomState(0)
n_components = 8
code, dictionary = dict_learning_online(X, n_components=n_components,
alpha=1, random_state=rng)
assert_equal(code.shape, (n_samples, n_components))
assert_equal(dictionary.shape, (n_components, n_features))
assert_equal(np.dot(code, dictionary).shape, X.shape)
def test_dict_learning_online_verbosity():
n_components = 5
# test verbosity
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1,
random_state=0)
dico.fit(X)
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2,
random_state=0)
dico.fit(X)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=1,
random_state=0)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=2,
random_state=0)
finally:
sys.stdout = old_stdout
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_estimator_shapes():
n_components = 5
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0)
dico.fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_overcomplete():
n_components = 12
dico = MiniBatchDictionaryLearning(n_components, n_iter=20,
random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_initialization():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features)
dico = MiniBatchDictionaryLearning(n_components, n_iter=0,
dict_init=V, random_state=0).fit(X)
assert_array_equal(dico.components_, V)
def test_dict_learning_online_partial_fit():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X),
batch_size=1,
alpha=1, shuffle=False, dict_init=V,
random_state=0).fit(X)
dict2 = MiniBatchDictionaryLearning(n_components, alpha=1,
n_iter=1, dict_init=V,
random_state=0)
for i in range(10):
for sample in X:
dict2.partial_fit(sample[np.newaxis, :])
assert_true(not np.all(sparse_encode(X, dict1.components_, alpha=1) ==
0))
assert_array_almost_equal(dict1.components_, dict2.components_,
decimal=2)
def test_sparse_encode_shapes():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
code = sparse_encode(X, V, algorithm=algo)
assert_equal(code.shape, (n_samples, n_components))
def test_sparse_encode_error():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = sparse_encode(X, V, alpha=0.001)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
def test_sparse_encode_error_default_sparsity():
rng = np.random.RandomState(0)
X = rng.randn(100, 64)
D = rng.randn(2, 64)
code = ignore_warnings(sparse_encode)(X, D, algorithm='omp',
n_nonzero_coefs=None)
assert_equal(code.shape, (100, 2))
def test_unknown_method():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
assert_raises(ValueError, sparse_encode, X, V, algorithm="<unknown>")
def test_sparse_coder_estimator():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars',
transform_alpha=0.001).transform(X)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
| bsd-3-clause |
JaviMerino/bart | bart/common/Utils.py | 1 | 5498 | # Copyright 2015-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Utility functions for sheye"""
import trappy
import numpy as np
# pylint fails to recognize numpy members.
# pylint: disable=no-member
def init_run(trace):
"""Initialize the Run Object
:param trace: Path for the trace file
or a trace object
:type trace: str, :mod:`trappy.run.Run`
"""
if isinstance(trace, basestring):
return trappy.Run(trace)
elif isinstance(trace, trappy.Run):
return trace
raise ValueError("Invalid trace Object")
def select_window(series, window):
"""Helper Function to select a portion of
pandas time series
:param series: Input Time Series data
:type series: :mod:`pandas.Series`
:param window: A tuple indicating a time window
:type window: tuple
"""
if not window:
return series
start, stop = window
ix = series.index
selector = ((ix >= start) & (ix <= stop))
window_series = series[selector]
return window_series
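# Illustrative example:
#
#   import pandas as pd
#   s = pd.Series([1, 2, 3, 4], index=[0.0, 1.0, 2.0, 3.0])
#   select_window(s, (1.0, 2.0))   # keeps the samples at t=1.0 and t=2.0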
def area_under_curve(series, sign=None, method="trapz", step="post"):
"""Return the area under the time series curve (Integral)
:param series: The time series to be integrated
:type series: :mod:`pandas.Series`
:param sign: Clip the data for the area in positive
or negative regions. Can have two values
- `"+"`
- `"-"`
:type sign: str
:param method: The method for area calculation. This can
be any of the integration methods supported in `numpy`
or `rect`
:type param: str
:param step: The step behaviour for `rect` method
:type step: str
*Rectangular Method*
- Step: Post
Consider the following time series data
.. code::
2 *----*----*----+
| |
1 | *----*----+
|
0 *----*----+
0 1 2 3 4 5 6 7
.. code::
import pandas as pd
a = [0, 0, 2, 2, 2, 1, 1]
s = pd.Series(a)
The area under the curve is:
.. math::
\sum_{k=0}^{N-1} (x_{k+1} - {x_k}) \\times f(x_k) \\\\
(2 \\times 3) + (1 \\times 2) = 8
- Step: Pre
.. code::
2 +----*----*----*
| |
1 | +----*----*----+
|
0 *----*
0 1 2 3 4 5 6 7
.. code::
import pandas as pd
a = [0, 0, 2, 2, 2, 1, 1]
s = pd.Series(a)
The area under the curve is:
.. math::
\sum_{k=1}^{N} (x_k - x_{k-1}) \\times f(x_k) \\\\
(2 \\times 3) + (1 \\times 3) = 9
"""
if sign == "+":
series = series.clip_lower(0)
elif sign == "=":
series = series.clip_upper(0)
series = series.dropna()
if method == "rect":
if step == "post":
values = series.values[:-1]
elif step == "pre":
values = series.values[1:]
else:
raise ValueError("Invalid Value for step: {}".format(step))
return (values * np.diff(series.index)).sum()
if hasattr(np, method):
np_integ_method = getattr(np, method)
return np_integ_method(series.values, series.index)
else:
raise ValueError("Invalid method: {}".format(method))
def interval_sum(series, value=None):
"""A function that returns the sum of the
intervals where the value of series is equal to
the expected value. Consider the following time
series data
====== =======
Time Value
====== =======
1 0
2 0
3 1
4 1
5 1
6 1
7 0
8 1
9 0
10 1
11 1
====== =======
1 occurs contiguously between the following indices
the series:
- 3 to 6
- 10 to 11
There for `interval_sum` for the value 1 is
.. math::
(6 - 3) + (11 - 10) = 4
:param series: The time series data
:type series: :mod:`pandas.Series`
:param value: The value to checked for in the series. If the
value is None, the truth value of the elements in the
series will be used
:type value: element
"""
index = series.index
array = series.values
time_splits = np.append(np.where(np.diff(array) != 0), len(array) - 1)
prev = 0
time = 0
for split in time_splits:
first_val = series[index[split]]
check = (first_val == value) if value else first_val
if check and prev != split:
time += index[split] - index[prev]
prev = split + 1
return time
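# Illustrative example, reproducing the docstring table above:
#
#   import pandas as pd
#   s = pd.Series([0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1], index=range(1, 12))
#   interval_sum(s, value=1)   # -> 4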
| apache-2.0 |
gpetretto/pymatgen | pymatgen/analysis/wulff.py | 3 | 21287 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module defines a WulffShape class to generate the Wulff shape from
a lattice, a list of indices and their corresponding surface energies.
The total area and volume of the Wulff shape, the weighted surface energy,
the anisotropy and the shape factor can also be calculated.
Plotting from a given view in terms of Miller index is supported.
The lattice is from the conventional unit cell, and (hkil) is used for
hexagonal lattices.
If you use this code extensively, consider citing the following:
Tran, R.; Xu, Z.; Radhakrishnan, B.; Winston, D.; Persson, K. A.; Ong, S. P.
(2016). Surface energies of elemental crystals. Scientific Data.
"""
from __future__ import division, unicode_literals
from pymatgen.core.structure import Structure
from pymatgen.core.surface import get_recp_symmetry_operation
from pymatgen.util.coord import get_angle
import numpy as np
import scipy as sp
from scipy.spatial import ConvexHull
import logging
__author__ = 'Zihan Xu, Richard Tran, Shyue Ping Ong'
__copyright__ = 'Copyright 2013, The Materials Virtual Lab'
__version__ = '0.1'
__maintainer__ = 'Zihan Xu'
__email__ = '[email protected]'
__date__ = 'May 5 2016'
logger = logging.getLogger(__name__)
def hkl_tuple_to_str(hkl):
"""
Prepare for display on plots
"(hkl)" for surfaces
Args:
hkl: in the form of [h, k, l] or (h, k, l)
"""
str_format = '($'
for x in hkl:
if x < 0:
str_format += '\\overline{' + str(-x) + '}'
else:
str_format += str(x)
str_format += '$)'
return str_format
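# Illustrative example:
#
#   hkl_tuple_to_str((1, -1, 0))   # -> '($1\overline{1}0$)'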
def get_tri_area(pts):
"""
Given a list of coords for 3 points,
Compute the area of this triangle.
Args:
pts: [a, b, c] three points
"""
a, b, c = pts[0], pts[1], pts[2]
v1 = np.array(b) - np.array(a)
v2 = np.array(c) - np.array(a)
area_tri = abs(sp.linalg.norm(sp.cross(v1, v2)) / 2)
return area_tri
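# Illustrative example: a unit right triangle,
#
#   get_tri_area([[0, 0, 0], [1, 0, 0], [0, 1, 0]])   # -> 0.5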
class WulffFacet(object):
"""
Helper container for each Wulff plane.
"""
def __init__(self, normal, e_surf, normal_pt, dual_pt, index, m_ind_orig,
miller):
self.normal = normal
self.e_surf = e_surf
self.normal_pt = normal_pt
self.dual_pt = dual_pt
self.index = index
self.m_ind_orig = m_ind_orig
self.miller = miller
self.points = []
self.outer_lines = []
class WulffShape(object):
"""
Generate Wulff Shape from list of miller index and surface energies,
with given conventional unit cell.
surface energy (J/m^2) is the length of the normal.
Wulff shape is the convex hull.
Based on:
http://scipy.github.io/devdocs/generated/scipy.spatial.ConvexHull.html
Process:
1. get wulff simplices
2. label with color
3. get wulff_area and other properties
.. attribute:: debug (bool)
.. attribute:: alpha
transparency
.. attribute:: color_set
.. attribute:: grid_off (bool)
.. attribute:: axis_off (bool)
.. attribute:: show_area
.. attribute:: off_color
color of facets off wulff
.. attribute:: structure
Structure object, input conventional unit cell (with H ) from lattice
.. attribute:: miller_list
list of input miller index, for hcp in the form of hkil
.. attribute:: hkl_list
hkil converted to hkl, in the same order as input_miller
.. attribute:: e_surf_list
list of input surface energies, in the same order as input_miller
.. attribute:: lattice
Lattice object, the input lattice for the conventional unit cell
.. attribute:: facets
[WulffFacet] for all facets considering symmetry
.. attribute:: dual_cv_simp
simplices from the dual convex hull (dual_pt)
.. attribute:: wulff_pt_list
.. attribute:: wulff_cv_simp
simplices from the convex hull of wulff_pt_list
.. attribute:: on_wulff
list for all input_miller; True if the facet is on the wulff shape.
.. attribute:: color_area
list for all input_miller, total area on wulff, off_wulff = 0.
.. attribute:: miller_area
($hkl$): area for all input_miller
"""
def __init__(self, lattice, miller_list, e_surf_list, symprec=1e-5):
"""
Args:
lattice: Lattice object of the conventional unit cell
miller_list ([(hkl), ...]): list of hkl or hkil for hcp
e_surf_list ([float]): list of corresponding surface energies
symprec (float): for recp_operation, default is 1e-5.
"""
self.color_ind = list(range(len(miller_list)))
self.input_miller_fig = [hkl_tuple_to_str(x) for x in miller_list]
# store input data
self.structure = Structure(lattice, ["H"], [[0, 0, 0]])
self.miller_list = tuple([tuple(x) for x in miller_list])
self.hkl_list = tuple([(x[0], x[1], x[-1]) for x in miller_list])
self.e_surf_list = tuple(e_surf_list)
self.lattice = lattice
self.symprec = symprec
# 2. get all the data for wulff construction
# get all the surface normal from get_all_miller_e()
self.facets = self._get_all_miller_e()
logger.debug(len(self.facets))
# 3. consider the dual condition
dual_pts = [x.dual_pt for x in self.facets]
dual_convex = ConvexHull(dual_pts)
dual_cv_simp = dual_convex.simplices
# simplices (ndarray of ints, shape (nfacet, ndim))
# list of [i, j, k] , ndim = 3
# i, j, k: ind for normal_e_m
# recalculate the dual of dual, get the wulff shape.
# corner <-> surface
# get cross point from the simplices of the dual convex hull
wulff_pt_list = [self._get_cross_pt_dual_simp(dual_simp)
for dual_simp in dual_cv_simp]
wulff_convex = ConvexHull(wulff_pt_list)
wulff_cv_simp = wulff_convex.simplices
logger.debug(", ".join([str(len(x)) for x in wulff_cv_simp]))
# store simplices and convex
self.dual_cv_simp = dual_cv_simp
self.wulff_pt_list = wulff_pt_list
self.wulff_cv_simp = wulff_cv_simp
self.wulff_convex = wulff_convex
self.on_wulff, self.color_area = self._get_simpx_plane()
miller_area = []
for m, in_mill_fig in enumerate(self.input_miller_fig):
miller_area.append(
in_mill_fig + ' : ' + str(round(self.color_area[m], 4)))
self.miller_area = miller_area
def _get_all_miller_e(self):
"""
from self:
get miller_list(unique_miller), e_surf_list and symmetry
operations(symmops) according to lattice
apply symmops to get all the miller index, then get normal,
get all the facets functions for wulff shape calculation:
|normal| = 1, e_surf is plane's distance to (0, 0, 0),
normal[0]x + normal[1]y + normal[2]z = e_surf
return:
[WulffFacet]
"""
all_hkl = []
color_ind = self.color_ind
planes = []
recp = self.structure.lattice.reciprocal_lattice_crystallographic
recp_symmops = get_recp_symmetry_operation(self.structure, self.symprec)
for i, (hkl, energy) in enumerate(zip(self.hkl_list,
self.e_surf_list)):
for op in recp_symmops:
miller = tuple([int(x) for x in op.operate(hkl)])
if miller not in all_hkl:
all_hkl.append(miller)
normal = recp.get_cartesian_coords(miller)
normal /= sp.linalg.norm(normal)
normal_pt = [x * energy for x in normal]
dual_pt = [x / energy for x in normal]
color_plane = color_ind[divmod(i, len(color_ind))[1]]
planes.append(WulffFacet(normal, energy, normal_pt,
dual_pt, color_plane, i, hkl))
# sort by e_surf
planes.sort(key=lambda x: x.e_surf)
return planes
def _get_cross_pt_dual_simp(self, dual_simp):
"""
|normal| = 1, e_surf is plane's distance to (0, 0, 0),
plane function:
normal[0]x + normal[1]y + normal[2]z = e_surf
from self:
normal_e_m to get the plane functions
dual_simp: (i, j, k) simplices from the dual convex hull
i, j, k: plane index(same order in normal_e_m)
"""
matrix_surfs = [self.facets[dual_simp[i]].normal for i in range(3)]
matrix_e = [self.facets[dual_simp[i]].e_surf for i in range(3)]
cross_pt = sp.dot(sp.linalg.inv(matrix_surfs), matrix_e)
return cross_pt
def _get_simpx_plane(self):
"""
Locate the plane for each simplex on wulff_cv, by comparing the center of
the simplex triangle with the plane functions.
"""
on_wulff = [False] * len(self.miller_list)
surface_area = [0.0] * len(self.miller_list)
for simpx in self.wulff_cv_simp:
pts = [self.wulff_pt_list[simpx[i]] for i in range(3)]
center = np.sum(pts, 0) / 3.0
# check whether the center of the simplices is on one plane
for plane in self.facets:
abs_diff = abs(np.dot(plane.normal, center) - plane.e_surf)
if abs_diff < 1e-5:
on_wulff[plane.index] = True
surface_area[plane.index] += get_tri_area(pts)
plane.points.append(pts)
plane.outer_lines.append([simpx[0], simpx[1]])
plane.outer_lines.append([simpx[1], simpx[2]])
plane.outer_lines.append([simpx[0], simpx[2]])
# already found the plane, move to the next simplex
break
for plane in self.facets:
plane.outer_lines.sort()
plane.outer_lines = [line for line in plane.outer_lines
if plane.outer_lines.count(line) != 2]
return on_wulff, surface_area
def _get_colors(self, color_set, alpha, off_color, custom_colors={}):
"""
assign colors according to the surface energies of on_wulff facets.
return:
(color_list, color_proxy, color_proxy_on_wulff, miller_on_wulff,
e_surf_on_wulff_list)
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
color_list = [off_color] * len(self.hkl_list)
color_proxy_on_wulff = []
miller_on_wulff = []
e_surf_on_wulff = [(i, e_surf)
for i, e_surf in enumerate(self.e_surf_list)
if self.on_wulff[i]]
c_map = plt.get_cmap(color_set)
e_surf_on_wulff.sort(key=lambda x: x[1], reverse=False)
e_surf_on_wulff_list = [x[1] for x in e_surf_on_wulff]
if len(e_surf_on_wulff) > 1:
cnorm = mpl.colors.Normalize(vmin=min(e_surf_on_wulff_list),
vmax=max(e_surf_on_wulff_list))
else:
# if there is only one hkl on wulff, choose the color of the median
cnorm = mpl.colors.Normalize(vmin=min(e_surf_on_wulff_list) - 0.1,
vmax=max(e_surf_on_wulff_list) + 0.1)
scalar_map = mpl.cm.ScalarMappable(norm=cnorm, cmap=c_map)
for i, e_surf in e_surf_on_wulff:
color_list[i] = scalar_map.to_rgba(e_surf, alpha=alpha)
if tuple(self.miller_list[i]) in custom_colors.keys():
color_list[i] = custom_colors[tuple(self.miller_list[i])]
color_proxy_on_wulff.append(
plt.Rectangle((2, 2), 1, 1, fc=color_list[i], alpha=alpha))
miller_on_wulff.append(self.input_miller_fig[i])
scalar_map.set_array([x[1] for x in e_surf_on_wulff])
color_proxy = [plt.Rectangle((2, 2), 1, 1, fc=x, alpha=alpha)
for x in color_list]
return color_list, color_proxy, color_proxy_on_wulff, miller_on_wulff, \
e_surf_on_wulff_list
def show(self, *args, **kwargs):
"""
Show the Wulff plot.
Args:
\\*args: Passed to get_plot.
\\*\\*kwargs: Passed to get_plot.
"""
self.get_plot(*args, **kwargs).show()
def get_plot(self, color_set='PuBu', grid_off=True, axis_off=True,
show_area=False, alpha=1, off_color='red', direction=None,
bar_pos=(0.75, 0.15, 0.05, 0.65), bar_on=False,
legend_on=True, aspect_ratio=(8, 8), custom_colors={}):
"""
Get the Wulff shape plot.
Args:
color_set: default is 'PuBu'
grid_off (bool): default is True
axis_off (bool): default is True
show_area (bool): default is False
alpha (float): chosen from 0 to 1 (float), default is 1
off_color: Default color for facets not present on the Wulff shape.
direction: default is (1, 1, 1)
bar_pos: default is [0.75, 0.15, 0.05, 0.65]
bar_on (bool): default is False
legend_on (bool): default is True
aspect_ratio: default is (8, 8)
custom_colors ({(h,k,l): [r,g,b,alpha]}): Customize color of each
facet with a dictionary. The key is the corresponding Miller
index and the value is the color. Undefined facets will use the default
color scheme. Note: If you decide to set your own colors, it
probably won't make any sense to have the color bar on.
Return:
(matplotlib.pyplot)
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d as mpl3
color_list, color_proxy, color_proxy_on_wulff, \
miller_on_wulff, e_surf_on_wulff = self._get_colors(
color_set, alpha, off_color, custom_colors=custom_colors)
if not direction:
# If direction is not specified, use the miller indices of
# maximum area.
direction = max(self.area_fraction_dict.items(),
key=lambda x: x[1])[0]
fig = plt.figure()
fig.set_size_inches(aspect_ratio[0], aspect_ratio[1])
azim, elev = self._get_azimuth_elev([direction[0], direction[1],
direction[-1]])
wulff_pt_list = self.wulff_pt_list
ax = mpl3.Axes3D(fig, azim=azim, elev=elev)
for plane in self.facets:
# check whether [pts] is empty
if len(plane.points) < 1:
# empty, plane is not on_wulff.
continue
# assign the color for on_wulff facets according to its
# index and the color_list for on_wulff
plane_color = color_list[plane.index]
lines = list(plane.outer_lines)
pt = []
prev = None
while len(lines) > 0:
if prev is None:
l = lines.pop(0)
else:
for i, l in enumerate(lines):
if prev in l:
l = lines.pop(i)
if l[1] == prev:
l.reverse()
break
# make sure the lines are connected one by one.
# find the way covering all pts and facets
pt.append(self.wulff_pt_list[l[0]].tolist())
pt.append(self.wulff_pt_list[l[1]].tolist())
prev = l[1]
# plot from the sorted pts from [simpx]
tri = mpl3.art3d.Poly3DCollection([pt])
tri.set_color(plane_color)
tri.set_edgecolor("#808080")
ax.add_collection3d(tri)
# set ranges of x, y, z
# find the largest distance between on_wulff pts and the origin,
# to ensure complete and consistent display for all directions
r_range = max([np.linalg.norm(x) for x in wulff_pt_list])
ax.set_xlim([-r_range * 1.1, r_range * 1.1])
ax.set_ylim([-r_range * 1.1, r_range * 1.1])
ax.set_zlim([-r_range * 1.1, r_range * 1.1])
# add legend
if legend_on:
color_proxy = color_proxy
if show_area:
ax.legend(color_proxy, self.miller_area, loc='upper left',
bbox_to_anchor=(0, 1), fancybox=True, shadow=False)
else:
ax.legend(color_proxy_on_wulff, miller_on_wulff,
loc='upper center',
bbox_to_anchor=(0.5, 1), ncol=3, fancybox=True,
shadow=False)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
# Add colorbar
if bar_on:
cmap = plt.get_cmap(color_set)
cmap.set_over('0.25')
cmap.set_under('0.75')
bounds = [round(e, 2) for e in e_surf_on_wulff]
bounds.append(1.2 * bounds[-1])
norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
# display surface energies
ax1 = fig.add_axes(bar_pos)
cbar = mpl.colorbar.ColorbarBase(
ax1, cmap=cmap, norm=norm, boundaries=[0] + bounds + [10],
extend='both', ticks=bounds[:-1], spacing='proportional',
orientation='vertical')
cbar.set_label('Surface Energies ($J/m^2$)', fontsize=100)
if grid_off:
ax.grid('off')
if axis_off:
ax.axis('off')
return plt
def _get_azimuth_elev(self, miller_index):
"""
Args:
miller_index: viewing direction
Returns:
azim, elev for plotting
"""
if miller_index == (0, 0, 1) or miller_index == (0, 0, 0, 1):
return 0, 90
else:
cart = self.lattice.get_cartesian_coords(miller_index)
azim = get_angle([cart[0], cart[1], 0], (1, 0, 0))
v = [cart[0], cart[1], 0]
elev = get_angle(cart, v)
return azim, elev
@property
def volume(self):
"""
Volume of the Wulff shape
"""
return self.wulff_convex.volume
@property
def miller_area_dict(self):
"""
Returns {hkl: area_hkl on wulff}
"""
return dict(zip(self.miller_list, self.color_area))
@property
def miller_energy_dict(self):
"""
Returns {hkl: surface energy_hkl}
"""
return dict(zip(self.miller_list, self.e_surf_list))
@property
def surface_area(self):
"""
Total surface area of Wulff shape.
"""
return sum(self.miller_area_dict.values())
@property
def weighted_surface_energy(self):
"""
Returns:
sum(surface_energy_hkl * area_hkl)/ sum(area_hkl)
"""
return self.total_surface_energy / self.surface_area
@property
def area_fraction_dict(self):
"""
Returns:
(dict): {hkl: area_hkl/total area on wulff}
"""
return {hkl: self.miller_area_dict[hkl] / self.surface_area
for hkl in self.miller_area_dict.keys()}
@property
def anisotropy(self):
"""
Returns:
(float) Coefficient of Variation from weighted surface energy
The ideal sphere is 0.
"""
square_diff_energy = 0
weighted_energy = self.weighted_surface_energy
area_frac_dict = self.area_fraction_dict
miller_energy_dict = self.miller_energy_dict
for hkl in miller_energy_dict.keys():
square_diff_energy += (miller_energy_dict[hkl] - weighted_energy)\
** 2 * area_frac_dict[hkl]
return np.sqrt(square_diff_energy) / weighted_energy
@property
def shape_factor(self):
"""
This is useful for determining the critical nucleus size.
A large shape factor indicates great anisotropy.
See Balluffi, R. W., Allen, S. M. & Carter, W. C. Kinetics
of Materials. (John Wiley & Sons, 2005), p.461
Returns:
(float) Shape factor.
"""
return self.surface_area / (self.volume ** (2 / 3))
@property
def effective_radius(self):
"""
Radius of the Wulff shape when it is
approximated as a sphere.
Returns:
(float) radius.
"""
return ((3/4)*(self.volume/np.pi)) ** (1 / 3)
@property
def total_surface_energy(self):
"""
Total surface energy of the Wulff shape.
Returns:
(float) sum(surface_energy_hkl * area_hkl)
"""
tot_surface_energy = 0
for hkl in self.miller_energy_dict.keys():
tot_surface_energy += self.miller_energy_dict[hkl] * \
self.miller_area_dict[hkl]
return tot_surface_energy
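# --- Hedged usage sketch (added for illustration; not part of the original
# module). It assumes pymatgen's Lattice.cubic constructor is importable from
# pymatgen.core.lattice and uses made-up surface energies purely to exercise
# the WulffShape API defined above.
if __name__ == "__main__":
    from pymatgen.core.lattice import Lattice

    demo_lattice = Lattice.cubic(3.5)
    demo_millers = [(1, 0, 0), (1, 1, 0), (1, 1, 1)]
    demo_energies = [1.25, 1.30, 1.05]  # J/m^2, illustrative values only
    demo_shape = WulffShape(demo_lattice, demo_millers, demo_energies)
    print("weighted surface energy:", demo_shape.weighted_surface_energy)
    print("anisotropy:", demo_shape.anisotropy)
    print("area fractions:", demo_shape.area_fraction_dict)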
| mit |
zxsted/keras | examples/addition_rnn.py | 50 | 5900 | # -*- coding: utf-8 -*-
from __future__ import print_function
from keras.models import Sequential, slice_X
from keras.layers.core import Activation, Dense, RepeatVector
from keras.layers import recurrent
from sklearn.utils import shuffle
import numpy as np
"""
An implementation of sequence to sequence learning for performing addition
Input: "535+61"
Output: "596"
Padding is handled by using a repeated sentinel character (space)
By default, the JZS1 recurrent neural network is used
JZS1 was an "evolved" recurrent neural network performing well on an arithmetic benchmark in:
"An Empirical Exploration of Recurrent Network Architectures"
http://jmlr.org/proceedings/papers/v37/jozefowicz15.pdf
Input may optionally be inverted, shown to increase performance in many tasks in:
"Learning to Execute"
http://arxiv.org/abs/1410.4615
and
"Sequence to Sequence Learning with Neural Networks"
http://papers.nips.cc/paper/5346-sequence-to-sequence-learning-with-neural-networks.pdf
Theoretically it introduces shorter term dependencies between source and target.
Two digits inverted:
+ One layer JZS1 (128 HN), 5k training examples = 99% train/test accuracy in 55 epochs
Three digits inverted:
+ One layer JZS1 (128 HN), 50k training examples = 99% train/test accuracy in 100 epochs
Four digits inverted:
+ One layer JZS1 (128 HN), 400k training examples = 99% train/test accuracy in 20 epochs
Five digits inverted:
+ One layer JZS1 (128 HN), 550k training examples = 99% train/test accuracy in 30 epochs
"""
class CharacterTable(object):
"""
Given a set of characters:
+ Encode them to a one hot integer representation
+ Decode the one hot integer representation to their character output
+ Decode a vector of probabilities to their character output
"""
def __init__(self, chars, maxlen):
self.chars = sorted(set(chars))
self.char_indices = dict((c, i) for i, c in enumerate(self.chars))
self.indices_char = dict((i, c) for i, c in enumerate(self.chars))
self.maxlen = maxlen
def encode(self, C, maxlen=None):
maxlen = maxlen if maxlen else self.maxlen
X = np.zeros((maxlen, len(self.chars)))
for i, c in enumerate(C):
X[i, self.char_indices[c]] = 1
return X
def decode(self, X, calc_argmax=True):
if calc_argmax:
X = X.argmax(axis=-1)
return ''.join(self.indices_char[x] for x in X)
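# Hedged illustration (added for clarity, not part of the original example):
# a CharacterTable over the digits/'+'/space alphabet one-hot encodes a padded
# query string and decodes it back, e.g.
#
# ctable_demo = CharacterTable('0123456789+ ', 7)
# onehot = ctable_demo.encode('12+345 ')        # shape (7, 12)
# assert ctable_demo.decode(onehot) == '12+345 '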
class colors:
ok = '\033[92m'
fail = '\033[91m'
close = '\033[0m'
# Parameters for the model and dataset
TRAINING_SIZE = 50000
DIGITS = 3
INVERT = True
# Try replacing JZS1 with LSTM, GRU, or SimpleRNN
RNN = recurrent.JZS1
HIDDEN_SIZE = 128
BATCH_SIZE = 128
LAYERS = 1
MAXLEN = DIGITS + 1 + DIGITS
chars = '0123456789+ '
ctable = CharacterTable(chars, MAXLEN)
questions = []
expected = []
seen = set()
print('Generating data...')
while len(questions) < TRAINING_SIZE:
f = lambda: int(''.join(np.random.choice(list('0123456789')) for i in xrange(np.random.randint(1, DIGITS + 1))))
a, b = f(), f()
# Skip any addition questions we've already seen
# Also skip any such that X+Y == Y+X (hence the sorting)
key = tuple(sorted((a, b)))
if key in seen:
continue
seen.add(key)
# Pad the data with spaces such that it is always MAXLEN
q = '{}+{}'.format(a, b)
query = q + ' ' * (MAXLEN - len(q))
ans = str(a + b)
# Answers can be of maximum size DIGITS + 1
ans += ' ' * (DIGITS + 1 - len(ans))
if INVERT:
query = query[::-1]
questions.append(query)
expected.append(ans)
print('Total addition questions:', len(questions))
print('Vectorization...')
X = np.zeros((len(questions), MAXLEN, len(chars)), dtype=np.bool)
y = np.zeros((len(questions), DIGITS + 1, len(chars)), dtype=np.bool)
for i, sentence in enumerate(questions):
X[i] = ctable.encode(sentence, maxlen=MAXLEN)
for i, sentence in enumerate(expected):
y[i] = ctable.encode(sentence, maxlen=DIGITS + 1)
# Shuffle (X, y) in unison as the later parts of X will almost all be larger digits
X, y = shuffle(X, y)
# Explicitly set apart 10% for validation data that we never train over
split_at = len(X) - len(X) / 10
(X_train, X_val) = (slice_X(X, 0, split_at), slice_X(X, split_at))
(y_train, y_val) = (y[:split_at], y[split_at:])
print('Build model...')
model = Sequential()
# "Encode" the input sequence using an RNN, producing an output of HIDDEN_SIZE
model.add(RNN(len(chars), HIDDEN_SIZE))
# For the decoder's input, we repeat the encoded input for each time step
model.add(RepeatVector(DIGITS + 1))
# The decoder RNN could be multiple layers stacked or a single layer
for _ in xrange(LAYERS):
model.add(RNN(HIDDEN_SIZE, HIDDEN_SIZE, return_sequences=True))
# For each of step of the output sequence, decide which character should be chosen
model.add(Dense(HIDDEN_SIZE, len(chars)))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')
# Train the model each generation and show predictions against the validation dataset
for iteration in range(1, 200):
print()
print('-' * 50)
print('Iteration', iteration)
model.fit(X, y, batch_size=BATCH_SIZE, nb_epoch=1, validation_data=(X_val, y_val), show_accuracy=True)
###
# Select 10 samples from the validation set at random so we can visualize errors
for i in xrange(10):
ind = np.random.randint(0, len(X_val))
rowX, rowy = X_val[np.array([ind])], y_val[np.array([ind])]
preds = model.predict_classes(rowX, verbose=0)
q = ctable.decode(rowX[0])
correct = ctable.decode(rowy[0])
guess = ctable.decode(preds[0], calc_argmax=False)
print('Q', q[::-1] if INVERT else q)
print('T', correct)
print(colors.ok + '☑' + colors.close if correct == guess else colors.fail + '☒' + colors.close, guess)
print('---')
| mit |
chutsu/robotics | prototype/tests/msckf/test_feature_estimator.py | 1 | 9353 | import time
import random
import unittest
import cv2
import numpy as np
from numpy import dot
import matplotlib.pylab as plt
from prototype.utils.utils import deg2rad
from prototype.utils.quaternion.jpl import quat2rot as C
from prototype.utils.quaternion.jpl import euler2quat
from prototype.utils.quaternion.jpl import quatlcomp
from prototype.utils.transform import T_camera_global
from prototype.utils.transform import T_global_camera
from prototype.utils.transform import R_global_camera
# from prototype.utils.transform import R_camera_global
from prototype.data.kitti import RawSequence
from prototype.vision.common import focal_length
from prototype.vision.common import camera_intrinsics
from prototype.vision.common import rand3dfeatures
from prototype.vision.camera.camera_model import PinholeCameraModel
from prototype.vision.feature2d.keypoint import KeyPoint
from prototype.vision.feature2d.feature_track import FeatureTrack
from prototype.vision.feature2d.feature_tracker import FeatureTracker
from prototype.msckf.camera_state import CameraState
from prototype.msckf.feature_estimator import FeatureEstimator
# GLOBAL VARIABLE
RAW_DATASET = "/data/raw"
class FeatureEstimatorTest(unittest.TestCase):
def setUp(self):
# Pinhole Camera model
image_width = 640
image_height = 480
fov = 60
fx, fy = focal_length(image_width, image_height, fov)
cx, cy = (image_width / 2.0, image_height / 2.0)
K = camera_intrinsics(fx, fy, cx, cy)
self.cam_model = PinholeCameraModel(image_width, image_height, K)
# Feature estimator
self.estimator = FeatureEstimator()
def test_triangulate(self):
# Camera states
# -- Camera state 0
p_G_C0 = np.array([0.0, 0.0, 0.0])
rpy_C0G = np.array([deg2rad(0.0), deg2rad(0.0), deg2rad(0.0)])
q_C0G = euler2quat(rpy_C0G)
C_C0G = C(q_C0G)
# -- Camera state 1
p_G_C1 = np.array([1.0, 1.0, 0.0])
rpy_C1G = np.array([deg2rad(0.0), deg2rad(0.0), deg2rad(0.0)])
q_C1G = euler2quat(rpy_C1G)
C_C1G = C(q_C1G)
# Features
landmark = np.array([0.0, 0.0, 10.0])
kp1 = self.cam_model.project(landmark, C_C0G, p_G_C0)[0:2]
kp2 = self.cam_model.project(landmark, C_C1G, p_G_C1)[0:2]
# Calculate rotation and translation of first and last camera states
# -- Obtain rotation and translation from camera 0 to camera 1
C_C0C1 = dot(C_C0G, C_C1G.T)
t_C0_C1C0 = dot(C_C0G, (p_G_C1 - p_G_C0))
# -- Convert from pixel coordinates to image coordinates
pt1 = self.cam_model.pixel2image(kp1)
pt2 = self.cam_model.pixel2image(kp2)
# Triangulate
p_C0_C1C0, r = self.estimator.triangulate(pt1, pt2, C_C0C1, t_C0_C1C0)
# Assert
self.assertTrue(np.allclose(p_C0_C1C0.ravel(), landmark))
def test_estimate(self):
nb_features = 100
bounds = {
"x": {"min": 5.0, "max": 10.0},
"y": {"min": -1.0, "max": 1.0},
"z": {"min": -1.0, "max": 1.0}
}
features = rand3dfeatures(nb_features, bounds)
dt = 0.1
p_G = np.array([0.0, 0.0, 0.0])
v_G = np.array([0.1, 0.0, 0.0])
q_CG = np.array([0.5, -0.5, 0.5, -0.5])
# Setup camera states
track_cam_states = []
for i in range(10):
p_G = p_G + v_G * dt
track_cam_states.append(CameraState(i, q_CG, p_G))
# Feature Track
track_length = 10
start = 0
end = track_length
for i in range(10):
feature_idx = random.randint(0, features.shape[1] - 1)
feature = T_camera_global * features[:, feature_idx]
print("feature in global frame:",
(T_global_camera * feature).ravel())
R_C0G = dot(R_global_camera, C(track_cam_states[0].q_CG))
R_C1G = dot(R_global_camera, C(track_cam_states[1].q_CG))
p_C_C0 = T_camera_global * track_cam_states[0].p_G
p_C_C1 = T_camera_global * track_cam_states[1].p_G
kp1 = self.cam_model.project(feature, R_C0G, p_C_C0)
kp2 = self.cam_model.project(feature, R_C1G, p_C_C1)
kp1 = KeyPoint(kp1.ravel()[:2], 21)
kp2 = KeyPoint(kp2.ravel()[:2], 21)
track = FeatureTrack(start, end, kp1, kp2)
track.ground_truth = T_global_camera * feature
for i in range(2, track_length):
R_CG = dot(R_global_camera, C(track_cam_states[i].q_CG))
p_C_Ci = T_camera_global * track_cam_states[i].p_G
kp = self.cam_model.project(feature, R_CG, p_C_Ci)
kp = KeyPoint(kp.ravel()[:2], 21)
# kp[0] += np.random.normal(0, 0.1)
# kp[1] += np.random.normal(0, 0.1)
track.update(i, kp)
# Estimate feature
p_G_f = self.estimator.estimate(self.cam_model,
track,
track_cam_states)
print("estimation: ", p_G_f.ravel())
print()
# Debug
# debug = False
# debug = True
# if debug:
# C_CG = C(track_cam_states[-1].q_CG)
# p_G_C = track_cam_states[-1].p_G
# p_C_f = dot(C_CG, (p_G_f - p_G_C))
# Assert
feature_G = T_global_camera * feature
self.assertTrue(abs(p_G_f[0, 0] - feature_G[0]) < 0.1)
self.assertTrue(abs(p_G_f[1, 0] - feature_G[1]) < 0.1)
self.assertTrue(abs(p_G_f[2, 0] - feature_G[2]) < 0.1)
@unittest.skip("skip")
def test_estimate2(self):
# Load RAW KITTI dataset
data = RawSequence(RAW_DATASET, "2011_09_26", "0001")
# data = RawSequence(RAW_DATASET, "2011_09_26", "0046")
# data = RawSequence(RAW_DATASET, "2011_09_26", "0005")
K = data.calib_cam2cam["P_rect_00"].reshape((3, 4))[0:3, 0:3]
cam_model = PinholeCameraModel(1242, 375, K)
# Initialize feature tracker
img = cv2.imread(data.image_00_files[0])
tracker = FeatureTracker()
tracker.update(img)
# Setup plot
features = None
features_plot = None
pos_data = data.get_local_position(0)
debug = False
if debug:
fig = plt.figure()
plt.ion()
ax = fig.add_subplot(111)
pos_plot = ax.plot(pos_data[0], pos_data[1],
marker=".", color="blue")[0]
# ax.set_xlim([-60.0, 5.0])
# ax.set_ylim([-60.0, 5.0])
ax.set_xlim([0.0, 50.0])
ax.set_ylim([-100.0, 0.0])
fig.canvas.draw()
plt.show(block=False)
# Setup feature tracks
tracks = []
for i in range(1, 10):
# Track features
img = cv2.imread(data.image_00_files[i])
tracker.update(img, True)
if cv2.waitKey(1) == 113:
exit(0)
tracks = tracker.remove_lost_tracks()
# Loop feature tracks
p_G_f = None
for track in tracks:
if track.tracked_length() < 8:
continue
# Setup feature track camera states
track_cam_states = []
for j in range(track.tracked_length()):
frame_id = track.frame_start + j
imu_q_IG = euler2quat(data.get_attitude(frame_id))
imu_p_G = data.get_local_position(frame_id)
ext_q_CI = np.array([0.5, -0.5, 0.5, -0.5])
cam_q_IG = dot(quatlcomp(ext_q_CI), imu_q_IG)
cam_p_G = imu_p_G
cam_state = CameraState(frame_id, cam_q_IG, cam_p_G)
track_cam_states.append(cam_state)
# Estimate feature track
p_G_f = self.estimator.estimate(cam_model,
track,
track_cam_states)
if p_G_f is not None:
C_CG = C(track_cam_states[-1].q_CG)
p_G_C = track_cam_states[-1].p_G
p_C_f = dot(C_CG, (p_G_f - p_G_C))
print("p_G_f: ", p_G_f.ravel())
print("p_C_f: ", p_C_f.ravel())
print()
# Plot
pos = data.get_local_position(i)
pos_data = np.hstack((pos_data, pos))
if debug:
if features is None and p_G_f is not None:
features = p_G_f
features_plot = ax.plot(p_G_f[0], p_G_f[1],
marker="x", color="red", ls='')[0]
elif p_G_f is not None:
features = np.hstack((features, p_G_f))
features_plot.set_xdata(features[0, :])
features_plot.set_ydata(features[1, :])
pos_plot.set_xdata(pos_data[0, :])
pos_plot.set_ydata(pos_data[1, :])
ax.relim()
ax.autoscale_view(True, True, True)
fig.canvas.draw()
| gpl-3.0 |
ssanderson/pstats-view | setup.py | 1 | 1151 | from __future__ import print_function
import sys
from distutils.version import StrictVersion
from setuptools import setup
MIN_PIP_VERSION = StrictVersion('7.1.0')
setup(
name='pstats-view',
version='0.1',
description='A Graphical Viewer for CProfile Output',
author='Scott Sanderson',
author_email='[email protected]',
packages=['pstatsviewer'],
license='Apache 2.0',
include_package_data=True,
zip_safe=False,
classifiers=[
'Framework :: IPython',
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
'Topic :: Office/Business :: Financial',
'Topic :: Scientific/Engineering :: Information Analysis',
],
install_requires=[
'notebook',
'qgrid>=0.3.1',
'seaborn',
],
url="https://github.com/ssanderson/pstats-view"
)
| mit |
VillarrealA/pyoptools | pyoptools/gui/glwindow.py | 9 | 19554 | #!/usr/bin/env python
# This includes the two classes wxGLWindow and wxAdvancedGLWindow
# from OpenGL.TK in the PyOpenGL distribution
# ported to wxPython by greg Landrum
# modified by Y. Wong
# modified by R. Amezquita
# modified by O. Olarte
from OpenGL.GL import *
from OpenGL.GLU import *
from wx import *
from wx.glcanvas import *
import math
import os,sys
def test_data(npoints):
#A simple testing function that generates random triangles
# from npoints random points (x,y) in the plane
import numpy
import matplotlib.delaunay as triang
x,y = numpy.array(numpy.random.standard_normal((2,npoints)))
z = numpy.array(numpy.random.standard_normal(npoints))
points=[]
for i in range(npoints):
points.append((x[i],y[i],z[i]))
cens,edg,tri,neig = triang.delaunay(x,y)
return points, tri
def glTranslateScene(s, x, y, mousex, mousey):
glMatrixMode(GL_MODELVIEW)
mat = glGetDoublev(GL_MODELVIEW_MATRIX)
glLoadIdentity()
glTranslatef(s * (x - mousex), s * (mousey - y), 0.0)
glMultMatrixd(mat)
def glRotateScene(s, xcenter, ycenter, zcenter, x, y, mousex, mousey):
glMatrixMode(GL_MODELVIEW)
mat = glGetDoublev(GL_MODELVIEW_MATRIX)
glLoadIdentity()
glTranslatef(xcenter, ycenter, zcenter)
glRotatef(s * (y - mousey), 1., 0., 0.)
glRotatef(s * (x - mousex), 0., 1., 0.)
glTranslatef(-xcenter, -ycenter, -zcenter)
glMultMatrixd(mat)
def v3distsq(a,b):
d = ( a[0] - b[0], a[1] - b[1], a[2] - b[2] )
return d[0]*d[0] + d[1]*d[1] + d[2]*d[2]
# This code is needed to avoid faults on sys.exit()
import sys
oldexitfunc = None
if hasattr(sys, 'exitfunc'):
oldexitfunc = sys.exitfunc
def cleanup():
if oldexitfunc: oldexitfunc()
sys.exitfunc = cleanup
class wxGLWindow(GLCanvas):
"""Implements a simple wxPython OpenGL window.
This class provides a simple window, into which GL commands can be issued. This is done by overriding the built in functions InitGL(), DrawGL(), and FinishGL(). The main difference between it and the plain wxGLCanvas is that it copes with refreshing and resizing the window"""
def __init__(self, parent,*args,**kw):
self.GL_uninitialised = 1
apply(GLCanvas.__init__,(self, parent)+args, kw)
EVT_SIZE(self,self.wxSize)
EVT_PAINT(self,self.wxPaint)
EVT_ERASE_BACKGROUND(self, self.wxEraseBackground)
self.w, self.h = self.GetClientSizeTuple()
def __del__(self):
# self.SetCurrent()
self.FinishGL()
def InitGL(self):
"""OpenGL initialisation routine (to be overridden).
This routine, containing purely OpenGL commands, should be overridden by the user to set up the GL scene.
If it is not overridden, it defaults to setting an ambient light, setting the background colour to gray,
and enabling GL_DEPTH_TEST and GL_COLOR_MATERIAL."""
#set up lighting
glLightfv(GL_LIGHT0, GL_AMBIENT, [1.0, 1.0, 1.0, 1.0])
glEnable(GL_LIGHTING)
glEnable(GL_LIGHT0)
glClearColor(0.7,0.7,0.7,0.0)
glShadeModel(GL_SMOOTH)
glDepthFunc(GL_LESS)
glEnable(GL_DEPTH_TEST)
glEnable(GL_COLOR_MATERIAL)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
def FinishGL(self):
"""OpenGL closing routine (to be overridden).
This routine should be overridden if any OpenGL commands need to be issued when deleting the GLWindow (e.g. deleting Display Lists)."""
pass
def DrawGL(self):
"""OpenGL drawing routine (to be overridden).
This routine, containing purely OpenGL commands, should be overridden by the user to draw the GL scene.
If it is not overridden, it defaults to drawing a colour cube."""
#Draw colour cube
glBegin(GL_QUAD_STRIP)
glColor3f(1.0,1.0,1.0) #corner 1
glNormal3f(0.57735027, 0.57735027, 0.57735027)
glVertex3f(0.5, 0.5, 0.5)
glColor3f(1.0,0.0,1.0) #corner 2
glNormal3f(0.57735027, -0.57735027, 0.57735027)
glVertex3f(0.5, -0.5, 0.5)
glColor3f(1.0,1.0,0.0) #corner 3
glNormal3f(0.57735027, 0.57735027, -0.57735027)
glVertex3f(0.5, 0.5, -0.5)
glColor3f(1.0,0.0,0.0) #corner 4
glNormal3f(0.57735027, -0.57735027, -0.57735027)
glVertex3f(0.5, -0.5, -0.5)
glColor3f(0.0,1.0,0.0) #corner 5
glNormal3f(-0.57735027, 0.57735027, -0.57735027)
glVertex3f(-0.5, 0.5, -0.5)
glColor3f(0.0,0.0,0.0) #corner 6
glNormal3f(-0.57735027, -0.57735027, -0.57735027)
glVertex3f(-0.5, -0.5, -0.5)
glColor3f(0.0,1.0,1.0) #corner 7
glNormal3f(-0.57735027, 0.57735027, 0.57735027)
glVertex3f(-0.5, 0.5, 0.5)
glColor3f(0.0,0.0,1.0) #corner 8
glNormal3f(-0.57735027, -0.57735027, 0.57735027)
glVertex3f(-0.5, -0.5, 0.5)
glColor3f(1.0,1.0,1.0) #corner 1
glNormal3f(0.57735027, 0.57735027, 0.57735027)
glVertex3f(0.5, 0.5, 0.5)
glColor3f(1.0,0.0,1.0) #corner 2
glNormal3f(0.57735027, -0.57735027, 0.57735027)
glVertex3f(0.5, -0.5, 0.5)
glEnd()
glBegin(GL_QUADS)
glColor3f(1.0,1.0,1.0) #corner 1
glNormal3f(0.57735027, 0.57735027, 0.57735027)
glVertex3f(0.5, 0.5, 0.5)
glColor3f(1.0,1.0,0.0) #corner 3
glNormal3f(0.57735027, 0.57735027, -0.57735027)
glVertex3f(0.5, 0.5, -0.5)
glColor3f(0.0,1.0,0.0) #corner 5
glNormal3f(-0.57735027, 0.57735027, -0.57735027)
glVertex3f(-0.5, 0.5, -0.5)
glColor3f(0.0,1.0,1.0) #corner 7
glNormal3f(-0.57735027, 0.57735027, 0.57735027)
glVertex3f(-0.5, 0.5, 0.5)
glColor3f(1.0,0.0,1.0) #corner 2
glNormal3f(0.57735027, -0.57735027, 0.57735027)
glVertex3f(0.5, -0.5, 0.5)
glColor3f(1.0,0.0,0.0) #corner 4
glNormal3f(0.57735027, -0.57735027, -0.57735027)
glVertex3f(0.5, -0.5, -0.5)
glColor3f(0.0,0.0,0.0) #corner 6
glNormal3f(-0.57735027, -0.57735027, -0.57735027)
glVertex3f(-0.5, -0.5, -0.5)
glColor3f(0.0,0.0,1.0) #corner 8
glNormal3f(-0.57735027, -0.57735027, 0.57735027)
glVertex3f(-0.5, -0.5, 0.5)
glEnd()
def wxSize(self, event = None):
"""Called when the window is resized"""
self.w,self.h = self.GetClientSizeTuple()
if self.GetContext():
self.SetCurrent()
glViewport(0, 0, self.w, self.h)
self.Update()
def wxEraseBackground(self, event):
"""Routine does nothing, but prevents flashing"""
pass
def wxPaint(self, event=None):
"""Called on a paint event.
This sets the painting drawing context, then calls the base routine wxRedrawGL()"""
dc = PaintDC(self)
self.wxRedrawGL(event)
def wxRedraw(self, event=None):
"""Called on a redraw request
This sets the drawing context, then calls the base routine wxRedrawGL(). It can be called by the user when a refresh is needed"""
dc = ClientDC(self)
self.wxRedrawGL(event)
def wxRedrawGL(self, event=None):
"""This is the routine called when drawing actually takes place.
It needs to be separate so that it can be called by both paint events and by other events. It should not be called directly"""
self.SetCurrent()
if self.GL_uninitialised:
glViewport(0, 0, self.w, self.h)
self.InitGL()
self.GL_uninitialised=0
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glLoadIdentity()
glMatrixMode(GL_MODELVIEW);
glPushMatrix();
self.DrawGL() # Actually draw here
glPopMatrix();
glFlush() # Flush
self.SwapBuffers() # Swap buffers
if event: event.Skip() # Pass event up
self.Update()
class wxAdvancedGLWindow(wxGLWindow):
"""Implements a wxPython OpenGL window allowing spinning, zooming, etc.
This class is derived from wxGLWindow, and can be used in exactly the
same way, by overriding the functions InitGL(), FinishGL(), and DrawGL()
with functions containing OpenGL commands. The window captures mouse
events, and keypresses. You might want to override some of these
functions if you need more sophisticated control"""
def __init__(self, parent,*args,**kw):
if kw.has_key('autospin_allowed'):
# Is the widget allowed to autospin?
self.autospin_allowed = kw['autospin_allowed']
del kw['autospin_allowed']
else:
self.autospin_allowed = 0
apply(wxGLWindow.__init__,(self, parent)+args, kw)
# The _back color
self.r_back = 0.
self.g_back = 0.
self.b_back = 0.
# Where the eye is
#self.base_distance = self.distance = 10.0
self.base_distance = self.distance = 1000.0
# Field of view in y direction
self.fovy = 30.0
# Position of clipping planes.
self.near = 0.1
self.far = 10000.0
# Where we are centering.
self.xcenter = 0.0
self.ycenter = 0.0
self.zcenter = 0.0
self.parent = parent
# Current coordinates of the mouse.
self.xmouse = 0
self.ymouse = 0
self.xspin = 0
self.yspin = 0
# Is the widget currently autospinning?
self.autospin = 0
self.initLeft = (0,0)
EVT_SIZE(self,self.wxSize)
EVT_PAINT(self,self.wxPaint)
EVT_ERASE_BACKGROUND(self, self.wxEraseBackground)
EVT_CHAR(self,self.OnChar)
EVT_LEFT_DOWN(self,self.OnLeftClick)
EVT_LEFT_DCLICK(self,self.OnLeftDClick)
EVT_LEFT_UP(self,self.OnLeftUp)
EVT_MIDDLE_DOWN(self,self.OnMiddleClick)
EVT_RIGHT_DOWN(self,self.OnRightClick)
EVT_RIGHT_DCLICK(self,self.OnRightDClick)
EVT_MOTION(self,self.wxMouseMotion)
EVT_IDLE(self,self.wxIdle)
def wxIdle(self,event):
if self.autospin:
# self.do_AutoSpin(event) #doing it this way hogs the cpu
# event.RequestMore() #doing it this way hogs the cpu
WakeUpIdle()
self.do_AutoSpin(event)
event.Skip(1)
def OnChar(self,event):
key = event.GetKeyCode()
if key == ord('a'):
self.autospin_allowed = not self.autospin_allowed
if self.autospin:
self.autospin = 0
elif key == ord('q'):
self.parent.Destroy()
def OnLeftClick(self,event):
self.wxRecordMouse(event)
self.initLeft = event.GetX(),event.GetY()
def OnLeftDClick(self,event):
self.wxRecordMouse(event)
self.reset()
def OnLeftUp(self,event):
if not event.m_shiftDown:
self.wxAutoSpin(event)
def OnMiddleClick(self,event):
self.wxRecordMouse(event)
def OnRightClick(self,event):
self.wxRecordMouse(event)
def OnRightDClick(self,event):
self.wxRecordMouse(event)
self.distance=self.base_distance
self.wxRedraw()
def OnLeftDrag(self,event):
self.wxRotate(event)
def OnMiddleDrag(self,event):
self.wxTranslate(event)
def OnRightDrag(self,event):
self.wxScale(event)
def wxMouseMotion(self,event):
if not event.Dragging():
return
if event.LeftIsDown():
self.OnLeftDrag(event)
elif event.MiddleIsDown():
self.OnMiddleDrag(event)
elif event.RightIsDown():
self.OnRightDrag(event)
def report_opengl_errors(message = "OpenGL error:"):
"""Report any opengl errors that occured while drawing."""
while 1:
err_value = glGetError()
if not err_value: break
print message, gluErrorString(err_value)
def SetBgColour(self, r, g, b):
"""Change the background colour of the widget.
There seems to be a problem with this:"""
self.r_back = r
self.g_back = g
self.b_back = b
self.wxRedraw()
def SetCenterpoint(self, x, y, z):
"""Set the new center point for the model.
This is where we are looking."""
self.xcenter = x
self.ycenter = y
self.zcenter = z
self.wxRedraw()
def set_base_distance(self, distance):
"""Set how far the eye is from the position we are looking.
Sets the base distance, to which we are returned if we double click"""
self.base_distance = distance
def set_distance(self, distance):
"""Set how far the eye is from the position we are looking."""
self.distance = distance
self.wxRedraw()
def reset(self):
"""Reset rotation matrix for this widget."""
glMatrixMode(GL_MODELVIEW);
glLoadIdentity()
self.wxRedraw()
# def wxHandlePick(self, event):
# """Handle a pick on the scene."""
# pass
def wxRecordMouse(self, event):
"""Record the current mouse position."""
self.xmouse = event.GetX()
self.ymouse = event.GetY()
def wxStartRotate(self, event):
# Switch off any autospinning if it was happening
self.autospin = 0
self.wxRecordMouse(event)
def wxScale(self, event):
"""Scale the scene. Achieved by moving the eye position."""
scale = 1 - 0.01 * (event.GetY() - self.ymouse)
self.distance = self.distance * scale
self.wxRedraw()
self.wxRecordMouse(event)
def do_AutoSpin(self,event):
s = 0.5
glRotateScene(0.5,
self.xcenter, self.ycenter, self.zcenter,
self.yspin, self.xspin, 0, 0)
self.wxRedraw()
def wxAutoSpin(self, event):
"""Perform autospin of scene."""
if self.autospin_allowed:
self.autospin = 1
self.yspin = .1 * (event.GetX()-self.initLeft[0])
self.xspin = .1 * (event.GetY()-self.initLeft[1])
if self.xspin == 0 and self.yspin == 0:
self.autospin = 0
else:
self.do_AutoSpin(event)
def wxRotate(self, event):
"""Perform rotation of scene."""
if not event.m_shiftDown:
glRotateScene(0.5,
self.xcenter, self.ycenter, self.zcenter,
event.GetX(), event.GetY(), self.xmouse, self.ymouse)
else:
# rotate about z
sz = self.GetClientSizeTuple()
sz = (sz[0]/2, sz[1]/2)
xp = event.GetX()
yp = event.GetY()
dy = (self.ymouse-yp)
dx = (self.xmouse-xp)
if yp > sz[1]:
dx = dx * -1
if xp < sz[0]:
dy = dy * -1
d = dx + dy
glMatrixMode(GL_MODELVIEW);
m = glGetDouble(GL_MODELVIEW_MATRIX)
glLoadIdentity()
glTranslatef(self.xcenter,self.ycenter,self.zcenter)
glRotatef(.5*d,0,0,1.)
glTranslatef(-self.xcenter,-self.ycenter,-self.zcenter)
#glMultMatrixd(ravel(m)) #from Numeric...
glMultMatrixd(m)
self.wxRedraw()
self.wxRecordMouse(event)
def wxTranslate(self, event):
"""Perform translation of scene."""
# Scale mouse translations to object viewplane so object tracks with mouse
win_height = max( 1,self.w)
obj_c = (self.xcenter, self.ycenter, self.zcenter)
win = gluProject( obj_c[0], obj_c[1], obj_c[2] )
obj = gluUnProject( win[0], win[1] + 0.5 * win_height, win[2] )
dist = math.sqrt( v3distsq( obj, obj_c ) )
scale = abs( dist / ( 0.5 * win_height ) )
glTranslateScene(scale, event.GetX(), event.GetY(), self.xmouse, self.ymouse)
self.wxRedraw()
self.wxRecordMouse(event)
def wxRedrawGL(self, event=None):
"""Method used to actually draw the scene.
This is more complex than in the wxGLWindow class from which this
class is derived, as we need to do rotations, translations, etc."""
self.SetCurrent()
if self.GL_uninitialised:
glViewport(0, 0, self.w, self.h)
self.InitGL()
self.GL_uninitialised = 0
# Clear the background and depth buffer.
glClearColor(self.r_back, self.g_back, self.b_back, 0.0)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glMatrixMode(GL_PROJECTION);
glLoadIdentity()
gluPerspective(self.fovy, float(self.w)/float(self.h), self.near, self.far)
gluLookAt(self.xcenter, self.ycenter, self.zcenter + self.distance,
self.xcenter, self.ycenter, self.zcenter,
0., 1., 0.)
glMatrixMode(GL_MODELVIEW);
glPushMatrix();
self.DrawGL() # Actually draw here
glPopMatrix();
glFlush() # Tidy up
self.SwapBuffers()
if event: event.Skip()
#-----------------------------------------------------
if __name__ == '__main__':
from OpenGL.GLUT import *
import array #for creating the texture map
class MyApp(App):
def OnInit(self):
frame = Frame(None, -1, "wxPython OpenGL example", DefaultPosition, Size(400,400))
#win1 = wxGLWindow(frame, -1, Point(5,5), Size(190,190))
#win2 = wxAdvancedGLWindow(frame, -1, Point(205,5), Size(190,190), autospin_allowed = 0)
win3 = MyWin1_1(frame, -1, Point(5,205), Size(190,190), autospin_allowed = 0)
#win4 = MyWin2(frame, -1, Point(205,205), Size(190,190))
# win1.SetScrollbars(0,0,0,0)
# win4.SetScrollbars(0,0,0,0)
# win3.SetBgColour(0.0,0.0,1.0)
frame.Show(True)
self.SetTopWindow(frame)
return True
class MyWin1(wxAdvancedGLWindow):
"""basic example of a wxAdvancedGLWindow"""
def DrawGL(self):
glColor3f(1.0,0.3,0.3)
glutSolidCone(1.0,2,20,16)
glRotatef(180.0,0.0,1.0,0.0)
glColor3f(0.3,1.0,0.3)
glutSolidCone(1.0,1,20,16)
glLoadIdentity()
class MyWin1_1(wxAdvancedGLWindow):
"""basic example of a wxAdvancedGLWindow with basic triangles"""
def InitGL(self):
self.points,self.polylist=test_data(1000)
print self.points, self.polylist
def DrawGL(self):
glColor4f(.1,.7,.7, 0.5)
for p in self.polylist:
if len(p)==3:
p0=self.points[p[0]]
p1=self.points[p[1]]
p2=self.points[p[2]]
glBegin(GL_TRIANGLES) #Drawing Using Triangles
glVertex3f( p0[0], p0[1], p0[2])
glVertex3f( p1[0], p1[1], p1[2])
glVertex3f( p2[0], p2[1], p2[2])
glEnd()
class MyWin2(wxAdvancedGLWindow):
"""example using display lists"""
def InitGL(self):
self.uninitialised = 1
glClearColor (0.0, 0.0, 0.0, 0.0);
glEnable(GL_DEPTH_TEST);
glShadeModel(GL_SMOOTH);
self.stripeImageWidth=32
temp = array.array('B')
for x in range(5):
temp.fromlist([255,0,0,255])
for x in range(self.stripeImageWidth-5):
temp.fromlist([0,255,0,255])
self.stripeImage = temp.tostring()
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
self.texName=glGenTextures(1)
glBindTexture(GL_TEXTURE_2D, self.texName)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, self.stripeImageWidth,1,0,
GL_RGBA, GL_UNSIGNED_BYTE, [self.stripeImage])
glTexImage2D(GL_TEXTURE_2D, 0, 4, self.stripeImageWidth, 1, 0,
GL_RGBA, GL_UNSIGNED_BYTE, [self.stripeImage])
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE)
glTexGeni(GL_S, GL_TEXTURE_GEN_MODE, GL_EYE_LINEAR)
glTexGenfv(GL_S, GL_EYE_PLANE, [1.0, 1.0, 1.0, 0.0])
glEnable(GL_TEXTURE_GEN_S);
glEnable(GL_TEXTURE_2D);
glEnable(GL_CULL_FACE);
glEnable(GL_LIGHTING);
glEnable(GL_LIGHT0);
glEnable(GL_AUTO_NORMAL);
glEnable(GL_NORMALIZE);
glFrontFace(GL_CW);
glCullFace(GL_BACK);
glMaterialf (GL_FRONT, GL_SHININESS, 64.0);
self.DispList=glGenLists(1)
def DrawGL(self):
if self.uninitialised:
glNewList(self.DispList, GL_COMPILE)
glRotatef(45.0, 0.0, 0.0, 1.0);
glBindTexture(GL_TEXTURE_2D, self.texName);
glutSolidTeapot(2.0);
glEndList()
self.uninitialised = 0
glCallList(self.DispList)
def FinishGL(self):
if self.DispList:
glDeleteLists(self.DispList)
app = MyApp(0)
app.MainLoop()
| bsd-3-clause |
DLR-RM/amp | plot/plot_samples.py | 1 | 2551 | #!/usr/bin/python
import sys
import numpy as np
import scipy.io
import matplotlib as mpl
mpl.use('GTKAgg')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import os.path
def plot_results():
#
# Get the data sizes
#
n_nodes=0
samples = np.loadtxt("tree_samples_0.dat")
if (samples.ndim > 1):
n_nodes=samples.shape[0]
elif (samples.size > 1):
n_nodes=1
n_failed=0
if (os.path.isfile("failed_samples_0.dat")):
failed_samples = np.loadtxt("failed_samples_0.dat")
if (failed_samples.ndim > 1):
n_failed=failed_samples.shape[0]
elif (failed_samples.size > 1):
n_failed=1
#
# Load edge files and plot in 3d
#
fig = plt.figure(1)
ax = fig.add_subplot(111,projection='3d')
for x in range(0, n_failed):
new_edge = np.loadtxt("failed_tcps_"+str(x)+".dat")
# edges = np.append(edges, new_edge, axis=0)
ax.plot(new_edge[:,1], new_edge[:,2], new_edge[:,3], c='r',
linewidth='1.0', linestyle='-')
for x in range(0, n_nodes-1):
new_edge = np.loadtxt("tree_tcps_"+str(x)+".dat")
# edges = np.append(edges, new_edge, axis=0)
ax.plot(new_edge[:,1], new_edge[:,2], new_edge[:,3], c='b',
linewidth='2.0', linestyle='-')
#
# Plot nodes as scatter
#
if (n_failed > 1):
ax.scatter(failed_samples[:,1], failed_samples[:,2], failed_samples[:,3], c='r', marker='o')
elif (n_failed > 0):
ax.scatter(failed_samples[1], failed_samples[2], failed_samples[3], c='r', marker='o')
if (n_nodes > 1):
ax.scatter(samples[:,1], samples[:,2], samples[:,3], c='b', marker='^')
elif (n_nodes > 0):
ax.scatter(samples[1], samples[2], samples[3], c='b', marker='^')
ax.grid(True)
ax.autoscale(True)
ax.axis('tight')
ax.set_xlabel('X (m)')
ax.set_ylabel('Y (m)')
ax.set_zlabel('Z (m)')
#
# Plot Quaternions as scatter
#
fig = plt.figure(2)
ax = fig.add_subplot(111,projection='3d')
if (n_failed > 1):
ax.scatter(failed_samples[:,4], failed_samples[:,5], failed_samples[:,6], c='r', marker='o')
elif (n_failed > 0):
ax.scatter(failed_samples[4], failed_samples[5], failed_samples[6], c='r', marker='o')
if (n_nodes > 1):
ax.scatter(samples[:,4], samples[:,5], samples[:,6], c='b', marker='^')
elif (n_nodes > 0):
ax.scatter(samples[4], samples[5], samples[6], c='b', marker='^')
ax.grid(True)
ax.autoscale(True)
ax.axis('tight')
ax.set_xlabel('Qx')
ax.set_ylabel('Qy')
ax.set_zlabel('Qz')
if __name__ == '__main__':
print 'Number of arguments:', len(sys.argv), 'arguments.'
print 'Argument List:', str(sys.argv)
plot_results()
plt.show(True)
| lgpl-3.0 |
ngoix/OCRF | sklearn/tests/test_kernel_approximation.py | 78 | 7586 | import numpy as np
from scipy.sparse import csr_matrix
from sklearn.utils.testing import assert_array_equal, assert_equal, assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal, assert_raises
from sklearn.utils.testing import assert_less_equal
from sklearn.metrics.pairwise import kernel_metrics
from sklearn.kernel_approximation import RBFSampler
from sklearn.kernel_approximation import AdditiveChi2Sampler
from sklearn.kernel_approximation import SkewedChi2Sampler
from sklearn.kernel_approximation import Nystroem
from sklearn.metrics.pairwise import polynomial_kernel, rbf_kernel
# generate data
rng = np.random.RandomState(0)
X = rng.random_sample(size=(300, 50))
Y = rng.random_sample(size=(300, 50))
X /= X.sum(axis=1)[:, np.newaxis]
Y /= Y.sum(axis=1)[:, np.newaxis]
def test_additive_chi2_sampler():
# test that AdditiveChi2Sampler approximates kernel on random data
# compute exact kernel
# abbreviations for easier formula
X_ = X[:, np.newaxis, :]
Y_ = Y[np.newaxis, :, :]
large_kernel = 2 * X_ * Y_ / (X_ + Y_)
# reduce to n_samples_x x n_samples_y by summing over features
kernel = (large_kernel.sum(axis=2))
# approximate kernel mapping
transform = AdditiveChi2Sampler(sample_steps=3)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
X_sp_trans = transform.fit_transform(csr_matrix(X))
Y_sp_trans = transform.transform(csr_matrix(Y))
assert_array_equal(X_trans, X_sp_trans.A)
assert_array_equal(Y_trans, Y_sp_trans.A)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
# test error on invalid sample_steps
transform = AdditiveChi2Sampler(sample_steps=4)
assert_raises(ValueError, transform.fit, X)
# test that the sample interval is set correctly
sample_steps_available = [1, 2, 3]
for sample_steps in sample_steps_available:
# test that the sample_interval is initialized correctly
transform = AdditiveChi2Sampler(sample_steps=sample_steps)
assert_equal(transform.sample_interval, None)
# test that the sample_interval is changed in the fit method
transform.fit(X)
assert_not_equal(transform.sample_interval_, None)
# test that the sample_interval is set correctly
sample_interval = 0.3
transform = AdditiveChi2Sampler(sample_steps=4,
sample_interval=sample_interval)
assert_equal(transform.sample_interval, sample_interval)
transform.fit(X)
assert_equal(transform.sample_interval_, sample_interval)
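# Hedged note (added for illustration, not part of the original tests): the
# exact additive chi-squared kernel computed above is
# k(x, y) = sum_i 2 * x_i * y_i / (x_i + y_i); for x = [0.3, 0.7] and
# y = [0.5, 0.5] this gives 2*0.15/0.8 + 2*0.35/1.2 = 0.375 + 0.5833... ~ 0.9583.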
def test_skewed_chi2_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
c = 0.03
# abbreviations for easier formula
X_c = (X + c)[:, np.newaxis, :]
Y_c = (Y + c)[np.newaxis, :, :]
# we do it in log-space in the hope that it's more stable
# this array is n_samples_x x n_samples_y big x n_features
log_kernel = ((np.log(X_c) / 2.) + (np.log(Y_c) / 2.) + np.log(2.) -
np.log(X_c + Y_c))
# reduce to n_samples_x x n_samples_y by summing over features in log-space
kernel = np.exp(log_kernel.sum(axis=2))
# approximate kernel mapping
transform = SkewedChi2Sampler(skewedness=c, n_components=1000,
random_state=42)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
def test_rbf_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
gamma = 10.
kernel = rbf_kernel(X, Y, gamma=gamma)
# approximate kernel mapping
rbf_transform = RBFSampler(gamma=gamma, n_components=1000, random_state=42)
X_trans = rbf_transform.fit_transform(X)
Y_trans = rbf_transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
error = kernel - kernel_approx
assert_less_equal(np.abs(np.mean(error)), 0.01) # close to unbiased
np.abs(error, out=error)
assert_less_equal(np.max(error), 0.1) # nothing too far off
assert_less_equal(np.mean(error), 0.05) # mean is fairly close
def test_input_validation():
# Regression test: kernel approx. transformers should work on lists
# No assertions; the old versions would simply crash
X = [[1, 2], [3, 4], [5, 6]]
AdditiveChi2Sampler().fit(X).transform(X)
SkewedChi2Sampler().fit(X).transform(X)
RBFSampler().fit(X).transform(X)
X = csr_matrix(X)
RBFSampler().fit(X).transform(X)
def test_nystroem_approximation():
# some basic tests
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 4))
# With n_components = n_samples this is exact
X_transformed = Nystroem(n_components=X.shape[0]).fit_transform(X)
K = rbf_kernel(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
trans = Nystroem(n_components=2, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test callable kernel
linear_kernel = lambda X, Y: np.dot(X, Y.T)
trans = Nystroem(n_components=2, kernel=linear_kernel, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test that available kernels fit and transform
kernels_available = kernel_metrics()
for kern in kernels_available:
trans = Nystroem(n_components=2, kernel=kern, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
def test_nystroem_singular_kernel():
# test that nystroem works with singular kernel matrix
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
X = np.vstack([X] * 2) # duplicate samples
gamma = 100
N = Nystroem(gamma=gamma, n_components=X.shape[0]).fit(X)
X_transformed = N.transform(X)
K = rbf_kernel(X, gamma=gamma)
assert_array_almost_equal(K, np.dot(X_transformed, X_transformed.T))
assert_true(np.all(np.isfinite(Y)))
def test_nystroem_poly_kernel_params():
# Non-regression: Nystroem should pass other parameters beside gamma.
rnd = np.random.RandomState(37)
X = rnd.uniform(size=(10, 4))
K = polynomial_kernel(X, degree=3.1, coef0=.1)
nystroem = Nystroem(kernel="polynomial", n_components=X.shape[0],
degree=3.1, coef0=.1)
X_transformed = nystroem.fit_transform(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
def test_nystroem_callable():
# Test Nystroem on a callable.
rnd = np.random.RandomState(42)
n_samples = 10
X = rnd.uniform(size=(n_samples, 4))
def logging_histogram_kernel(x, y, log):
"""Histogram kernel that writes to a log."""
log.append(1)
return np.minimum(x, y).sum()
kernel_log = []
X = list(X) # test input validation
Nystroem(kernel=logging_histogram_kernel,
n_components=(n_samples - 1),
kernel_params={'log': kernel_log}).fit(X)
assert_equal(len(kernel_log), n_samples * (n_samples - 1) / 2)
| bsd-3-clause |
stylianos-kampakis/scikit-learn | examples/superpca_vs_elasticnet.py | 1 | 2868 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=====================
Supervised PCA against LDA and QDA
=====================
A comparison of supervised PCA against elastic net.
The data are artificially created and the variables can be described by a
lower dimensional space.
Supervised PCA shows better results until about 100 features. The performance of all models
is similar after that point. A supervisedPCA model with a threshold=0.7 works with a smaller
number of components, while having similar performance to the rest of the models. This can be particularly
useful in situations where interpretability is important.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Andreas Müller
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_regression
from supervised_pca import SupervisedPCARegressor
from sklearn.linear_model import ElasticNet
total_range=50
performances={}
names = ["ElasticNet","SuperPCA thres=0","SuperPCA thres=0.1","SuperPCA thres=0.7"]
ncomponents={names[1]:[],names[2]:[],names[3]:[]}
for name in names:
performances[name]=[]
# iterate over classifiers
for i in range(1,total_range):
print(i)
classifiers = [
ElasticNet(),
SupervisedPCARegressor(threshold=0),
SupervisedPCARegressor(threshold=0.1),
SupervisedPCARegressor(threshold=0.7)
]
X, y = make_regression(n_features=i*5, n_informative=i*4,
random_state=1,effective_rank=i)
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)
for name, clf in zip(names, classifiers):
clf.fit(X_train, y_train)
pred=clf.predict(X_test)
score = np.mean(abs(y_test-pred))
performances[name].append(score)
try:
ncomponents[name].append(clf.get_n_components())
except:
pass
x=[k*5 for k in range(1,total_range)]
plt.figure()
plt.subplot(311)
plt.title("MAE against number of features")
for name in names:
plt.plot(x,performances[name])
plt.legend(labels=names,loc="best")
plt.subplot(312)
plt.title("MAE boxplots for each classifier")
dummy=[]
for name in names:
dummy.append(performances[name])
plt.boxplot(dummy,labels=names)
plt.subplot(313)
plt.title("Number of components against features")
plotcomponentsSPCA0=plt.plot(x,ncomponents[names[1]])
plotComponentsSPCA01=plt.plot(x,ncomponents[names[2]])
plotComponentsSPCA07=plt.plot(x,ncomponents[names[3]])
plt.legend([names[1],names[2],names[3]],loc="best")
| bsd-3-clause |
SKA-ScienceDataProcessor/algorithm-reference-library | tests/workflows/test_mpc_serial.py | 1 | 7817 | """ Unit tests for pipelines expressed via arlexecute
"""
import logging
import sys
import unittest
import numpy
from astropy import units as u
from astropy.coordinates import SkyCoord
from data_models.memory_data_models import Image, SkyModel
from data_models.memory_data_models import Skycomponent
from data_models.polarisation import PolarisationFrame
from wrappers.serial.skymodel.operations import expand_skymodel_by_skycomponents
from workflows.serial.skymodel.skymodel_serial import predict_skymodel_list_serial_workflow, \
invert_skymodel_list_serial_workflow, crosssubtract_datamodels_skymodel_list_serial_workflow
from workflows.shared.imaging.imaging_shared import sum_predict_results
from wrappers.serial.simulation.testing_support import ingest_unittest_visibility, \
create_low_test_skymodel_from_gleam
from processing_components.simulation.configurations import create_named_configuration
from wrappers.serial.visibility.base import copy_visibility
from wrappers.serial.visibility.coalesce import convert_blockvisibility_to_visibility
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
log.addHandler(logging.StreamHandler(sys.stdout))
log.addHandler(logging.StreamHandler(sys.stderr))
class TestMPC(unittest.TestCase):
def setUp(self):
from data_models.parameters import arl_path
self.dir = arl_path('test_results')
self.plot = False
self.persist = False
def actualSetUp(self, freqwin=1, block=True, dopol=False, zerow=False):
self.npixel = 1024
self.low = create_named_configuration('LOWBD2', rmax=550.0)
self.freqwin = freqwin
self.blockvis_list = list()
self.ntimes = 5
self.cellsize = 0.0005
# Choose the interval so that the maximum change in w is smallish
integration_time = numpy.pi * (24 / (12 * 60))
self.times = numpy.linspace(-integration_time * (self.ntimes // 2), integration_time * (self.ntimes // 2),
self.ntimes)
if freqwin > 1:
self.frequency = numpy.linspace(0.8e8, 1.2e8, self.freqwin)
self.channelwidth = numpy.array(freqwin * [self.frequency[1] - self.frequency[0]])
else:
self.frequency = numpy.array([1.0e8])
self.channelwidth = numpy.array([4e7])
if dopol:
self.vis_pol = PolarisationFrame('linear')
self.image_pol = PolarisationFrame('stokesIQUV')
f = numpy.array([100.0, 20.0, -10.0, 1.0])
else:
self.vis_pol = PolarisationFrame('stokesI')
self.image_pol = PolarisationFrame('stokesI')
f = numpy.array([100.0])
self.phasecentre = SkyCoord(ra=+0.0 * u.deg, dec=-40.0 * u.deg, frame='icrs', equinox='J2000')
self.blockvis_list = [ingest_unittest_visibility(self.low,
[self.frequency[freqwin]],
[self.channelwidth[freqwin]],
self.times,
self.vis_pol,
self.phasecentre, block=block,
zerow=zerow)
for freqwin, _ in enumerate(self.frequency)]
self.vis_list = [convert_blockvisibility_to_visibility(bv) for bv in self.blockvis_list]
self.skymodel_list = [create_low_test_skymodel_from_gleam
(npixel=self.npixel, cellsize=self.cellsize, frequency=[self.frequency[f]],
phasecentre=self.phasecentre,
polarisation_frame=PolarisationFrame("stokesI"),
flux_limit=0.6,
flux_threshold=1.0,
flux_max=5.0) for f, freq in enumerate(self.frequency)]
assert isinstance(self.skymodel_list[0].image, Image), self.skymodel_list[0].image
assert isinstance(self.skymodel_list[0].components[0], Skycomponent), self.skymodel_list[0].components[0]
assert len(self.skymodel_list[0].components) == 35, len(self.skymodel_list[0].components)
self.skymodel_list = expand_skymodel_by_skycomponents(self.skymodel_list[0])
assert len(self.skymodel_list) == 36, len(self.skymodel_list)
assert numpy.max(numpy.abs(self.skymodel_list[-1].image.data)) > 0.0, "Image is empty"
self.vis_list = [copy_visibility(self.vis_list[0], zero=True) for i, _ in enumerate(self.skymodel_list)]
def test_time_setup(self):
self.actualSetUp()
def test_predictcal(self):
self.actualSetUp(zerow=True)
skymodel_vislist = predict_skymodel_list_serial_workflow(self.vis_list[0], self.skymodel_list,
context='2d', docal=True)
vobs = sum_predict_results(skymodel_vislist)
if self.plot:
def plotvis(i, v):
import matplotlib.pyplot as plt
uvr = numpy.hypot(v.u, v.v)
amp = numpy.abs(v.vis[:, 0])
plt.plot(uvr, amp, '.')
plt.title(str(i))
plt.show()
plotvis(0, vobs)
def test_invertcal(self):
self.actualSetUp(zerow=True)
skymodel_vislist = predict_skymodel_list_serial_workflow(self.vis_list[0], self.skymodel_list,
context='2d', docal=True)
result_skymodel = [SkyModel(components=None, image=self.skymodel_list[-1].image)
for v in skymodel_vislist]
results = invert_skymodel_list_serial_workflow(skymodel_vislist, result_skymodel,
context='2d', docal=True)
assert numpy.max(numpy.abs(results[0][0].data)) > 0.0
assert numpy.max(numpy.abs(results[0][1])) > 0.0
if self.plot:
import matplotlib.pyplot as plt
from wrappers.serial.image.operations import show_image
show_image(results[0][0], title='Dirty image, no cross-subtraction', vmax=0.1, vmin=-0.01)
plt.show()
def test_crosssubtract_datamodel(self):
self.actualSetUp(zerow=True)
skymodel_vislist = predict_skymodel_list_serial_workflow(self.vis_list[0], self.skymodel_list,
context='2d', docal=True)
vobs = sum_predict_results(skymodel_vislist)
skymodel_vislist = crosssubtract_datamodels_skymodel_list_serial_workflow(vobs, skymodel_vislist)
result_skymodel = [SkyModel(components=None, image=self.skymodel_list[-1].image)
for v in skymodel_vislist]
results = invert_skymodel_list_serial_workflow(skymodel_vislist, result_skymodel,
context='2d', docal=True)
assert numpy.max(numpy.abs(results[0][0].data)) > 0.0
assert numpy.max(numpy.abs(results[0][1])) > 0.0
if self.plot:
import matplotlib.pyplot as plt
from wrappers.serial.image.operations import show_image
show_image(results[0][0], title='Dirty image after cross-subtraction', vmax=0.1, vmin=-0.01)
plt.show()
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
dhermes/google-cloud-python | bigquery/google/cloud/bigquery/client.py | 2 | 69036 | # Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client for interacting with the Google BigQuery API."""
from __future__ import absolute_import
try:
from collections import abc as collections_abc
except ImportError: # Python 2.7
import collections as collections_abc
import functools
import gzip
import os
import uuid
import six
from google import resumable_media
from google.resumable_media.requests import MultipartUpload
from google.resumable_media.requests import ResumableUpload
from google.api_core import page_iterator
import google.cloud._helpers
from google.cloud import exceptions
from google.cloud.client import ClientWithProject
from google.cloud.bigquery._helpers import _SCALAR_VALUE_TO_JSON_ROW
from google.cloud.bigquery._helpers import _str_or_none
from google.cloud.bigquery._http import Connection
from google.cloud.bigquery.dataset import Dataset
from google.cloud.bigquery.dataset import DatasetListItem
from google.cloud.bigquery.dataset import DatasetReference
from google.cloud.bigquery import job
from google.cloud.bigquery.query import _QueryResults
from google.cloud.bigquery.retry import DEFAULT_RETRY
from google.cloud.bigquery.table import Table
from google.cloud.bigquery.table import TableListItem
from google.cloud.bigquery.table import TableReference
from google.cloud.bigquery.table import RowIterator
from google.cloud.bigquery.table import _TABLE_HAS_NO_SCHEMA
from google.cloud.bigquery.table import _row_from_mapping
_DEFAULT_CHUNKSIZE = 1048576 # 1024 * 1024 B = 1 MB
_MAX_MULTIPART_SIZE = 5 * 1024 * 1024
_DEFAULT_NUM_RETRIES = 6
_BASE_UPLOAD_TEMPLATE = (
u"https://www.googleapis.com/upload/bigquery/v2/projects/"
u"{project}/jobs?uploadType="
)
_MULTIPART_URL_TEMPLATE = _BASE_UPLOAD_TEMPLATE + u"multipart"
_RESUMABLE_URL_TEMPLATE = _BASE_UPLOAD_TEMPLATE + u"resumable"
_GENERIC_CONTENT_TYPE = u"*/*"
_READ_LESS_THAN_SIZE = (
"Size {:d} was specified but the file-like object only had " "{:d} bytes remaining."
)
class Project(object):
"""Wrapper for resource describing a BigQuery project.
:type project_id: str
:param project_id: Opaque ID of the project
:type numeric_id: int
:param numeric_id: Numeric ID of the project
:type friendly_name: str
:param friendly_name: Display name of the project
"""
def __init__(self, project_id, numeric_id, friendly_name):
self.project_id = project_id
self.numeric_id = numeric_id
self.friendly_name = friendly_name
@classmethod
def from_api_repr(cls, resource):
"""Factory: construct an instance from a resource dict."""
return cls(resource["id"], resource["numericId"], resource["friendlyName"])
class Client(ClientWithProject):
"""Client to bundle configuration needed for API requests.
Args:
project (str):
Project ID for the project which the client acts on behalf of.
Will be passed when creating a dataset / job. If not passed,
falls back to the default inferred from the environment.
credentials (google.auth.credentials.Credentials):
(Optional) The OAuth2 Credentials to use for this client. If not
passed (and if no ``_http`` object is passed), falls back to the
default inferred from the environment.
_http (requests.Session):
(Optional) HTTP object to make requests. Can be any object that
defines ``request()`` with the same interface as
:meth:`requests.Session.request`. If not passed, an ``_http``
object is created that is bound to the ``credentials`` for the
current object.
This parameter should be considered private, and could change in
the future.
location (str):
(Optional) Default location for jobs / datasets / tables.
default_query_job_config (google.cloud.bigquery.job.QueryJobConfig):
(Optional) Default ``QueryJobConfig``.
Will be merged into job configs passed into the ``query`` method.
Raises:
google.auth.exceptions.DefaultCredentialsError:
Raised if ``credentials`` is not specified and the library fails
to acquire default credentials.
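    Example:
        Illustrative sketch only; the project ID and location shown here are
        placeholders.
        >>> from google.cloud import bigquery
        >>> client = bigquery.Client(project='my-project', location='US')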
"""
SCOPE = (
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/cloud-platform",
)
"""The scopes required for authenticating as a BigQuery consumer."""
def __init__(
self,
project=None,
credentials=None,
_http=None,
location=None,
default_query_job_config=None,
):
super(Client, self).__init__(
project=project, credentials=credentials, _http=_http
)
self._connection = Connection(self)
self._location = location
self._default_query_job_config = default_query_job_config
@property
def location(self):
"""Default location for jobs / datasets / tables."""
return self._location
def get_service_account_email(self, project=None):
"""Get the email address of the project's BigQuery service account
Note:
This is the service account that BigQuery uses to manage tables
encrypted by a key in KMS.
Args:
project (str, optional):
                Project ID to use for retrieving service account email.
Defaults to the client's project.
Returns:
str: service account email address
Example:
>>> from google.cloud import bigquery
>>> client = bigquery.Client()
>>> client.get_service_account_email()
[email protected]
"""
if project is None:
project = self.project
path = "/projects/%s/serviceAccount" % (project,)
api_response = self._connection.api_request(method="GET", path=path)
return api_response["email"]
def list_projects(self, max_results=None, page_token=None, retry=DEFAULT_RETRY):
"""List projects for the project associated with this client.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/projects/list
:type max_results: int
:param max_results: (Optional) maximum number of projects to return,
If not passed, defaults to a value set by the API.
:type page_token: str
:param page_token:
(Optional) Token representing a cursor into the projects. If
not passed, the API will return the first page of projects.
The token marks the beginning of the iterator to be returned
and the value of the ``page_token`` can be accessed at
``next_page_token`` of the
:class:`~google.api_core.page_iterator.HTTPIterator`.
:type retry: :class:`google.api_core.retry.Retry`
:param retry: (Optional) How to retry the RPC.
:rtype: :class:`~google.api_core.page_iterator.Iterator`
:returns: Iterator of :class:`~google.cloud.bigquery.client.Project`
accessible to the current client.
"""
return page_iterator.HTTPIterator(
client=self,
api_request=functools.partial(self._call_api, retry),
path="/projects",
item_to_value=_item_to_project,
items_key="projects",
page_token=page_token,
max_results=max_results,
)
def list_datasets(
self,
project=None,
include_all=False,
filter=None,
max_results=None,
page_token=None,
retry=DEFAULT_RETRY,
):
"""List datasets for the project associated with this client.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list
Args:
project (str):
                Optional. Project ID to use for retrieving datasets. Defaults
to the client's project.
include_all (bool):
Optional. True if results include hidden datasets. Defaults
to False.
filter (str):
Optional. An expression for filtering the results by label.
For syntax, see
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list#filter.
max_results (int):
Optional. Maximum number of datasets to return.
page_token (str):
Optional. Token representing a cursor into the datasets. If
not passed, the API will return the first page of datasets.
The token marks the beginning of the iterator to be returned
and the value of the ``page_token`` can be accessed at
``next_page_token`` of the
:class:`~google.api_core.page_iterator.HTTPIterator`.
retry (google.api_core.retry.Retry):
Optional. How to retry the RPC.
Returns:
google.api_core.page_iterator.Iterator:
Iterator of
                :class:`~google.cloud.bigquery.dataset.DatasetListItem`
associated with the project.
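        Example:
            Illustrative sketch; the label filter value is a placeholder and
            an existing ``client`` (:class:`Client`) is assumed.
            >>> for dataset in client.list_datasets(
            ...         filter='labels.environment:prod', max_results=10):
            ...     print(dataset.dataset_id)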
"""
extra_params = {}
if project is None:
project = self.project
if include_all:
extra_params["all"] = True
if filter:
# TODO: consider supporting a dict of label -> value for filter,
# and converting it into a string here.
extra_params["filter"] = filter
path = "/projects/%s/datasets" % (project,)
return page_iterator.HTTPIterator(
client=self,
api_request=functools.partial(self._call_api, retry),
path=path,
item_to_value=_item_to_dataset,
items_key="datasets",
page_token=page_token,
max_results=max_results,
extra_params=extra_params,
)
def dataset(self, dataset_id, project=None):
"""Construct a reference to a dataset.
:type dataset_id: str
:param dataset_id: ID of the dataset.
:type project: str
:param project: (Optional) project ID for the dataset (defaults to
the project of the client).
:rtype: :class:`google.cloud.bigquery.dataset.DatasetReference`
:returns: a new ``DatasetReference`` instance
"""
if project is None:
project = self.project
return DatasetReference(project, dataset_id)
def create_dataset(self, dataset):
"""API call: create the dataset via a POST request.
See
        https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/insert
Args:
dataset (Union[ \
:class:`~google.cloud.bigquery.dataset.Dataset`, \
:class:`~google.cloud.bigquery.dataset.DatasetReference`, \
str, \
]):
A :class:`~google.cloud.bigquery.dataset.Dataset` to create.
If ``dataset`` is a reference, an empty dataset is created
with the specified ID and client's default location.
Returns:
google.cloud.bigquery.dataset.Dataset:
A new ``Dataset`` returned from the API.
Example:
>>> from google.cloud import bigquery
>>> client = bigquery.Client()
>>> dataset = bigquery.Dataset(client.dataset('my_dataset'))
>>> dataset = client.create_dataset(dataset)
"""
if isinstance(dataset, str):
dataset = DatasetReference.from_string(
dataset, default_project=self.project
)
if isinstance(dataset, DatasetReference):
dataset = Dataset(dataset)
path = "/projects/%s/datasets" % (dataset.project,)
data = dataset.to_api_repr()
if data.get("location") is None and self.location is not None:
data["location"] = self.location
api_response = self._connection.api_request(method="POST", path=path, data=data)
return Dataset.from_api_repr(api_response)
def create_table(self, table):
"""API call: create a table via a PUT request
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables/insert
Args:
table (Union[ \
:class:`~google.cloud.bigquery.table.Table`, \
:class:`~google.cloud.bigquery.table.TableReference`, \
str, \
]):
A :class:`~google.cloud.bigquery.table.Table` to create.
If ``table`` is a reference, an empty table is created
with the specified ID. The dataset that the table belongs to
must already exist.
Returns:
google.cloud.bigquery.table.Table:
A new ``Table`` returned from the service.
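        Example:
            Illustrative sketch; ``'my_dataset.my_table'`` is a hypothetical
            table ID and the dataset must already exist.
            >>> from google.cloud import bigquery
            >>> client = bigquery.Client()
            >>> table = client.create_table('my_dataset.my_table')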
"""
if isinstance(table, str):
table = TableReference.from_string(table, default_project=self.project)
if isinstance(table, TableReference):
table = Table(table)
path = "/projects/%s/datasets/%s/tables" % (table.project, table.dataset_id)
api_response = self._connection.api_request(
method="POST", path=path, data=table.to_api_repr()
)
return Table.from_api_repr(api_response)
def _call_api(self, retry, **kwargs):
call = functools.partial(self._connection.api_request, **kwargs)
if retry:
call = retry(call)
return call()
def get_dataset(self, dataset_ref, retry=DEFAULT_RETRY):
"""Fetch the dataset referenced by ``dataset_ref``
Args:
dataset_ref (Union[ \
:class:`~google.cloud.bigquery.dataset.DatasetReference`, \
str, \
]):
A reference to the dataset to fetch from the BigQuery API.
If a string is passed in, this method attempts to create a
dataset reference from a string using
:func:`~google.cloud.bigquery.dataset.DatasetReference.from_string`.
retry (:class:`google.api_core.retry.Retry`):
(Optional) How to retry the RPC.
Returns:
google.cloud.bigquery.dataset.Dataset:
A ``Dataset`` instance.
"""
if isinstance(dataset_ref, str):
dataset_ref = DatasetReference.from_string(
dataset_ref, default_project=self.project
)
api_response = self._call_api(retry, method="GET", path=dataset_ref.path)
return Dataset.from_api_repr(api_response)
def get_table(self, table_ref, retry=DEFAULT_RETRY):
"""Fetch the table referenced by ``table_ref``.
Args:
table_ref (Union[ \
:class:`~google.cloud.bigquery.table.TableReference`, \
str, \
]):
A reference to the table to fetch from the BigQuery API.
If a string is passed in, this method attempts to create a
table reference from a string using
:func:`google.cloud.bigquery.table.TableReference.from_string`.
retry (:class:`google.api_core.retry.Retry`):
(Optional) How to retry the RPC.
Returns:
google.cloud.bigquery.table.Table:
A ``Table`` instance.
"""
if isinstance(table_ref, str):
table_ref = TableReference.from_string(
table_ref, default_project=self.project
)
api_response = self._call_api(retry, method="GET", path=table_ref.path)
return Table.from_api_repr(api_response)
def update_dataset(self, dataset, fields, retry=DEFAULT_RETRY):
"""Change some fields of a dataset.
Use ``fields`` to specify which fields to update. At least one field
must be provided. If a field is listed in ``fields`` and is ``None`` in
``dataset``, it will be deleted.
If ``dataset.etag`` is not ``None``, the update will only
succeed if the dataset on the server has the same ETag. Thus
reading a dataset with ``get_dataset``, changing its fields,
and then passing it to ``update_dataset`` will ensure that the changes
will only be saved if no modifications to the dataset occurred
since the read.
Args:
dataset (google.cloud.bigquery.dataset.Dataset):
The dataset to update.
fields (Sequence[str]):
The properties of ``dataset`` to change (e.g. "friendly_name").
retry (google.api_core.retry.Retry, optional):
How to retry the RPC.
Returns:
google.cloud.bigquery.dataset.Dataset:
The modified ``Dataset`` instance.
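        Example:
            Illustrative sketch of the read-modify-write pattern described
            above; ``'my_dataset'`` is a hypothetical dataset ID and an
            existing ``client`` is assumed.
            >>> dataset = client.get_dataset(client.dataset('my_dataset'))
            >>> dataset.description = 'A new description.'
            >>> dataset = client.update_dataset(dataset, ['description'])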
"""
partial = dataset._build_resource(fields)
if dataset.etag is not None:
headers = {"If-Match": dataset.etag}
else:
headers = None
api_response = self._call_api(
retry, method="PATCH", path=dataset.path, data=partial, headers=headers
)
return Dataset.from_api_repr(api_response)
def update_table(self, table, fields, retry=DEFAULT_RETRY):
"""Change some fields of a table.
Use ``fields`` to specify which fields to update. At least one field
must be provided. If a field is listed in ``fields`` and is ``None``
in ``table``, it will be deleted.
If ``table.etag`` is not ``None``, the update will only succeed if
the table on the server has the same ETag. Thus reading a table with
``get_table``, changing its fields, and then passing it to
``update_table`` will ensure that the changes will only be saved if
no modifications to the table occurred since the read.
Args:
table (google.cloud.bigquery.table.Table): The table to update.
fields (Sequence[str]):
The fields of ``table`` to change, spelled as the Table
properties (e.g. "friendly_name").
retry (google.api_core.retry.Retry):
(Optional) A description of how to retry the API call.
Returns:
google.cloud.bigquery.table.Table:
The table resource returned from the API call.
"""
partial = table._build_resource(fields)
if table.etag is not None:
headers = {"If-Match": table.etag}
else:
headers = None
api_response = self._call_api(
retry, method="PATCH", path=table.path, data=partial, headers=headers
)
return Table.from_api_repr(api_response)
def list_tables(
self, dataset, max_results=None, page_token=None, retry=DEFAULT_RETRY
):
"""List tables in the dataset.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables/list
Args:
dataset (Union[ \
:class:`~google.cloud.bigquery.dataset.Dataset`, \
:class:`~google.cloud.bigquery.dataset.DatasetReference`, \
str, \
]):
A reference to the dataset whose tables to list from the
BigQuery API. If a string is passed in, this method attempts
to create a dataset reference from a string using
:func:`google.cloud.bigquery.dataset.DatasetReference.from_string`.
max_results (int):
(Optional) Maximum number of tables to return. If not passed,
defaults to a value set by the API.
page_token (str):
(Optional) Token representing a cursor into the tables. If
not passed, the API will return the first page of tables. The
token marks the beginning of the iterator to be returned and
the value of the ``page_token`` can be accessed at
``next_page_token`` of the
:class:`~google.api_core.page_iterator.HTTPIterator`.
retry (:class:`google.api_core.retry.Retry`):
(Optional) How to retry the RPC.
Returns:
google.api_core.page_iterator.Iterator:
Iterator of
:class:`~google.cloud.bigquery.table.TableListItem` contained
within the requested dataset.
"""
if isinstance(dataset, str):
dataset = DatasetReference.from_string(
dataset, default_project=self.project
)
if not isinstance(dataset, (Dataset, DatasetReference)):
raise TypeError("dataset must be a Dataset, DatasetReference, or string")
path = "%s/tables" % dataset.path
result = page_iterator.HTTPIterator(
client=self,
api_request=functools.partial(self._call_api, retry),
path=path,
item_to_value=_item_to_table,
items_key="tables",
page_token=page_token,
max_results=max_results,
)
result.dataset = dataset
return result
def delete_dataset(self, dataset, delete_contents=False, retry=DEFAULT_RETRY):
"""Delete a dataset.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/delete
        Args:
dataset (Union[ \
:class:`~google.cloud.bigquery.dataset.Dataset`, \
:class:`~google.cloud.bigquery.dataset.DatasetReference`, \
str, \
]):
A reference to the dataset to delete. If a string is passed
in, this method attempts to create a dataset reference from a
string using
:func:`google.cloud.bigquery.dataset.DatasetReference.from_string`.
retry (:class:`google.api_core.retry.Retry`):
(Optional) How to retry the RPC.
delete_contents (boolean):
(Optional) If True, delete all the tables in the dataset. If
False and the dataset contains tables, the request will fail.
Default is False.
"""
if isinstance(dataset, str):
dataset = DatasetReference.from_string(
dataset, default_project=self.project
)
if not isinstance(dataset, (Dataset, DatasetReference)):
raise TypeError("dataset must be a Dataset or a DatasetReference")
params = {}
if delete_contents:
params["deleteContents"] = "true"
self._call_api(retry, method="DELETE", path=dataset.path, query_params=params)
def delete_table(self, table, retry=DEFAULT_RETRY):
"""Delete a table
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables/delete
Args:
table (Union[ \
:class:`~google.cloud.bigquery.table.Table`, \
:class:`~google.cloud.bigquery.table.TableReference`, \
str, \
]):
A reference to the table to delete. If a string is passed in,
this method attempts to create a table reference from a
string using
:func:`google.cloud.bigquery.table.TableReference.from_string`.
retry (:class:`google.api_core.retry.Retry`):
(Optional) How to retry the RPC.
"""
if isinstance(table, str):
table = TableReference.from_string(table, default_project=self.project)
if not isinstance(table, (Table, TableReference)):
raise TypeError("table must be a Table or a TableReference")
self._call_api(retry, method="DELETE", path=table.path)
def _get_query_results(
self, job_id, retry, project=None, timeout_ms=None, location=None
):
"""Get the query results object for a query job.
Arguments:
job_id (str): Name of the query job.
retry (google.api_core.retry.Retry):
(Optional) How to retry the RPC.
project (str):
(Optional) project ID for the query job (defaults to the
project of the client).
timeout_ms (int):
                (Optional) number of milliseconds the API call should
wait for the query to complete before the request times out.
location (str): Location of the query job.
Returns:
google.cloud.bigquery.query._QueryResults:
A new ``_QueryResults`` instance.
"""
extra_params = {"maxResults": 0}
if project is None:
project = self.project
if timeout_ms is not None:
extra_params["timeoutMs"] = timeout_ms
if location is None:
location = self.location
if location is not None:
extra_params["location"] = location
path = "/projects/{}/queries/{}".format(project, job_id)
# This call is typically made in a polling loop that checks whether the
# job is complete (from QueryJob.done(), called ultimately from
# QueryJob.result()). So we don't need to poll here.
resource = self._call_api(
retry, method="GET", path=path, query_params=extra_params
)
return _QueryResults.from_api_repr(resource)
def job_from_resource(self, resource):
"""Detect correct job type from resource and instantiate.
:type resource: dict
:param resource: one job resource from API response
:rtype: One of:
:class:`google.cloud.bigquery.job.LoadJob`,
:class:`google.cloud.bigquery.job.CopyJob`,
:class:`google.cloud.bigquery.job.ExtractJob`,
or :class:`google.cloud.bigquery.job.QueryJob`
:returns: the job instance, constructed via the resource
"""
config = resource.get("configuration", {})
if "load" in config:
return job.LoadJob.from_api_repr(resource, self)
elif "copy" in config:
return job.CopyJob.from_api_repr(resource, self)
elif "extract" in config:
return job.ExtractJob.from_api_repr(resource, self)
elif "query" in config:
return job.QueryJob.from_api_repr(resource, self)
return job.UnknownJob.from_api_repr(resource, self)
def get_job(self, job_id, project=None, location=None, retry=DEFAULT_RETRY):
"""Fetch a job for the project associated with this client.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/get
Arguments:
job_id (str): Unique job identifier.
Keyword Arguments:
project (str):
                (Optional) ID of the project which owns the job (defaults to
the client's project).
location (str): Location where the job was run.
retry (google.api_core.retry.Retry):
(Optional) How to retry the RPC.
Returns:
Union[google.cloud.bigquery.job.LoadJob, \
google.cloud.bigquery.job.CopyJob, \
google.cloud.bigquery.job.ExtractJob, \
google.cloud.bigquery.job.QueryJob]:
Job instance, based on the resource returned by the API.
"""
extra_params = {"projection": "full"}
if project is None:
project = self.project
if location is None:
location = self.location
if location is not None:
extra_params["location"] = location
path = "/projects/{}/jobs/{}".format(project, job_id)
resource = self._call_api(
retry, method="GET", path=path, query_params=extra_params
)
return self.job_from_resource(resource)
def cancel_job(self, job_id, project=None, location=None, retry=DEFAULT_RETRY):
"""Attempt to cancel a job from a job ID.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/cancel
Arguments:
job_id (str): Unique job identifier.
Keyword Arguments:
project (str):
(Optional) ID of the project which owns the job (defaults to
the client's project).
location (str): Location where the job was run.
retry (google.api_core.retry.Retry):
(Optional) How to retry the RPC.
Returns:
Union[google.cloud.bigquery.job.LoadJob, \
google.cloud.bigquery.job.CopyJob, \
google.cloud.bigquery.job.ExtractJob, \
google.cloud.bigquery.job.QueryJob]:
Job instance, based on the resource returned by the API.
"""
extra_params = {"projection": "full"}
if project is None:
project = self.project
if location is None:
location = self.location
if location is not None:
extra_params["location"] = location
path = "/projects/{}/jobs/{}/cancel".format(project, job_id)
resource = self._call_api(
retry, method="POST", path=path, query_params=extra_params
)
return self.job_from_resource(resource["job"])
def list_jobs(
self,
project=None,
max_results=None,
page_token=None,
all_users=None,
state_filter=None,
retry=DEFAULT_RETRY,
min_creation_time=None,
max_creation_time=None,
):
"""List jobs for the project associated with this client.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/list
Args:
project (str, optional):
                Project ID to use for retrieving jobs. Defaults
to the client's project.
max_results (int, optional):
Maximum number of jobs to return.
page_token (str, optional):
Opaque marker for the next "page" of jobs. If not
passed, the API will return the first page of jobs. The token
marks the beginning of the iterator to be returned and the
value of the ``page_token`` can be accessed at
``next_page_token`` of
:class:`~google.api_core.page_iterator.HTTPIterator`.
all_users (bool, optional):
If true, include jobs owned by all users in the project.
Defaults to :data:`False`.
state_filter (str, optional):
If set, include only jobs matching the given state. One of:
* ``"done"``
* ``"pending"``
* ``"running"``
retry (google.api_core.retry.Retry, optional):
How to retry the RPC.
min_creation_time (datetime.datetime, optional):
Min value for job creation time. If set, only jobs created
after or at this timestamp are returned. If the datetime has
no time zone assumes UTC time.
max_creation_time (datetime.datetime, optional):
Max value for job creation time. If set, only jobs created
before or at this timestamp are returned. If the datetime has
no time zone assumes UTC time.
Returns:
google.api_core.page_iterator.Iterator:
Iterable of job instances.
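        Example:
            Illustrative sketch; assumes an existing ``client``
            (:class:`Client`).
            >>> for job in client.list_jobs(max_results=10, state_filter='running'):
            ...     print(job.job_id, job.state)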
"""
extra_params = {
"allUsers": all_users,
"stateFilter": state_filter,
"minCreationTime": _str_or_none(
google.cloud._helpers._millis_from_datetime(min_creation_time)
),
"maxCreationTime": _str_or_none(
google.cloud._helpers._millis_from_datetime(max_creation_time)
),
"projection": "full",
}
extra_params = {
param: value for param, value in extra_params.items() if value is not None
}
if project is None:
project = self.project
path = "/projects/%s/jobs" % (project,)
return page_iterator.HTTPIterator(
client=self,
api_request=functools.partial(self._call_api, retry),
path=path,
item_to_value=_item_to_job,
items_key="jobs",
page_token=page_token,
max_results=max_results,
extra_params=extra_params,
)
def load_table_from_uri(
self,
source_uris,
destination,
job_id=None,
job_id_prefix=None,
location=None,
project=None,
job_config=None,
retry=DEFAULT_RETRY,
):
"""Starts a job for loading data into a table from CloudStorage.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load
Arguments:
source_uris (Union[str, Sequence[str]]):
URIs of data files to be loaded; in format
``gs://<bucket_name>/<object_name_or_glob>``.
destination (Union[ \
:class:`~google.cloud.bigquery.table.TableReference`, \
str, \
]):
Table into which data is to be loaded. If a string is passed
in, this method attempts to create a table reference from a
string using
:func:`google.cloud.bigquery.table.TableReference.from_string`.
Keyword Arguments:
job_id (str): (Optional) Name of the job.
job_id_prefix (str):
(Optional) the user-provided prefix for a randomly generated
job ID. This parameter will be ignored if a ``job_id`` is
also given.
location (str):
Location where to run the job. Must match the location of the
destination table.
project (str):
                Project ID of the project where the job runs. Defaults
to the client's project.
job_config (google.cloud.bigquery.job.LoadJobConfig):
(Optional) Extra configuration options for the job.
retry (google.api_core.retry.Retry):
(Optional) How to retry the RPC.
Returns:
google.cloud.bigquery.job.LoadJob: A new load job.
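        Example:
            Illustrative sketch; the bucket, file, and table names are
            placeholders, schema auto-detection is just one possible
            configuration, and an existing ``client`` is assumed.
            >>> config = bigquery.LoadJobConfig()
            >>> config.autodetect = True
            >>> load_job = client.load_table_from_uri(
            ...     'gs://my-bucket/data.csv', 'my_dataset.my_table',
            ...     job_config=config)
            >>> load_job.result()  # wait for the load to finish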
"""
job_id = _make_job_id(job_id, job_id_prefix)
if project is None:
project = self.project
if location is None:
location = self.location
job_ref = job._JobReference(job_id, project=project, location=location)
if isinstance(source_uris, six.string_types):
source_uris = [source_uris]
if isinstance(destination, str):
destination = TableReference.from_string(
destination, default_project=self.project
)
load_job = job.LoadJob(job_ref, source_uris, destination, self, job_config)
load_job._begin(retry=retry)
return load_job
def load_table_from_file(
self,
file_obj,
destination,
rewind=False,
size=None,
num_retries=_DEFAULT_NUM_RETRIES,
job_id=None,
job_id_prefix=None,
location=None,
project=None,
job_config=None,
):
"""Upload the contents of this table from a file-like object.
Similar to :meth:`load_table_from_uri`, this method creates, starts and
returns a :class:`~google.cloud.bigquery.job.LoadJob`.
Arguments:
file_obj (file): A file handle opened in binary mode for reading.
destination (Union[ \
:class:`~google.cloud.bigquery.table.TableReference`, \
str, \
]):
Table into which data is to be loaded. If a string is passed
in, this method attempts to create a table reference from a
string using
:func:`google.cloud.bigquery.table.TableReference.from_string`.
Keyword Arguments:
rewind (bool):
If True, seek to the beginning of the file handle before
reading the file.
size (int):
The number of bytes to read from the file handle. If size is
``None`` or large, resumable upload will be used. Otherwise,
multipart upload will be used.
num_retries (int): Number of upload retries. Defaults to 6.
job_id (str): (Optional) Name of the job.
job_id_prefix (str):
(Optional) the user-provided prefix for a randomly generated
job ID. This parameter will be ignored if a ``job_id`` is
also given.
location (str):
Location where to run the job. Must match the location of the
destination table.
project (str):
                Project ID of the project where the job runs. Defaults
to the client's project.
job_config (google.cloud.bigquery.job.LoadJobConfig):
(Optional) Extra configuration options for the job.
Returns:
google.cloud.bigquery.job.LoadJob: A new load job.
Raises:
ValueError:
If ``size`` is not passed in and can not be determined, or if
the ``file_obj`` can be detected to be a file opened in text
mode.
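        Example:
            Illustrative sketch; ``'data.csv'`` and the table ID are
            placeholders, the job configuration is one possible choice, and
            an existing ``client`` is assumed.
            >>> config = bigquery.LoadJobConfig()
            >>> config.source_format = bigquery.SourceFormat.CSV
            >>> config.autodetect = True
            >>> with open('data.csv', 'rb') as source_file:
            ...     job = client.load_table_from_file(
            ...         source_file, 'my_dataset.my_table', job_config=config)
            >>> job.result()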
"""
job_id = _make_job_id(job_id, job_id_prefix)
if project is None:
project = self.project
if location is None:
location = self.location
if isinstance(destination, str):
destination = TableReference.from_string(
destination, default_project=self.project
)
job_ref = job._JobReference(job_id, project=project, location=location)
load_job = job.LoadJob(job_ref, None, destination, self, job_config)
job_resource = load_job.to_api_repr()
if rewind:
file_obj.seek(0, os.SEEK_SET)
_check_mode(file_obj)
try:
if size is None or size >= _MAX_MULTIPART_SIZE:
response = self._do_resumable_upload(
file_obj, job_resource, num_retries
)
else:
response = self._do_multipart_upload(
file_obj, job_resource, size, num_retries
)
except resumable_media.InvalidResponse as exc:
raise exceptions.from_http_response(exc.response)
return self.job_from_resource(response.json())
def load_table_from_dataframe(
self,
dataframe,
destination,
num_retries=_DEFAULT_NUM_RETRIES,
job_id=None,
job_id_prefix=None,
location=None,
project=None,
job_config=None,
):
"""Upload the contents of a table from a pandas DataFrame.
Similar to :meth:`load_table_from_uri`, this method creates, starts and
returns a :class:`~google.cloud.bigquery.job.LoadJob`.
Arguments:
dataframe (pandas.DataFrame):
A :class:`~pandas.DataFrame` containing the data to load.
destination (google.cloud.bigquery.table.TableReference):
The destination table to use for loading the data. If it is an
existing table, the schema of the :class:`~pandas.DataFrame`
must match the schema of the destination table. If the table
does not yet exist, the schema is inferred from the
:class:`~pandas.DataFrame`.
If a string is passed in, this method attempts to create a
table reference from a string using
:func:`google.cloud.bigquery.table.TableReference.from_string`.
Keyword Arguments:
num_retries (int, optional): Number of upload retries.
job_id (str, optional): Name of the job.
job_id_prefix (str, optional):
The user-provided prefix for a randomly generated
job ID. This parameter will be ignored if a ``job_id`` is
also given.
location (str):
Location where to run the job. Must match the location of the
destination table.
project (str, optional):
                Project ID of the project where the job runs. Defaults
to the client's project.
job_config (google.cloud.bigquery.job.LoadJobConfig, optional):
Extra configuration options for the job.
Returns:
google.cloud.bigquery.job.LoadJob: A new load job.
Raises:
ImportError:
If a usable parquet engine cannot be found. This method
requires :mod:`pyarrow` to be installed.
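        Example:
            Illustrative sketch; requires :mod:`pandas` and :mod:`pyarrow`,
            the destination IDs and column names are placeholders, and an
            existing ``client`` is assumed.
            >>> import pandas
            >>> dataframe = pandas.DataFrame({'title': ['a'], 'score': [1.5]})
            >>> table_ref = client.dataset('my_dataset').table('my_table')
            >>> job = client.load_table_from_dataframe(dataframe, table_ref)
            >>> job.result()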
"""
buffer = six.BytesIO()
dataframe.to_parquet(buffer)
if job_config is None:
job_config = job.LoadJobConfig()
job_config.source_format = job.SourceFormat.PARQUET
if location is None:
location = self.location
return self.load_table_from_file(
buffer,
destination,
num_retries=num_retries,
rewind=True,
job_id=job_id,
job_id_prefix=job_id_prefix,
location=location,
project=project,
job_config=job_config,
)
def _do_resumable_upload(self, stream, metadata, num_retries):
"""Perform a resumable upload.
:type stream: IO[bytes]
:param stream: A bytes IO object open for reading.
:type metadata: dict
:param metadata: The metadata associated with the upload.
:type num_retries: int
:param num_retries: Number of upload retries. (Deprecated: This
argument will be removed in a future release.)
:rtype: :class:`~requests.Response`
:returns: The "200 OK" response object returned after the final chunk
is uploaded.
"""
upload, transport = self._initiate_resumable_upload(
stream, metadata, num_retries
)
while not upload.finished:
response = upload.transmit_next_chunk(transport)
return response
def _initiate_resumable_upload(self, stream, metadata, num_retries):
"""Initiate a resumable upload.
:type stream: IO[bytes]
:param stream: A bytes IO object open for reading.
:type metadata: dict
:param metadata: The metadata associated with the upload.
:type num_retries: int
:param num_retries: Number of upload retries. (Deprecated: This
argument will be removed in a future release.)
:rtype: tuple
:returns:
Pair of
* The :class:`~google.resumable_media.requests.ResumableUpload`
that was created
* The ``transport`` used to initiate the upload.
"""
chunk_size = _DEFAULT_CHUNKSIZE
transport = self._http
headers = _get_upload_headers(self._connection.USER_AGENT)
upload_url = _RESUMABLE_URL_TEMPLATE.format(project=self.project)
# TODO: modify ResumableUpload to take a retry.Retry object
# that it can use for the initial RPC.
upload = ResumableUpload(upload_url, chunk_size, headers=headers)
if num_retries is not None:
upload._retry_strategy = resumable_media.RetryStrategy(
max_retries=num_retries
)
upload.initiate(
transport, stream, metadata, _GENERIC_CONTENT_TYPE, stream_final=False
)
return upload, transport
def _do_multipart_upload(self, stream, metadata, size, num_retries):
"""Perform a multipart upload.
:type stream: IO[bytes]
:param stream: A bytes IO object open for reading.
:type metadata: dict
:param metadata: The metadata associated with the upload.
:type size: int
:param size: The number of bytes to be uploaded (which will be read
from ``stream``). If not provided, the upload will be
concluded once ``stream`` is exhausted (or :data:`None`).
:type num_retries: int
:param num_retries: Number of upload retries. (Deprecated: This
argument will be removed in a future release.)
:rtype: :class:`~requests.Response`
:returns: The "200 OK" response object returned after the multipart
upload request.
:raises: :exc:`ValueError` if the ``stream`` has fewer than ``size``
bytes remaining.
"""
data = stream.read(size)
if len(data) < size:
msg = _READ_LESS_THAN_SIZE.format(size, len(data))
raise ValueError(msg)
headers = _get_upload_headers(self._connection.USER_AGENT)
upload_url = _MULTIPART_URL_TEMPLATE.format(project=self.project)
upload = MultipartUpload(upload_url, headers=headers)
if num_retries is not None:
upload._retry_strategy = resumable_media.RetryStrategy(
max_retries=num_retries
)
response = upload.transmit(self._http, data, metadata, _GENERIC_CONTENT_TYPE)
return response
def copy_table(
self,
sources,
destination,
job_id=None,
job_id_prefix=None,
location=None,
project=None,
job_config=None,
retry=DEFAULT_RETRY,
):
"""Copy one or more tables to another table.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.copy
Arguments:
sources (Union[ \
:class:`~google.cloud.bigquery.table.TableReference`, \
str, \
Sequence[ \
:class:`~google.cloud.bigquery.table.TableReference`], \
]):
Table or tables to be copied.
            destination (Union[ \
:class:`~google.cloud.bigquery.table.TableReference`, \
str, \
]):
Table into which data is to be copied.
Keyword Arguments:
job_id (str): (Optional) The ID of the job.
            job_id_prefix (str):
(Optional) the user-provided prefix for a randomly generated
job ID. This parameter will be ignored if a ``job_id`` is
also given.
location (str):
Location where to run the job. Must match the location of any
source table as well as the destination table.
project (str):
                Project ID of the project where the job runs. Defaults
to the client's project.
job_config (google.cloud.bigquery.job.CopyJobConfig):
(Optional) Extra configuration options for the job.
retry (google.api_core.retry.Retry):
(Optional) How to retry the RPC.
Returns:
google.cloud.bigquery.job.CopyJob: A new copy job instance.
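        Example:
            Illustrative sketch; both table IDs are placeholders in the
            client's default project and an existing ``client`` is assumed.
            >>> copy_job = client.copy_table(
            ...     'my_dataset.source_table', 'my_dataset.destination_table')
            >>> copy_job.result()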
"""
job_id = _make_job_id(job_id, job_id_prefix)
if project is None:
project = self.project
if location is None:
location = self.location
job_ref = job._JobReference(job_id, project=project, location=location)
if isinstance(sources, str):
sources = TableReference.from_string(sources, default_project=self.project)
if isinstance(destination, str):
destination = TableReference.from_string(
destination, default_project=self.project
)
if not isinstance(sources, collections_abc.Sequence):
sources = [sources]
copy_job = job.CopyJob(
job_ref, sources, destination, client=self, job_config=job_config
)
copy_job._begin(retry=retry)
return copy_job
def extract_table(
self,
source,
destination_uris,
job_id=None,
job_id_prefix=None,
location=None,
project=None,
job_config=None,
retry=DEFAULT_RETRY,
):
"""Start a job to extract a table into Cloud Storage files.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.extract
Arguments:
source (Union[ \
:class:`google.cloud.bigquery.table.TableReference`, \
                str, \
]):
Table to be extracted.
destination_uris (Union[str, Sequence[str]]):
URIs of Cloud Storage file(s) into which table data is to be
extracted; in format
``gs://<bucket_name>/<object_name_or_glob>``.
Keyword Arguments:
job_id (str): (Optional) The ID of the job.
            job_id_prefix (str):
(Optional) the user-provided prefix for a randomly generated
job ID. This parameter will be ignored if a ``job_id`` is
also given.
location (str):
Location where to run the job. Must match the location of the
source table.
project (str):
                Project ID of the project where the job runs. Defaults
to the client's project.
job_config (google.cloud.bigquery.job.ExtractJobConfig):
(Optional) Extra configuration options for the job.
retry (google.api_core.retry.Retry):
(Optional) How to retry the RPC.
Returns:
google.cloud.bigquery.job.ExtractJob: A new extract job instance.
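        Example:
            Illustrative sketch; the table ID and destination URI are
            placeholders and an existing ``client`` is assumed.
            >>> extract_job = client.extract_table(
            ...     'my_dataset.my_table', 'gs://my-bucket/exported-*.csv')
            >>> extract_job.result()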
"""
job_id = _make_job_id(job_id, job_id_prefix)
if project is None:
project = self.project
if location is None:
location = self.location
job_ref = job._JobReference(job_id, project=project, location=location)
if isinstance(source, str):
source = TableReference.from_string(source, default_project=self.project)
if isinstance(destination_uris, six.string_types):
destination_uris = [destination_uris]
extract_job = job.ExtractJob(
job_ref, source, destination_uris, client=self, job_config=job_config
)
extract_job._begin(retry=retry)
return extract_job
def query(
self,
query,
job_config=None,
job_id=None,
job_id_prefix=None,
location=None,
project=None,
retry=DEFAULT_RETRY,
):
"""Run a SQL query.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query
Arguments:
query (str):
SQL query to be executed. Defaults to the standard SQL
dialect. Use the ``job_config`` parameter to change dialects.
Keyword Arguments:
job_config (google.cloud.bigquery.job.QueryJobConfig):
(Optional) Extra configuration options for the job.
To override any options that were previously set in
the ``default_query_job_config`` given to the
``Client`` constructor, manually set those options to ``None``,
or whatever value is preferred.
job_id (str): (Optional) ID to use for the query job.
job_id_prefix (str):
(Optional) The prefix to use for a randomly generated job ID.
This parameter will be ignored if a ``job_id`` is also given.
location (str):
                Location where to run the job. Must match the location of
any table used in the query as well as the destination table.
project (str):
                Project ID of the project where the job runs. Defaults
to the client's project.
retry (google.api_core.retry.Retry):
(Optional) How to retry the RPC.
Returns:
google.cloud.bigquery.job.QueryJob: A new query job instance.
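        Example:
            Illustrative sketch; the table referenced in the SQL is a
            placeholder and an existing ``client`` is assumed.
            >>> query_job = client.query(
            ...     'SELECT COUNT(*) AS total FROM `my_dataset.my_table`')
            >>> for row in query_job.result():  # waits for the job to finish
            ...     print(row.total)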
"""
job_id = _make_job_id(job_id, job_id_prefix)
if project is None:
project = self.project
if location is None:
location = self.location
if self._default_query_job_config:
if job_config:
                # Fields not set on the incoming job_config are filled in
                # from the default config; the incoming config therefore
                # takes precedence.
job_config = job_config._fill_from_default(
self._default_query_job_config
)
else:
job_config = self._default_query_job_config
job_ref = job._JobReference(job_id, project=project, location=location)
query_job = job.QueryJob(job_ref, query, client=self, job_config=job_config)
query_job._begin(retry=retry)
return query_job
def insert_rows(self, table, rows, selected_fields=None, **kwargs):
"""Insert rows into a table via the streaming API.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/insertAll
Args:
table (Union[ \
:class:`~google.cloud.bigquery.table.Table`, \
:class:`~google.cloud.bigquery.table.TableReference`, \
str, \
]):
The destination table for the row data, or a reference to it.
rows (Union[ \
Sequence[Tuple], \
Sequence[dict], \
]):
Row data to be inserted. If a list of tuples is given, each
tuple should contain data for each schema field on the
current table and in the same order as the schema fields. If
a list of dictionaries is given, the keys must include all
required fields in the schema. Keys which do not correspond
to a field in the schema are ignored.
selected_fields (Sequence[ \
:class:`~google.cloud.bigquery.schema.SchemaField`, \
]):
The fields to return. Required if ``table`` is a
:class:`~google.cloud.bigquery.table.TableReference`.
kwargs (dict):
Keyword arguments to
:meth:`~google.cloud.bigquery.client.Client.insert_rows_json`.
Returns:
Sequence[Mappings]:
One mapping per row with insert errors: the "index" key
identifies the row, and the "errors" key contains a list of
the mappings describing one or more problems with the row.
Raises:
ValueError: if table's schema is not set
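        Example:
            Illustrative sketch; the table ID and field names are
            placeholders (the table schema must already define them) and an
            existing ``client`` is assumed.
            >>> table = client.get_table('my_dataset.my_table')
            >>> rows = [{'full_name': 'Phred Phlyntstone', 'age': 32}]
            >>> errors = client.insert_rows(table, rows)
            >>> assert errors == []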
"""
if isinstance(table, str):
table = TableReference.from_string(table, default_project=self.project)
if selected_fields is not None:
schema = selected_fields
elif isinstance(table, TableReference):
raise ValueError("need selected_fields with TableReference")
elif isinstance(table, Table):
if len(table.schema) == 0:
raise ValueError(_TABLE_HAS_NO_SCHEMA)
schema = table.schema
else:
raise TypeError("table should be Table or TableReference")
json_rows = []
for index, row in enumerate(rows):
if isinstance(row, dict):
row = _row_from_mapping(row, schema)
json_row = {}
for field, value in zip(schema, row):
converter = _SCALAR_VALUE_TO_JSON_ROW.get(field.field_type)
if converter is not None: # STRING doesn't need converting
value = converter(value)
json_row[field.name] = value
json_rows.append(json_row)
return self.insert_rows_json(table, json_rows, **kwargs)
def insert_rows_json(
self,
table,
json_rows,
row_ids=None,
skip_invalid_rows=None,
ignore_unknown_values=None,
template_suffix=None,
retry=DEFAULT_RETRY,
):
"""Insert rows into a table without applying local type conversions.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/insertAll
table (Union[ \
:class:`~google.cloud.bigquery.table.Table` \
:class:`~google.cloud.bigquery.table.TableReference`, \
str, \
]):
The destination table for the row data, or a reference to it.
json_rows (Sequence[dict]):
Row data to be inserted. Keys must match the table schema fields
and values must be JSON-compatible representations.
row_ids (Sequence[str]):
(Optional) Unique ids, one per row being inserted. If omitted,
unique IDs are created.
skip_invalid_rows (bool):
(Optional) Insert all valid rows of a request, even if invalid
rows exist. The default value is False, which causes the entire
request to fail if any invalid rows exist.
ignore_unknown_values (bool):
(Optional) Accept rows that contain values that do not match the
schema. The unknown values are ignored. Default is False, which
treats unknown values as errors.
template_suffix (str):
(Optional) treat ``name`` as a template table and provide a suffix.
BigQuery will create the table ``<name> + <template_suffix>`` based
on the schema of the template table. See
https://cloud.google.com/bigquery/streaming-data-into-bigquery#template-tables
retry (:class:`google.api_core.retry.Retry`):
(Optional) How to retry the RPC.
Returns:
Sequence[Mappings]:
One mapping per row with insert errors: the "index" key
identifies the row, and the "errors" key contains a list of
the mappings describing one or more problems with the row.
"""
if isinstance(table, str):
table = TableReference.from_string(table, default_project=self.project)
rows_info = []
data = {"rows": rows_info}
for index, row in enumerate(json_rows):
info = {"json": row}
if row_ids is not None:
info["insertId"] = row_ids[index]
else:
info["insertId"] = str(uuid.uuid4())
rows_info.append(info)
if skip_invalid_rows is not None:
data["skipInvalidRows"] = skip_invalid_rows
if ignore_unknown_values is not None:
data["ignoreUnknownValues"] = ignore_unknown_values
if template_suffix is not None:
data["templateSuffix"] = template_suffix
# We can always retry, because every row has an insert ID.
response = self._call_api(
retry, method="POST", path="%s/insertAll" % table.path, data=data
)
errors = []
for error in response.get("insertErrors", ()):
errors.append({"index": int(error["index"]), "errors": error["errors"]})
return errors
def list_partitions(self, table, retry=DEFAULT_RETRY):
"""List the partitions in a table.
Arguments:
table (Union[ \
:class:`~google.cloud.bigquery.table.Table`, \
:class:`~google.cloud.bigquery.table.TableReference`, \
str, \
]):
The table or reference from which to get partition info
retry (google.api_core.retry.Retry):
(Optional) How to retry the RPC.
Returns:
List[str]:
A list of the partition ids present in the partitioned table
"""
if isinstance(table, str):
table = TableReference.from_string(table, default_project=self.project)
meta_table = self.get_table(
TableReference(
self.dataset(table.dataset_id, project=table.project),
"%s$__PARTITIONS_SUMMARY__" % table.table_id,
)
)
subset = [col for col in meta_table.schema if col.name == "partition_id"]
return [
row[0]
for row in self.list_rows(meta_table, selected_fields=subset, retry=retry)
]
def list_rows(
self,
table,
selected_fields=None,
max_results=None,
page_token=None,
start_index=None,
page_size=None,
retry=DEFAULT_RETRY,
):
"""List the rows of the table.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/list
.. note::
This method assumes that the provided schema is up-to-date with the
schema as defined on the back-end: if the two schemas are not
identical, the values returned may be incomplete. To ensure that the
local copy of the schema is up-to-date, call ``client.get_table``.
Args:
table (Union[ \
:class:`~google.cloud.bigquery.table.Table`, \
:class:`~google.cloud.bigquery.table.TableReference`, \
str, \
]):
The table to list, or a reference to it.
selected_fields (Sequence[ \
:class:`~google.cloud.bigquery.schema.SchemaField` \
]):
The fields to return. Required if ``table`` is a
:class:`~google.cloud.bigquery.table.TableReference`.
max_results (int):
(Optional) maximum number of rows to return.
page_token (str):
(Optional) Token representing a cursor into the table's rows.
If not passed, the API will return the first page of the
rows. The token marks the beginning of the iterator to be
returned and the value of the ``page_token`` can be accessed
at ``next_page_token`` of the
:class:`~google.cloud.bigquery.table.RowIterator`.
start_index (int):
(Optional) The zero-based index of the starting row to read.
page_size (int):
(Optional) The maximum number of items to return per page in
the iterator.
retry (:class:`google.api_core.retry.Retry`):
(Optional) How to retry the RPC.
Returns:
google.cloud.bigquery.table.RowIterator:
Iterator of row data
:class:`~google.cloud.bigquery.table.Row`-s. During each
page, the iterator will have the ``total_rows`` attribute
set, which counts the total number of rows **in the table**
(this is distinct from the total number of rows in the
current page: ``iterator.page.num_items``).
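        Example:
            Illustrative sketch; the table ID is a placeholder and an
            existing ``client`` is assumed.
            >>> table = client.get_table('my_dataset.my_table')
            >>> for row in client.list_rows(table, max_results=10):
            ...     print(row)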
"""
if isinstance(table, str):
table = TableReference.from_string(table, default_project=self.project)
if selected_fields is not None:
schema = selected_fields
elif isinstance(table, TableReference):
raise ValueError("need selected_fields with TableReference")
elif isinstance(table, Table):
if len(table.schema) == 0 and table.created is None:
raise ValueError(_TABLE_HAS_NO_SCHEMA)
schema = table.schema
else:
raise TypeError("table should be Table or TableReference")
params = {}
if selected_fields is not None:
params["selectedFields"] = ",".join(field.name for field in selected_fields)
if start_index is not None:
params["startIndex"] = start_index
row_iterator = RowIterator(
client=self,
api_request=functools.partial(self._call_api, retry),
path="%s/data" % (table.path,),
schema=schema,
page_token=page_token,
max_results=max_results,
page_size=page_size,
extra_params=params,
)
return row_iterator
# pylint: disable=unused-argument
def _item_to_project(iterator, resource):
"""Convert a JSON project to the native object.
:type iterator: :class:`~google.api_core.page_iterator.Iterator`
:param iterator: The iterator that is currently in use.
:type resource: dict
:param resource: An item to be converted to a project.
:rtype: :class:`.Project`
:returns: The next project in the page.
"""
return Project.from_api_repr(resource)
# pylint: enable=unused-argument
def _item_to_dataset(iterator, resource):
"""Convert a JSON dataset to the native object.
:type iterator: :class:`~google.api_core.page_iterator.Iterator`
:param iterator: The iterator that is currently in use.
:type resource: dict
:param resource: An item to be converted to a dataset.
:rtype: :class:`.DatasetListItem`
:returns: The next dataset in the page.
"""
return DatasetListItem(resource)
def _item_to_job(iterator, resource):
"""Convert a JSON job to the native object.
:type iterator: :class:`~google.api_core.page_iterator.Iterator`
:param iterator: The iterator that is currently in use.
:type resource: dict
:param resource: An item to be converted to a job.
:rtype: job instance.
:returns: The next job in the page.
"""
return iterator.client.job_from_resource(resource)
def _item_to_table(iterator, resource):
"""Convert a JSON table to the native object.
:type iterator: :class:`~google.api_core.page_iterator.Iterator`
:param iterator: The iterator that is currently in use.
:type resource: dict
:param resource: An item to be converted to a table.
:rtype: :class:`~google.cloud.bigquery.table.Table`
:returns: The next table in the page.
"""
return TableListItem(resource)
def _make_job_id(job_id, prefix=None):
"""Construct an ID for a new job.
:type job_id: str or ``NoneType``
:param job_id: the user-provided job ID
:type prefix: str or ``NoneType``
:param prefix: (Optional) the user-provided prefix for a job ID
:rtype: str
:returns: A job ID
"""
if job_id is not None:
return job_id
elif prefix is not None:
return str(prefix) + str(uuid.uuid4())
else:
return str(uuid.uuid4())
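# Hedged illustration, not part of the library, of how _make_job_id resolves
# its arguments; the prefix value below is made up.
def _example_make_job_id_usage():
    assert _make_job_id("abc123") == "abc123"                  # explicit ID wins
    assert _make_job_id(None, prefix="daily-").startswith("daily-")  # prefix + UUID
    assert len(_make_job_id(None)) == 36                       # bare uuid4 string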
def _check_mode(stream):
"""Check that a stream was opened in read-binary mode.
:type stream: IO[bytes]
:param stream: A bytes IO object open for reading.
:raises: :exc:`ValueError` if the ``stream.mode`` is a valid attribute
and is not among ``rb``, ``r+b`` or ``rb+``.
"""
mode = getattr(stream, "mode", None)
if isinstance(stream, gzip.GzipFile):
if mode != gzip.READ:
raise ValueError(
"Cannot upload gzip files opened in write mode: use "
"gzip.GzipFile(filename, mode='rb')"
)
else:
if mode is not None and mode not in ("rb", "r+b", "rb+"):
raise ValueError(
"Cannot upload files opened in text mode: use "
"open(filename, mode='rb') or open(filename, mode='r+b')"
)
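# Hedged sketch, not part of the library: _check_mode accepts streams opened
# for binary reading and rejects text-mode handles. The path is hypothetical.
def _example_check_mode_usage(path="data.csv"):
    with open(path, "rb") as stream:
        _check_mode(stream)  # binary read mode passes silently
    # open(path) or open(path, "r") would make _check_mode raise ValueError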
def _get_upload_headers(user_agent):
"""Get the headers for an upload request.
:type user_agent: str
:param user_agent: The user-agent for requests.
:rtype: dict
:returns: The headers to be used for the request.
"""
return {
"Accept": "application/json",
"Accept-Encoding": "gzip, deflate",
"User-Agent": user_agent,
"content-type": "application/json",
}
| apache-2.0 |
OneOneFour/ICSP_Monte_Carlo | Basic Sim.py | 1 | 8531 | import sys
import time
from datetime import datetime as dt
import matplotlib.pyplot as plt
import numpy as np
import numpy.random as npr
import ProjectFunctions as pf
import lotkavolterra as lv
#sys.stdout = open("output/" + dt.now().ctime().replace(":", "") + "output.txt", 'w')
seed = int(time.time())
npr.seed(seed)
print("SEED - " + str(seed))
debug = False
class World:
predCounter = []
preyCounter = []
population = None
addQueue = None
t = 0
    def __init__(self, gridsize):
        self.gridsize = gridsize
        # per-instance counters, so a second World does not inherit the
        # history of an earlier one through the shared class attributes
        self.predCounter = []
        self.preyCounter = []
        self.t = 0
        World.population = {}
        World.addQueue = {}
def step(self):
if debug:
print("---------- BEGIN STEP " + str(self.t) + " ----------")
self.addQueue.clear()
self.predCounter.append(len(self.population["Predator"]))
self.preyCounter.append(len(self.population["Prey"]))
if debug:
print("Prey count:" + str(self.preyCounter[self.t]) + " pred count:" + str(self.predCounter[self.t]))
self.t += 1
for key in self.population:
for ani in self.population[key]:
ani.step(self)
# cull the old
for key in self.population:
if key in self.addQueue:
self.population[key].extend(self.addQueue[key])
for ani in self.population[key][:]:
if not ani.alive:
self.population[key].remove(ani)
for key in self.population:
for beast in self.population[key]:
beast.move()
def getPreyCount(self):
return len(self.getPrey())
def getPrey(self):
a = [prey for prey in self.population['Prey'] if prey.alive]
return a
def getPredators(self):
return self.population['Predator']
def Spawn(self, animal):
if debug:
print("SPAWN: " + animal.name + "_" + str(animal.id))
if animal.name not in self.addQueue:
self.addQueue[animal.name] = []
self.addQueue[animal.name].append(animal)
def randSpawnPredator(self, mkill, stdkill, mgrow, stdgrow, mexpect, stdexpect, count, killRange):
loc = [npr.uniform(self.gridsize), npr.uniform(self.gridsize)]
self.population['Predator'] = [Predator(mkill, stdkill, mgrow, stdgrow, mexpect, stdexpect, "Predator", loc, killRange) for a in
range(count)]
    def randSpawnPrey(self, mgrow, stdgrow, mexpect, stdexpect, count):
        loc = [npr.uniform(self.gridsize), npr.uniform(self.gridsize)]
        self.population['Prey'] = [Prey(mgrow, stdgrow, mexpect, stdexpect, "Prey", loc) for a in range(count)]
def showGrid(self):
for key in self.population:
print(key)
gridArray = np.zeros((self.gridsize, self.gridsize))
for beast in self.population[key]:
gridArray[beast.loc[0]][beast.loc[1]] += 1
print (gridArray)
class Animal:
id = 0
count = 0
name = "Animal"
alive = True
mExpect = 0
stdExpect = 0
lifeExpect = 0 # Mean age of death #std dev is 1.5 steps?
age = 0
loc = []
#pmove = 0
    def __init__(self, meanExpectancy, stdExpectancy, name, loc):
        self.id = Animal.count
        Animal.count += 1
        self.lifeExpect = round(npr.normal(meanExpectancy, stdExpectancy))
        self.name = name
        self.mExpect = meanExpectancy
        self.stdExpect = stdExpectancy
self.loc = loc[:]
def step(self, world):
self.age += 1
if self.age > self.lifeExpect:
self.kill()
return
def kill(self):
if debug:
print("KILL: " + self.name + "_" + str(self.id))
self.alive = False
def move(self):
#if self.move > npr.uniform():
#self.loc = [(elem+(npr.randint(3)-1))%world.gridsize for elem in loc] TODO sort this out, into one line
self.loc[1] += (npr.randint(3)-1)
self.loc[1] %= world.gridsize
self.loc[0] += (npr.randint(3)-1)
self.loc[0] %= world.gridsize
class Predator(Animal):
pkill = 0
mkill = 0
stdkill = 0
mgrow = 0
stdgrow = 0
killRange = 0
def __init__(self, mkill, stdkill, mgrow, stdgrow, mexpect, stdexpect, name, loc, killRange):
Animal.__init__(self, mexpect, stdexpect, name, loc)
self.mkill = mkill
self.stdkill = stdkill
self.mgrow = mgrow
self.stdgrow = stdgrow
self.pkill = npr.normal(mkill, stdkill)
self.killRange = killRange
self.pbirth = npr.normal(mgrow, stdgrow)
if debug:
print("PREDBIRTH: " + " lifeexpect:" + str(self.lifeExpect) + " killprob:" + str(
self.pkill) + " growProb:" + str(self.pbirth))
def step(self, world):
Animal.step(self, world)
if not self.alive:
return
self.eat(world.getPrey(), world) # get the prey
    def eat(self, preytot, world):
        prey = [food for food in preytot if food.loc == self.loc]
        # 'culmBinom' was undefined in the original; a binomial draw for the
        # number of co-located prey killed (probability clipped to [0, 1]) is
        # assumed here
        for meal in range(npr.binomial(len(prey), np.clip(self.pkill, 0.0, 1.0))):
            prey[meal].kill()
        if npr.uniform() < self.pbirth:
            world.Spawn(Predator(self.mkill, self.stdkill, self.mgrow, self.stdgrow,
                                 self.mExpect, self.stdExpect, self.name, self.loc, self.killRange))
            # Spawn Baby next step
class Prey(Animal): # mean number of babies each step
stdgrow = 0
mgrow = 0
pgrow = 0
def __init__(self, mgrow, stdgrow, mExpect, stdExpect, name,loc):
Animal.__init__(self, mExpect, stdExpect, name,loc)
self.mgrow = mgrow
self.stdgrow = stdgrow
self.pgrow = npr.normal(self.mgrow, self.stdgrow)
if debug:
print("PREYBIRTH:" + "lifeexpect:" + str(self.lifeExpect) + " growProb: " + str(self.pgrow))
def step(self, world):
if not self.alive:
return
self.rollGrow(world)
Animal.step(self, world)
def rollGrow(self, world):
roll = npr.uniform()
if roll < self.pgrow:
world.Spawn(Prey(self.mgrow, self.stdgrow, self.mExpect, self.stdExpect, self.name,self.loc))
tscale = 1
killRange = 1
prey0, pred0 = 50, 75
alpha, beta, delta, gamma = 0.5, 0.5, 1.5, 0.7
alpha1, beta1, delta1, gamma1 = alpha / tscale, beta / (pred0 * tscale), delta / (prey0 * tscale), gamma / tscale
world = World(2)
world.randSpawnPrey(alpha1, 0.05 / (alpha1 * tscale), 5 * tscale, tscale, prey0)
world.randSpawnPredator(beta1 / (alpha1 + 1), 0.01 / tscale, delta1 / (beta1), beta1 / (delta1 * tscale), 1 / gamma1,
tscale, pred0, killRange)
##Remember p = beta/(alpha+1)
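# Hedged sketch making the rescaling above explicit: continuous Lotka-Volterra
# rates are turned into per-step probabilities by dividing by the number of
# steps per time unit (tscale) and, for the interaction terms, by the
# reference population sizes. This mirrors the alpha1/beta1/delta1/gamma1
# assignment above and is illustrative only.
def per_step_rates(alpha, beta, delta, gamma, prey0, pred0, tscale=1):
    return (alpha / tscale,
            beta / (pred0 * tscale),
            delta / (prey0 * tscale),
            gamma / tscale)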
#i = 20 * tscale
#for c in range(i):
# world.step()
# world.showGrid()
#plt.plot(np.arange(i), world.preyCounter, 'b-', label="prey")
#plt.plot(np.arange(i), world.predCounter, 'r-', label="predator")
#plt.legend()
#filename = "output/" + (datetime.datetime.now().ctime() + "output").replace(":", "")
#plt.gcf().savefig(filename + ".png")
#plt.show()
#pf.saveValues(alpha, beta, gamma, delta, prey0, pred0, filename + ".csv")
# Output
def runSim(alpha, beta, gamma, delta, s0, stop=10, steps=10, scale=1):
alpha1, beta1, gamma1, delta1 = alpha / steps, beta / (scale * s0[1] * steps), gamma / steps, delta / (
scale * s0[0] * steps)
    world = World(2)  # gridsize assumed to be 2, matching the module-level run above
if debug:
print("----- START ----")
print(
"alpha = " + str(alpha1) + " beta = " + str(beta1) + " delta = " + str(delta1) + " gamma = " + str(gamma1))
    world.randSpawnPrey(alpha1, 0.5 / steps, 500 * steps, 1 * steps, int(s0[0] * scale))
    world.randSpawnPredator(beta1 / (alpha1 + 1), 0.01 / steps, delta1 / beta1, 0.1 / steps, 1 / gamma1, 1 * steps,
                            int(s0[1] * scale), killRange)  # killRange (module scope) was missing from the original call
for i in range(stop * steps):
world.step()
return [world.preyCounter, world.predCounter], np.linspace(0, stop, steps * stop)
alpha, beta, gamma, delta, s0 = 0.67, 1.33, 1, 1, [1, 0.75]
(eq, te) = lv.lotkavolterragraph(alpha, beta, gamma, delta, s0, 10, 10)
(sim, ts) = runSim(alpha, beta, gamma, delta, s0, 10, 10, 100)
fig, axes = plt.subplots(nrows=2, figsize=(20, 10))
axes[0].plot(te, eq[:, 0], linewidth=1, label="prey")
axes[0].plot(te, eq[:, 1], linewidth=1, label="pred")
axes[1].plot(ts, sim[0], linewidth=1, label="prey")
axes[1].plot(ts, sim[1], linewidth=1, label="pred")
for ax in range(2):
axes[ax].set_xlabel("Time")
axes[ax].set_ylabel("Species Count")
axes[ax].legend()
plt.show()
| mit |
Davidjohnwilson/sympy | sympy/external/importtools.py | 85 | 7294 | """Tools to assist importing optional external modules."""
from __future__ import print_function, division
import sys
# Override these in the module to change the default warning behavior.
# For example, you might set both to False before running the tests so that
# warnings are not printed to the console, or set both to True for debugging.
WARN_NOT_INSTALLED = None # Default is False
WARN_OLD_VERSION = None # Default is True
def __sympy_debug():
# helper function from sympy/__init__.py
# We don't just import SYMPY_DEBUG from that file because we don't want to
# import all of sympy just to use this module.
import os
debug_str = os.getenv('SYMPY_DEBUG', 'False')
if debug_str in ('True', 'False'):
return eval(debug_str)
else:
raise RuntimeError("unrecognized value for SYMPY_DEBUG: %s" %
debug_str)
if __sympy_debug():
WARN_OLD_VERSION = True
WARN_NOT_INSTALLED = True
def import_module(module, min_module_version=None, min_python_version=None,
warn_not_installed=None, warn_old_version=None,
module_version_attr='__version__', module_version_attr_call_args=None,
__import__kwargs={}, catch=()):
"""
Import and return a module if it is installed.
If the module is not installed, it returns None.
A minimum version for the module can be given as the keyword argument
min_module_version. This should be comparable against the module version.
By default, module.__version__ is used to get the module version. To
override this, set the module_version_attr keyword argument. If the
attribute of the module to get the version should be called (e.g.,
module.version()), then set module_version_attr_call_args to the args such
that module.module_version_attr(*module_version_attr_call_args) returns the
module's version.
If the module version is less than min_module_version using the Python <
comparison, None will be returned, even if the module is installed. You can
use this to keep from importing an incompatible older version of a module.
You can also specify a minimum Python version by using the
min_python_version keyword argument. This should be comparable against
sys.version_info.
If the keyword argument warn_not_installed is set to True, the function will
emit a UserWarning when the module is not installed.
If the keyword argument warn_old_version is set to True, the function will
emit a UserWarning when the library is installed, but cannot be imported
because of the min_module_version or min_python_version options.
Note that because of the way warnings are handled, a warning will be
emitted for each module only once. You can change the default warning
behavior by overriding the values of WARN_NOT_INSTALLED and WARN_OLD_VERSION
in sympy.external.importtools. By default, WARN_NOT_INSTALLED is False and
WARN_OLD_VERSION is True.
This function uses __import__() to import the module. To pass additional
options to __import__(), use the __import__kwargs keyword argument. For
example, to import a submodule A.B, you must pass a nonempty fromlist option
to __import__. See the docstring of __import__().
This catches ImportError to determine if the module is not installed. To
catch additional errors, pass them as a tuple to the catch keyword
argument.
Examples
========
>>> from sympy.external import import_module
>>> numpy = import_module('numpy')
>>> numpy = import_module('numpy', min_python_version=(2, 7),
... warn_old_version=False)
>>> numpy = import_module('numpy', min_module_version='1.5',
... warn_old_version=False) # numpy.__version__ is a string
>>> # gmpy does not have __version__, but it does have gmpy.version()
>>> gmpy = import_module('gmpy', min_module_version='1.14',
... module_version_attr='version', module_version_attr_call_args=(),
... warn_old_version=False)
>>> # To import a submodule, you must pass a nonempty fromlist to
>>> # __import__(). The values do not matter.
>>> p3 = import_module('mpl_toolkits.mplot3d',
... __import__kwargs={'fromlist':['something']})
>>> # matplotlib.pyplot can raise RuntimeError when the display cannot be opened
>>> matplotlib = import_module('matplotlib',
... __import__kwargs={'fromlist':['pyplot']}, catch=(RuntimeError,))
"""
# keyword argument overrides default, and global variable overrides
# keyword argument.
warn_old_version = (WARN_OLD_VERSION if WARN_OLD_VERSION is not None
else warn_old_version or True)
warn_not_installed = (WARN_NOT_INSTALLED if WARN_NOT_INSTALLED is not None
else warn_not_installed or False)
import warnings
# Check Python first so we don't waste time importing a module we can't use
if min_python_version:
if sys.version_info < min_python_version:
if warn_old_version:
warnings.warn("Python version is too old to use %s "
"(%s or newer required)" % (
module, '.'.join(map(str, min_python_version))),
UserWarning)
return
# PyPy 1.6 has rudimentary NumPy support and importing it produces errors, so skip it
if module == 'numpy' and '__pypy__' in sys.builtin_module_names:
return
try:
mod = __import__(module, **__import__kwargs)
## there's something funny about imports with matplotlib and py3k. doing
## from matplotlib import collections
## gives python's stdlib collections module. explicitly re-importing
## the module fixes this.
from_list = __import__kwargs.get('fromlist', tuple())
for submod in from_list:
if submod == 'collections' and mod.__name__ == 'matplotlib':
__import__(module + '.' + submod)
except ImportError:
if warn_not_installed:
warnings.warn("%s module is not installed" % module, UserWarning)
return
except catch as e:
if warn_not_installed:
warnings.warn(
"%s module could not be used (%s)" % (module, repr(e)))
return
if min_module_version:
modversion = getattr(mod, module_version_attr)
if module_version_attr_call_args is not None:
modversion = modversion(*module_version_attr_call_args)
if modversion < min_module_version:
if warn_old_version:
# Attempt to create a pretty string version of the version
if isinstance(min_module_version, basestring):
verstr = min_module_version
elif isinstance(min_module_version, (tuple, list)):
verstr = '.'.join(map(str, min_module_version))
else:
# Either don't know what this is. Hopefully
# it's something that has a nice str version, like an int.
verstr = str(min_module_version)
warnings.warn("%s version is too old to use "
"(%s or newer required)" % (module, verstr),
UserWarning)
return
return mod
| bsd-3-clause |
suraj-jayakumar/lstm-rnn-ad | src/testdata/random_data_time_series/fourier_ad_dense_test.py | 1 | 1880 | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 23 14:28:22 2016
@author: suraj
"""
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers.core import Dense
from keras.layers.recurrent import LSTM
from keras.models import Graph
from keras.models import model_from_json
from pandas import DataFrame
import pandas as pd
import pickle
#import os
#os.system('python lstm_ad_random.py')
# CONSTANTS
tsteps = 12
batch_size = 1
epochs = 10
attsize = 3
inputs = pickle.load(open('x_att.p'))
expected_outputs = pickle.load(open('y_att.p'))
predicted_outputs = []
test_inps = inputs[2688:2688+97]
test_outs = expected_outputs[2688:2688+97]
model = model_from_json(open('dense_fourier.json').read())
model.load_weights('weights_dense_fourier.h5')
freq_x_axis = np.fft.fftfreq(len(test_inps))
for i in range(len(test_inps)):
predicted_outputs.append(model.predict(np.array([test_inps[i]]))[0])
converted_expected = []
converted_predicted = []
a = [np.complex(test_outs[i][0], test_outs[i][1]) for i in range(len(test_outs))]
b = np.fft.ifft(a)
temp_complex = []
for i in range(len(test_inps)):
temp_complex.append(np.array([np.complex(predicted_outputs[i][0],predicted_outputs[i][1])]))
temp_complex = []
for i in range(len(test_inps)):
temp_complex.append(np.array([np.complex(predicted_outputs[i][0],predicted_outputs[i][1])]))
converted_predicted.append(np.array([np.complex(0,0)]))
converted_predicted.extend(np.fft.ifft(np.array(temp_complex)))
converted_predicted = np.array(converted_predicted)
print "hi"
plt.plot(b.real,label='Expected')
plt.plot(converted_predicted.real,label='Predicted')
plt.legend(loc='best')
plt.title('Expected vs Predicted Attach Rates for Test Week (Batch)')
plt.xlabel('Frequency')
plt.ylabel('Attach Rate')
plt.show()
#plt.savefig('LSTM_12_ts_10_epch_batch_mon.png')
| apache-2.0 |
Arcanewinds/FDL-LunarResources | CraterDetection/Polygon/craterDetector.py | 1 | 47437 | #Written by Timothy Seabrook
#[email protected]
#This whole script takes a bit too long to run and didn't end up being too effective.
#The basic idea is:
#1. Detect edges using a canny filter (This in itself isn't reliable enough)
#2. Group edges into 'shapes' permitting that some gaps may exist
#3. For each shape, use a line-of-fit split-and-merge strategy to form straight lines from pixels
#4. Convert shapes into graphs - lines to nodes and edges
#5. Find cycles in graphs to identify convex shapes
#6. Threshold convex shapes to identify craters
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import math
import os
from skimage import data, color
from skimage.transform import hough_circle, hough_circle_peaks
from skimage.feature import canny
from skimage.draw import circle_perimeter
from skimage.util import img_as_ubyte
from scipy.sparse import csr_matrix
from graphCycles import Graph
import split_and_merge as sm
from PIL import Image
import glymur
import gdal
def edgeCluster(edges, max_step):
#edgeCluster algorithm
#Perform a walk from each edge pixel
#max_step determines how far a pixel can be for it
# to be considered part of the same edge
w, h = edges.shape[1], edges.shape[0] #size of search area
labels = np.zeros((h, w), dtype=np.uint32) #uint32 covers 0 to 4,294,967,295
data = np.where(edges)
nextLabel = 0 #Region ID (0 means unlabelled)
checkList = [] #Initialise checklist, contains pixels for neighbourhood traversability checks
num_total = len(data[0]) #Count number of valid unlabelled pixels
num_complete = 0 #Initialise counter
ind = 0
#BEGIN CONNECTED COMPONENTS ALGORITHM
while(num_complete < num_total):
nextLabel += 1 #Increment label class ID
y, x = data[0][ind], data[1][ind]
while(labels[y,x] != 0):
ind += 1
y, x = data[0][ind], data[1][ind]
labels[y,x] = nextLabel #Add next pixel to the new label class
if checkList.__len__() == 0: #Create a list of pixels for FloodFill neighbour checking
checkList = [[y, x]]
else:
            checkList.append([y, x])  # append mutates in place; assigning its None return value was a bug
#BEGIN FLOODFILL ALGORITHM
while checkList.__len__() > 0: #Whilst there are qualifying pixels in this iteration of FloodFill
y, x = checkList.pop() #Take pixel from checklist, to find qualifying neighbours
num_complete += 1 #update count for timer
#BEGIN LOCATION SPECIFIC NEIGHBOUR INDEXING
if x > (max_step-1):
xmin = -max_step
if x < (w - max_step): #middle column
xmax = 1+max_step
else: #rightmost column
xmax = 1+(w-x-1)
else: #leftmost column
xmax = 1+max_step
xmin = -x
if y > (max_step-1):
ymin = -max_step
if y < (h - max_step): #middle row
ymax = 1+max_step
else: #bottom row
ymax = 1+(h-y-1)
else: #top row
ymax = 1+max_step
ymin = -y
#END LOCATION SPECIFIC NEIGHBOUR INDEXING
#BEGIN NEIGHBOUR TRAVERSABILITY CHECK
for i in range(xmin, xmax):
for j in range(ymin, ymax): #for all neighbouring pixels
if (((j == 0) & (i == 0))!=True): #not including current pixel
if(labels[y + j, x + i] == 0):
if edges[y+j,x+i] == True: #and only considering unlabeled pixels
labels[y+j,x+i] = nextLabel
checkList.append([y+j,x+i])
#END NEIGHBOUR TRAVERSABILITY CHECK
#END FLOODFILL ALGORITHM
#seeds = np.where(labels == 0) #Reset candidate seeds
#END CONNECTED COMPONENTS ALGORITHM
cols = np.arange(labels.size)
M = csr_matrix((cols, (labels.ravel(), cols)),
shape=(labels.max() + 1, labels.size))
indices = [np.unravel_index(row.data, labels.shape) for row in M]
counts = np.zeros((np.max(labels)+1))
for i in range(np.max(labels)+1):
counts[i] = indices[i][0].size
return indices, counts
#return labels #return labels and count
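# Hedged usage sketch for edgeCluster (illustrative only): cluster two short,
# well-separated edges in a tiny synthetic boolean edge map.
def _example_edgeCluster():
    demo = np.zeros((5, 5), dtype=bool)
    demo[1, 0:3] = True          # first edge segment (3 pixels)
    demo[4, 3:5] = True          # second, distant edge segment (2 pixels)
    indices, counts = edgeCluster(demo, max_step=1)
    # counts[0] corresponds to the unlabelled background; the remaining
    # entries give the pixel count of each detected edge group (here: 3 and 2)
    return len(counts) - 1, counts[1:]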
#base_folder = "/Volumes/DATA DISK/PDS_FILES/LROC_NAC/m108898482_cdr_w_jp2/"
#base_filename ="m108898482_cdr_jp2"
base_folder = "/Users/seabrook/Documents/FDL/FDL-LunarResources/PDS_FILES/LROC_NAC/"
base_filename = "M1106504662RE"
filename = base_folder+"P26_0-18000.txt"
d = []
with open(filename,'rb') as source:
for line in source:
fields = line.split('\t')
d.append(fields)
hypothesis = 4
num_nodes = 0
for n in range(len(d)-1):
#base_filename = d[n+1][0]
num_lil_craters = 0
num_craters = 0
num_bigcraters = 0
#curr_filename = filename+str(n+1)+'.jp2'
curr_filename = base_folder+base_filename+'.tif'
ds = gdal.Open(curr_filename)
image = np.array(ds.GetRasterBand(1).ReadAsArray())
#curr_filename = base_folder+base_filename+'_p'+str(n+1)+'.tif'
#if not os.path.isdir(base_folder + 'p' + str(n + 1) + "/"):
# os.mkdir(base_folder + 'p' + str(n + 1) + "/")
# Load picture and detect edges
#image = glymur.Jp2k(curr_filename)[:]
# Low threshold and High threshold represent number of pixels that may be skipped to make a line [4, 60 seems good]
# Sigma represents the width of the guassian smoothing kernel [3 seems good]
edges = canny(image, sigma=3, low_threshold=4, high_threshold=50)
#fig, axarr = plt.subplots(ncols=2, nrows=1, figsize=(10, 4))
#axarr[1].imshow(image, cmap=plt.cm.gray)
#plt.show()
lines, counts = edgeCluster(edges,3)
#segments = np.zeros(len(lines))
segmentParent = np.zeros(len(lines), dtype=int)
#data = np.where(edges)
for i in range(1,len(lines)):
if i == 1:
segments = sm.split_and_merge(lines[i], 1)
segmentParent[i] = len(segments)
else:
segments = np.hstack((segments, sm.split_and_merge(lines[i], 0.5)))
segmentParent[i] = segments.size
#cm = plt.get_cmap('gist_rainbow')
#fig1, axarr = plt.subplots(ncols=2, nrows=1)
#axarr[0].imshow(edges, cmap=plt.cm.gray)
#axarr[1].imshow(image, cmap=plt.cm.gray)
#axarr[1].set_color_cycle([cm(1. * i / 20) for i in range(20)])
#for i in range(1,len(lines)):
# y, x = lines[i]
# axarr[1].scatter(x, y, alpha=0.8, edgecolors='none', s=1)
#fig2, axarr = plt.subplots(ncols=2, nrows=1)
#axarr[0].imshow(image, cmap=plt.cm.gray)
#axarr[1].imshow(image, cmap=plt.cm.gray)
#For every grouped line
nodes = []
for i in range(1,len(lines)):
first = segmentParent[i-1]
last = segmentParent[i]
#For every segment of line
#plt.axes(axarr[0])
for j in range(first,last):
sm.generate_line_ends(segments[j])
# plt.plot([segments[j].start[1], segments[j].end[1]], [segments[j].start[0], segments[j].end[0]], 'r-')
#Hypothesis 1
# proposal: extend all lines by a scalar value to encourage intersection
# result: poor, some lines that already intersect do not need additional reach
# some lines require larger reach still to make important intersections
# conclusion: We require a dynamic value per line, based on context?
#
#Hypothesis 2
# proposal: where two lines can intersect if extended by max((end-mean/2),max_extend)
# they should be
# result: decent, large lines extend too far, most 'easy' craters get captured.
# conclusion: distance between ends of lines is probably better than distance to intersection
#
#If a line can be extended to intersect another, within the bounds of the others data points
#Then it should do so.
#Max extension (in x) permissible for each of two lines to intersect
##############################################################################
if(hypothesis == 2):
max_extend = 5
for j in range(first, last):
for k in range(first,last):
if(j < k):
#Do these lines intersect?
if(segments[j].slope[0] == segments[k].slope[0]):
#They never intersect
intersect = False
else:
#They intersect at [x_cross, y_cross]
#a1x + b1 = a2x + b2
#(a1 - a2)x = (b2 - b1)
#x = (b2-b1)/(a1-a2)
x_cross = np.divide((segments[k].intercept - segments[j].intercept),\
(segments[j].slope[0] - segments[k].slope[0]))
#y = ax + b
y_cross = np.multiply(segments[j].slope[0], x_cross) + segments[j].intercept
#Check that intersection point lies within bounds of map
if((x_cross > 0) & (x_cross < edges.shape[0]) & (y_cross > 0) & (y_cross < edges.shape[1])):
#If x_cross is outside of segment j's maximal bounds
if (x_cross > segments[j].max[0]):
#check that x_cross is close enough to j to warrant intersection
if ((x_cross - segments[j].max[0]) < np.maximum(np.multiply(0.5,(
np.max(segments[j].data[0]) - segments[j].mean[0])),max_extend)):
#If x_cross is outside of segment k's maximals bounds
if (x_cross > segments[k].max[0]):
# check that x_cross is close enough to k to warrant intersection
if ((x_cross - segments[k].max[0]) < np.maximum(np.multiply(0.5, (
np.max(segments[k].data[0]) - segments[k].mean[0])), max_extend)):
#If it is, update k(max)
segments[k].max[0] = x_cross
if (segments[k].slope[0] >= 0):
segments[k].max[1] = y_cross
else:
segments[k].min[1] = y_cross
#update j(max)
segments[j].max[0] = x_cross
if segments[j].slope[0] >= 0:
segments[j].max[1] = y_cross
else:
segments[j].min[1] = y_cross
else:
# If x_cross is outside of segment k's minimal bounds
if (x_cross < segments[k].min[0]):
# check that x_cross is close enough to k to warrant intersection
if ((segments[k].min[0] - x_cross) < np.maximum(np.multiply(0.5, (
segments[k].mean[0] - np.min(segments[k].data[0]))),max_extend)):
# If it is, update k(min)
segments[k].min[0] = x_cross
if (segments[k].slope[0] >= 0):
segments[k].min[1] = y_cross
else:
segments[k].max[1] = y_cross
#update j(max)
segments[j].max[0] = x_cross
if segments[j].slope[0] >= 0:
segments[j].max[1] = y_cross
else:
segments[j].min[1] = y_cross
else: #x_cross is within bounds of k
# update j(max)
segments[j].max[0] = x_cross
if segments[j].slope[0] >= 0:
segments[j].max[1] = y_cross
else:
segments[j].min[1] = y_cross
else:
# If x_cross is outside of segment j's minimal bounds
if (x_cross < segments[j].min[0]):
# check that x_cross is close enough to j to warrant intersection
if((segments[j].min[0] - x_cross) < np.maximum(np.multiply(0.5,(
segments[j].mean[0] - np.min(segments[j].data[0]))),max_extend)):
# If x_cross is outside of segment k's maximal bounds
if (x_cross > segments[k].max[0]):
# check that x_cross is close enough to k to warrant intersection
if ((x_cross - segments[k].max[0]) < np.maximum(np.multiply(0.5,(
np.max(segments[k].data[0]) - segments[k].mean[0])),max_extend)):
# If it is, update k(max)
segments[k].max[0] = x_cross
if (segments[k].slope[0] >= 0):
segments[k].max[1] = y_cross
else:
segments[k].min[1] = y_cross
# update j(min)
segments[j].min[0] = x_cross
if segments[j].slope[0] >= 0:
segments[j].min[1] = y_cross
else:
segments[j].max[1] = y_cross
else:
# If x_cross is outside of segment k's minimal bounds
if (x_cross < segments[k].min[0]):
# check that x_cross is close enough to k to warrant intersection
if ((segments[k].min[0] - x_cross) < np.maximum(np.multiply(0.5, (
segments[k].mean[0] - np.min(segments[k].data[0]))), max_extend)):
# If it is, update k(min)
segments[k].min[0] = x_cross
if (segments[k].slope[0] >= 0):
segments[k].min[1] = y_cross
else:
segments[k].max[1] = y_cross
# update j(min)
segments[j].min[0] = x_cross
if segments[j].slope[0] >= 0:
segments[j].min[1] = y_cross
else:
segments[j].max[1] = y_cross
else: #x_cross is within bounds of k
# update j(max)
segments[j].min[0] = x_cross
if segments[j].slope[0] >= 0:
segments[j].min[1] = y_cross
else:
segments[j].max[1] = y_cross
else: #x_cross is within bounds of j
# If x_cross is outside of segment k's maximals bounds
if (x_cross > segments[k].max[0]):
# check that x_cross is close enough to k to warrant intersection
if ((x_cross - segments[k].max[0]) < np.maximum(np.multiply(0.5,
(np.max(segments[k].data[0]) - segments[k].mean[0])), max_extend)):
# If it is, update k(max)
segments[k].max[0] = x_cross
if (segments[k].slope[0] >= 0):
segments[k].max[1] = y_cross
else:
segments[k].min[1] = y_cross
else:
# If x_cross is outside of segment k's minimal bounds
if (x_cross < segments[k].min[0]):
# check that x_cross is close enough to k to warrant intersection
if ((segments[k].min[0] - x_cross) < np.maximum(np.multiply(0.5, (
segments[k].mean[0] - np.min(segments[k].data[0]))), max_extend)):
# If it is, update k(min)
segments[k].min[0] = x_cross
if (segments[k].slope[0] >= 0):
segments[k].min[1] = y_cross
else:
segments[k].max[1] = y_cross
#else: # x_cross is within bounds of k
##############################################################################
# Hypothesis 3
# proposal: Connecting the ends of lines will provide more sensible connections
# than connecting intersections
# result: Compact groups, lots of unnecessary crossing lines.
# conclusion: Most lines only need to connect once at each end
if(hypothesis == 3):
max_extend = 6
changeFlag = True
connected = np.zeros((last - first, last - first), dtype=bool)
while(changeFlag):
changeFlag = False
for j in range(first, last):
for k in range(first,last):
if(j < k):
if(connected[j-first,k-first] == False):
#First, do these lines already intersect?
if (segments[j].slope[0] == segments[k].slope[0]):
# They never intersect
intersect = False
else:
x_cross = np.divide((segments[k].intercept[0] - segments[j].intercept[0]),
(segments[j].slope[0] - segments[k].slope[0]))
# y = ax + b
y_cross = np.multiply(segments[j].slope[0], x_cross) + segments[j].intercept[0]
intersect = False
#if((x_cross > segments[k].min[0]) & (x_cross > segments[j].min[0])
# & (x_cross < segments[k].max[0]) & (x_cross < segments[j].max[0])):
# intersect = True
# connected[j-first,k-first] = True
# connected[k-first,j-first] = True
if(intersect == False):
#Are the ends of these lines close together?
distance = np.zeros(4)
#min -> min
distance[0] = np.sqrt(np.sum((np.power(segments[j].start[0] - segments[k].start[0],2),
np.power((segments[j].start[1] - segments[k].start[1]), 2))))
#min -> max
distance[1] = np.sqrt(np.sum((np.power((segments[j].start[0] - segments[k].end[0]),2),
np.power((segments[j].start[1] - segments[k].end[1]), 2))))
#max -> min
distance[2] = np.sqrt(np.sum((np.power((segments[j].end[0] - segments[k].start[0]),2),
np.power((segments[j].end[1] - segments[k].start[1]), 2))))
#max -> max
distance[3] = np.sqrt(np.sum((np.power((segments[j].end[0] - segments[k].end[0]),2),
np.power((segments[j].end[1] - segments[k].end[1]), 2))))
ind = np.argmin(distance)
if distance[ind] < max_extend:
if(distance[ind] == 0):
connected[j - first, k - first] = True
connected[k - first, j - first] = True
else:
changeFlag = True
switcher = {
0: [[segments[j].start[0], segments[j].start[1]], [segments[k].start[0], segments[k].start[1]]],
1: [[segments[j].start[0], segments[j].start[1]], [segments[k].end[0], segments[k].end[1]]],
2: [[segments[j].end[0], segments[j].end[1]], [segments[k].start[0], segments[k].start[1]]],
3: [[segments[j].end[0], segments[j].end[1]], [segments[k].end[0], segments[k].end[1]]],
}
data = switcher.get(ind)
connected[j - first, k - first] = True
connected[k - first, j - first] = True
segments = np.insert(segments, last, sm.line_of_best_fit(data))
segments[last].start = [data[0][0], data[0][1]]
segments[last].end = [data[1][0], data[1][1]]
segmentParent[i:] = segmentParent[i:]+1
##############################################################################
# Hypothesis 4
# proposal: A greedy search for new end-of-line connections up to a maximum of 1 connection at each end
# Followed by a greedy search for loose end-of-line connections
# result: Much tidier groups, though lines appear jittery.
# conclusion: It might be better to move nodes rather than draw new edges.
if (hypothesis == 4):
big_number = 9999999999999
max_extend = 6
connected_lines = np.zeros(last - first,dtype=bool)
connected = np.zeros((last-first, last-first),dtype=bool)
#for j in range(first, last):
# for k in range(first, last):
# if (j < k):
# First, do these lines already intersect?
#if (segments[j].slope[0] == segments[k].slope[0]):
# They never intersect, but could connect
# if(segments[j].intercept[0] == segments[k].intercept[0]):
#They are on the same line
#Only need to check x value equality, since lines are parallel
# if(((segments[j].start[0] >= segments[k].start[0])
# & (segments[j].start[0] <= segments[k].end[0]))
# ^ ((segments[j].start[0] >= segments[k].end[0])
# & (segments[j].start[0] <= segments[k].start[0]))):
## segments[j].start_connect = k
# connected[j-first, k-first] = True
## connected[k-first, j-first] = True
# if (((segments[j].end[0] >= segments[k].start[0])
# & (segments[j].end[0] <= segments[k].end[0]))
# ^ ((segments[j].end[0] >= segments[k].end[0])
# & (segments[j].end[0] <= segments[k].start[0]))):
# segments[j].end_connect = k
# connected[j-first, k-first] = True
# connected[k-first, j-first] = True
# if (((segments[k].start[0] >= segments[j].start[0])
# & (segments[k].start[0] <= segments[j].end[0]))
# ^ ((segments[k].start[0] >= segments[j].end[0])
# & (segments[k].start[0] <= segments[j].start[0]))):
# segments[k].start_connect = j
# connected[j-first, k-first] = True
# connected[k-first, j-first] = True
# if (((segments[k].end[0] >= segments[j].start[0])
### & (segments[k].end[0] <= segments[j].end[0]))
# ^ ((segments[k].end[0] >= segments[j].end[0])
# & (segments[k].end[0] <= segments[j].start[0]))):
# segments[k].end_connect = j
# connected[j-first, k-first] = True
# connected[k-first, j-first] = True#
# The next pair of conditions should NEVER occur
# However, the check has been included for sanity
# if((segments[j].end_connect == k)
# & (segments[j].start_connect == k)):
# #(Line j < Line k) ^ (Line j = Line k)
# np.delete(segments, j, 0)
# last = last - 1
# segmentParent[i:] = segmentParent[i:] + -1
# np.delete(connected_lines, j-first, 0)
# np.delete(connected, j-first, 0)
# np.delete(connected, j-first, 1)
# else:
# if ((segments[k].end_connect == j)
# & (segments[k].start_connect == j)):
# #Line k < Line j
# np.delete(segments, k, 0)
# last = last - 1
# segmentParent[i:] = segmentParent[i:] + -1
# np.delete(connected_lines, k-first, 0)
# np.delete(connected, k-first, 0)
# np.delete(connected, k-first, 1)
#The lines are not parallel, continue intersection check
#else:
# x = (b2 - b1)/(a1 - a2)
# x_cross = np.rint(np.divide(
# (segments[k].intercept[0] - segments[j].intercept[0]),
# (segments[j].slope[0] - segments[k].slope[0])))
#This introduces bugs due to errors introduced through division
#Rounding could help, but the direction of rounding would need to be know
#if ((x_cross >= segments[k].min[0]) & (x_cross >= segments[j].min[0])
# & (x_cross <= segments[k].max[0]) & (x_cross <= segments[j].max[0])):
# #Lines intersect!
# #But where...?
# if(abs(segments[k].end[0] - x_cross) < abs(segments[k].start[0] - x_cross)):
# segments[k].end_connect = j
# else:
# segments[k].start_connect = j
# if(abs(segments[j].end[0] - x_cross) < abs(segments[j].start[0] - x_cross)):
# segments[j].end_connect = k
# else:
# segments[j].start_connect = k
# connected[j-first,k-first] = True
# connected[k-first,j-first] = True
#If start and end of line is connected, then do not connect them again
#for j in range(first, last):
# if ((segments[j].start_connect >= 0) & (segments[j].end_connect >= 0)):
# connected_lines[j-first] = True
#Find lines that haven't been fully connected yet
unconnected = np.where(connected_lines == False)[0]+first
num_lines = unconnected.shape[0]
#Build adjacency matrix for lines that haven't been connected
line_adjacency = np.zeros((num_lines, num_lines,4), dtype=float)
#For lines that haven't been fully connected...
##########Calculate line end distances
for j in range(num_lines):
for k in range(num_lines):
if j < k:
#Not considering joined pairs of partially connected lines
if(connected[j,k] == True):
line_adjacency[j,k,0] = big_number
line_adjacency[j,k,1] = big_number
line_adjacency[j,k,2] = big_number
line_adjacency[j,k,3] = big_number
else:
#Measure the distance between the ends of the lines
#Ensure that lines are unconnected before measuring distance
# start -> start
line_adjacency[j,k,:] = sm.line_distances(segments[unconnected[j]],segments[unconnected[k]])
else:
if(j == k):
line_adjacency[j, k, 0] = big_number
line_adjacency[j, k, 1] = big_number
line_adjacency[j, k, 2] = big_number
line_adjacency[j, k, 3] = big_number
else:
# If line has already been processed, copy distance values
line_adjacency[j, k,0] = line_adjacency[k, j,0]
line_adjacency[j, k,1] = line_adjacency[k, j,2]
line_adjacency[j, k,2] = line_adjacency[k, j,1]
line_adjacency[j, k,3] = line_adjacency[k, j,3]
connect_flag = True
l = 0
#Whilst there are still partially connected lines less than [max_extend] distance apart
while(connect_flag == True):
#Find the shortest distance (greedy strategy)
# argmin gives flatIndex,
# use unravel_index with array shape to return 3d index
#If the shortest distance is acceptable
if line_adjacency.size == 0:
connect_flag = False
else:
j, k, l = np.unravel_index(np.argmin(line_adjacency), line_adjacency.shape)
if line_adjacency[j,k,l] < max_extend:
if(line_adjacency[j,k,l] == 0):
node = sm.attach_lines(segments[unconnected[j]], segments[unconnected[k]], l)
if (node.id >= num_nodes):
nodes.append(node)
num_nodes += 1
connected[k, j] = True
connected[j, k] = True
line_adjacency[j, k, :] = big_number
line_adjacency[k, j, :] = big_number
else:
#Create a new line to bridge the distance
segments = np.insert(segments, last,
sm.connect_lines(segments[unconnected[j]], segments[unconnected[k]], l))
if (segments[last].nodes[0] is not None):
if (segments[last].nodes[0].id >= num_nodes):
nodes.append(segments[last].nodes[0])
num_nodes += 1
if (segments[last].nodes[1] is not None):
if (segments[last].nodes[1].id >= num_nodes):
nodes.append(segments[last].nodes[1])
num_nodes += 1
segmentParent[i:] = segmentParent[i:] + 1
connected = np.hstack((connected, np.zeros((last-first, 1), dtype=bool)))
connected = np.vstack((connected, np.zeros((1,last-first+1), dtype=bool)))
connected[k, last-first] = True
connected[j, last-first] = True
connected[last-first, k] = True
connected[last-first, j] = True
connected[k,j] = True
connected[j,k] = True
line_adjacency[j, k, :] = big_number
line_adjacency[k, j, :] = big_number
#Adjacency switcher is used to select relevant line_adjacency values
#For each 'connection made type' row:
#First values identify connections types that line1 can no longer make
#Second values identify connections types that line2 can no longer make
#Third values identify connections types that j can no longer receive
#Fourth values identify connections types that k can no longer receive
adjacency_switcher = {
0: [[0, 1],[0, 1],[0, 2],[0, 2]], #Type start->start
1: [[0, 1],[2, 3],[0, 2],[1, 3]], #Type start->end
2: [[2, 3],[0, 1],[1, 3],[0, 2]], #Type end->start
3: [[2, 3],[2, 3],[1, 3],[1, 3]], #Type end->end
}
inds = adjacency_switcher[l]
line_adjacency[j,:,inds[0]] = big_number
line_adjacency[k,:,inds[1]] = big_number
line_adjacency[:,j,inds[2]] = big_number
line_adjacency[:,k,inds[3]] = big_number
last = last + 1
diff = 0
if ((segments[unconnected[j]].start_connect >= 0) & (segments[unconnected[j]].end_connect >= 0)):
connected_lines[j] = True
unconnected = np.delete(unconnected, j, 0)
line_adjacency = np.delete(line_adjacency, j, 0)
line_adjacency = np.delete(line_adjacency, j, 1)
num_lines = num_lines - 1
if k > j:
diff = 1
if ((segments[unconnected[k-diff]].start_connect >= 0) & (segments[unconnected[k-diff]].end_connect >= 0)):
connected_lines[k] = True
unconnected = np.delete(unconnected, k-diff, 0)
line_adjacency = np.delete(line_adjacency, k-diff, 0)
line_adjacency = np.delete(line_adjacency, k-diff, 1)
num_lines = num_lines - 1
else:
connect_flag = False
#Now there are only partially connected lines remaining
#We should see if these can connect to any nearby lines
num_remain = unconnected.shape[0]
#unconnected have been being deleted upon full-connection during previous step
line_adjacency = np.zeros((last-first, 4))
#max_extend = 10
for j in range(num_remain):
for k in range(last-first):
#Cannot connect to self
if(unconnected[j] == k+first):
line_adjacency[k, :] = big_number
else:
#Cannot reconnect over previously connections
if(connected[unconnected[j]-first,k] == True):
line_adjacency[k,:] = big_number
else:
#Measure distance to all other ends of lines
if(segments[unconnected[j]].start_connect < 0):
line_adjacency[k, 0] = sm.point_distance(segments[unconnected[j]].start,segments[k+first].start)
line_adjacency[k, 1] = sm.point_distance(segments[unconnected[j]].start,segments[k+first].end)
else:
line_adjacency[k, 0] = big_number
line_adjacency[k, 1] = big_number
if(segments[unconnected[j]].end_connect < 0):
line_adjacency[k, 2] = sm.point_distance(segments[unconnected[j]].end,segments[k+first].start)
line_adjacency[k, 3] = sm.point_distance(segments[unconnected[j]].end,segments[k+first].end)
else:
line_adjacency[k, 2] = big_number
line_adjacency[k, 3] = big_number
# sm.line_distances(segments[unconnected[j]],segments[k+first])
k, l = np.unravel_index(np.argmin(line_adjacency), line_adjacency.shape)
#If shortest distance is below threshold, make connection
if line_adjacency[k,l] < max_extend:
if (line_adjacency[k,l] == 0): #If shortest distance indicates prior connection, form connection formally
connected[unconnected[j] - first, k] = True
connected[k, unconnected[j] - first] = True
node = sm.attach_lines(segments[unconnected[j]], segments[k+first], l)
if (node.id >= num_nodes):
nodes.append(node)
num_nodes += 1
else:
changeFlag = True
segments = np.insert(segments, last,
sm.connect_lines(segments[unconnected[j]], segments[k+first], l))
if (segments[last].nodes[0] is not None):
if (segments[last].nodes[0].id >= num_nodes):
nodes.append(segments[last].nodes[0])
num_nodes += 1
if (segments[last].nodes[1] is not None):
if (segments[last].nodes[1].id >= num_nodes):
nodes.append(segments[last].nodes[1])
num_nodes += 1
connected[unconnected[j] - first, k] = True
connected[k, unconnected[j] - first] = True
segmentParent[i:] = segmentParent[i:] + 1
connected = np.hstack((connected, np.zeros((last - first, 1), dtype=bool)))
connected = np.vstack((connected, np.zeros((1, last - first + 1), dtype=bool)))
connected[k, last-first] = True
connected[unconnected[j]-first, last-first] = True
connected[last-first, k] = True
connected[last-first, unconnected[j]-first] = True
line_adjacency[k, :] = big_number
if((k+first) in unconnected):
line_adjacency[np.where(unconnected==(k+first))[0]] = big_number
line_adjacency = np.vstack((line_adjacency, np.multiply(np.ones((1,4)),big_number)))
last = last + 1
#print(checkCycles(segments[first:last]))
#plt.axes(axarr[1])
#axarr[1].imshow(image, cmap=plt.cm.gray)
#for m in range(first):
# plt.plot([segments[m].start[1], segments[m].end[1]], [segments[m].start[0], segments[m].end[0]], 'r-')
#
# for m in range(first,last):
# plt.plot([segments[m].start[1], segments[m].end[1]], [segments[m].start[0], segments[m].end[0]], 'g-')
graph = sm.getEdges(segments[first:last])
nodes2 = sm.getNodes(segments[first:last])
cycles = sm.find_nxCycle(graph)
#print(cycles)
cycles = sm.mergeCycles(cycles)
boxes = sm.findBounds(cycles, nodes2)
for box in boxes:
coord, width, height = sm.boxToMatplotPatch(box)
# axarr[1].add_patch(
# patches.Rectangle(
# coord, width, height,#(x,y), width, height
# fill=False
# )
# )
im = image[
int(np.maximum(0, coord[1] - np.floor_divide(height, 4))):int(
np.minimum(coord[1] + height + np.floor_divide(height, 4), edges.shape[0])),
int(np.maximum(0, coord[0] - np.floor_divide(width, 4))):int(
np.minimum(coord[0] + width + np.floor_divide(width, 4), edges.shape[1]))]
if ((width >= 20) & (height >= 20)):
# Big enough for 1px DEM
if ((width > 160) & (height > 160)):
# Big enough for 8px DEM
# coord = x1,y1
# x2 = x1+width
# y2 = y1+height
                filename = base_folder + "craters/" + base_filename + '_big_crater' + str(
                    num_bigcraters) + '_at_x' + str(int(np.floor_divide(coord[0], 1))) + 'w' + str(int(np.floor_divide(width, 1))) + 'at_y' + str(
                    int(np.floor_divide(coord[1], 1))) + 'h' + str(int(np.floor_divide(height, 1)))
                num_bigcraters = num_bigcraters + 1
            else:
                filename = base_folder + "craters/" + base_filename + '_crater' + str(
                    num_craters) + '_at_x' + str(int(np.floor_divide(coord[0], 1))) + 'w' + str(int(np.floor_divide(width, 1))) + 'at_y' + str(
                    int(np.floor_divide(coord[1], 1))) + 'h' + str(int(np.floor_divide(height, 1)))
                num_craters = num_craters + 1
        else:
            filename = base_folder + "craters/" + base_filename + '_little_crater' + str(
                num_lil_craters) + '_at_x' + str(int(np.floor_divide(coord[0], 1))) + 'w' + str(int(np.floor_divide(width, 1))) + 'at_y' + str(
                int(np.floor_divide(coord[1], 1))) + 'h' + str(int(np.floor_divide(height, 1)))
            num_lil_craters = num_lil_craters + 1
num_lil_craters = num_lil_craters + 1
im2 = Image.fromarray(im)
im2.save(filename + '.png')
#cycles = sm.findCycles(drawGraph(segments[first:last]))
#if (len(cycles) > 0):
# print(cycles)
#y1 = (np.multiply(line.slope, minX) + line.intercept)[0][0]
#a = np.divide(np.ones(len(line.slope)), line.slope)
#b = y1 - np.multiply(a, minX)
#x2 = np.divide(line.intercept - b, a - line.slope)
#y2 = (line.slope * x2) + line.intercept
#if x2 < minX:
# minX = x2
#if y2 < minY:
# minY = y2
#x1 = (np.divide((minY - line.intercept),line.slope))[0][0]
#y1 = (np.multiply(line.slope, minX) + line.intercept)[0][0]
#x2 = (np.divide((maxY - line.intercept), line.slope))[0][0]
#y2 = (np.multiply(line.slope, maxX) + line.intercept)[0][0]
#if(y1 > minY):
# y1 = minY
#x1 = minX
#y2 = (np.multiply(line.slope, maxX) + line.intercept)[0][0]
#if(y2 < maxY):
# y2 = maxY
#x2 = maxX
#for line in segments:
# If negative correlation, then [minX, maxY], [maxX, minY]
# plt.plot([line.start[1], line.end[1]], [line.start[0], line.end[0]], 'r-')
#if (line.slope[0] > 0):
# plt.plot([line.min[1], line.max[1]], [line.min[0], line.max[0]], 'r-')
#else:
# plt.plot([line.min[1], line.max[1]], [line.max[0], line.min[0]], 'r-')
print("end") | gpl-3.0 |
ssalesky/Science-Library | sci_lib.py | 1 | 4258 | #!/usr/bin/python
#Author: Scott T. Salesky
#Email: [email protected] / [email protected]
#Created: 12.6.2014 / Updated: 12.6.2014
#Purpose: Collection of useful Python classes,
#routines, and functions for scientific work
#----------------------------------------------------------
#Import all required packages
import numpy as np #Numpy
from matplotlib.colors import Normalize #Normalize class
import matplotlib.pyplot as plt #Matplotlib.pyplot
from matplotlib.ticker import MultipleLocator, \
AutoLocator, AutoMinorLocator #Tick locations
#from matplotlib import ticker #Ticker
#----------------------------------------------------------
#Functions for reading/manipulating data
#----------------------------------------------------------
def read_f90_bin(path,nx,ny,nz,precision):
"""Reads Fortran binary direct access files into Numpy.
path => path to file to read
(nx,ny,nz) => grid dimensions
    precision => (=4 single), (=8 double)
Returns dat[nx,ny,nz] as numpy array.
"""
#Open file
f=open(path,'rb')
#Pass data to numpy array
if (precision==4):
dat=np.fromfile(f,dtype='float32',count=nx*ny*nz)
elif (precision==8):
dat=np.fromfile(f,dtype='float64',count=nx*ny*nz)
else:
raise ValueError('Precision must be 4 or 8')
#Reshape array
dat=np.reshape(dat,(nx,ny,nz),order='F')
f.close()
return dat
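#Hedged usage sketch (illustrative only) for read_f90_bin; the file name and
#grid dimensions below are hypothetical, and precision=8 assumes the field was
#written in double precision from Fortran.
def example_read_field(path='u_velocity.bin', nx=64, ny=64, nz=32):
    u = read_f90_bin(path, nx, ny, nz, 8)
    return u.mean(axis=(0, 1))  #horizontally averaged vertical profile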
#----------------------------------------------------------
#Classes and functions for generating plots with Matplotlib
#----------------------------------------------------------
def set_ticks(ax,xmaj=None,xmin=None,ymaj=None,ymin=None):
"""Sets major/minor axis tickmarks at specified increments.
ax => Axis handle (e.g. ax=plt.subplot(111))
xmaj,ymaj => Major tick frequency (optional)
xmin,ymin => Minor tick frequency (optional)
Example Usage:
ax=subplot(111)
plt.plot(x,y)
set_ticks(ax,xmaj=5,xmin=1,ymaj=1,ymin=0.2)
"""
if xmaj is None:
ax.xaxis.set_major_locator(AutoLocator())
else:
ax.xaxis.set_major_locator(MultipleLocator(xmaj))
if ymaj is None:
ax.yaxis.set_major_locator(AutoLocator())
else:
ax.yaxis.set_major_locator(MultipleLocator(ymaj))
if xmin is None:
ax.xaxis.set_minor_locator(AutoMinorLocator())
else:
ax.xaxis.set_minor_locator(MultipleLocator(xmin))
if ymin is None:
ax.yaxis.set_minor_locator(AutoMinorLocator())
else:
ax.yaxis.set_minor_locator(MultipleLocator(ymin))
def gen_colorlist(n,cmap):
"""Returns list of n colors evenly spaced from a given colormap.
Useful for making a line plot with n lines with colors that are
evenly spaced according to a given colormap.
n => number of colors to return
cmap => colormap (e.g. from pyplot.cm.colormapname)
returns colorlist => n tuples corresponding to colors
Example Usage:
colorlist=gen_colorlist(ncolors,colormapname)
for i in range(n):
plt.plot(data[:,0],dat[:,i+1],color=colorlist[i],args*)
"""
colorlist=[]
vals=np.linspace(1.0/n,1.0,n)
for i in range(n):
colorlist.append(cmap(vals[i]))
return colorlist
class MidPointNormalize(Normalize):
"""Defines the midpoint of diverging colormap.
Usage: Allows one to adjust the colorbar, e.g.
using contouf to plot data in the range [-3,6] with
a diverging colormap so that zero values are still white.
Example usage:
norm=MidPointNormalize(midpoint=0.0)
f=plt.contourf(X,Y,dat,norm=norm,cmap=colormap)
"""
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
# I'm ignoring masked values and all kinds of edge cases to make a
# simple example...
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
| mit |
siutanwong/scikit-learn | examples/svm/plot_svm_scale_c.py | 223 | 5375 | """
==============================================
Scaling the regularization parameter for SVCs
==============================================
The following example illustrates the effect of scaling the
regularization parameter when using :ref:`svm` for
:ref:`classification <svm_classification>`.
For SVC classification, we are interested in a risk minimization for the
equation:
.. math::
C \sum_{i=1, n} \mathcal{L} (f(x_i), y_i) + \Omega (w)
where
- :math:`C` is used to set the amount of regularization
- :math:`\mathcal{L}` is a `loss` function of our samples
and our model parameters.
- :math:`\Omega` is a `penalty` function of our model parameters
If we consider the loss function to be the individual error per
sample, then the data-fit term, or the sum of the error for each sample, will
increase as we add more samples. The penalization term, however, will not
increase.
When using, for example, :ref:`cross validation <cross_validation>` to
set the amount of regularization with `C`, there will be a
different number of samples between the main problem and the smaller problems
within the folds of the cross validation.
Since our loss function is dependent on the amount of samples, the latter
will influence the selected value of `C`.
The question that arises is `How do we optimally adjust C to
account for the different number of training samples?`
The figures below are used to illustrate the effect of scaling our
`C` to compensate for the change in the number of samples, in the
case of using an `l1` penalty, as well as the `l2` penalty.
l1-penalty case
-----------------
In the `l1` case, theory says that prediction consistency
(i.e. that under given hypothesis, the estimator
learned predicts as well as a model knowing the true distribution)
is not possible because of the bias of the `l1`. It does say, however,
that model consistency, in terms of finding the right set of non-zero
parameters as well as their signs, can be achieved by scaling
`C1`.
l2-penalty case
-----------------
The theory says that in order to achieve prediction consistency, the
penalty parameter should be kept constant
as the number of samples grow.
Simulations
------------
The two figures below plot the values of `C` on the `x-axis` and the
corresponding cross-validation scores on the `y-axis`, for several different
fractions of a generated data-set.
In the `l1` penalty case, the cross-validation-error correlates best with
the test-error, when scaling our `C` with the number of samples, `n`,
which can be seen in the first figure.
For the `l2` penalty case, the best result comes from the case where `C`
is not scaled.
.. topic:: Note:
Two separate datasets are used for the two different plots. The reason
behind this is the `l1` case works better on sparse data, while `l2`
is better suited to the non-sparse case.
"""
print(__doc__)
# Author: Andreas Mueller <[email protected]>
# Jaques Grobler <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import LinearSVC
from sklearn.cross_validation import ShuffleSplit
from sklearn.grid_search import GridSearchCV
from sklearn.utils import check_random_state
from sklearn import datasets
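# Hedged helper, not part of the original example: the C-grid rescaling that
# the '1/n_samples' case in the loop below applies, isolated for clarity.
def scaled_C_grid(cs, n_samples, train_size):
    # each candidate C is multiplied by the number of training samples
    return cs * float(n_samples * train_size)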
rnd = check_random_state(1)
# set up dataset
n_samples = 100
n_features = 300
# l1 data (only 5 informative features)
X_1, y_1 = datasets.make_classification(n_samples=n_samples,
n_features=n_features, n_informative=5,
random_state=1)
# l2 data: non sparse, but less features
y_2 = np.sign(.5 - rnd.rand(n_samples))
X_2 = rnd.randn(n_samples, n_features / 5) + y_2[:, np.newaxis]
X_2 += 5 * rnd.randn(n_samples, n_features / 5)
clf_sets = [(LinearSVC(penalty='l1', loss='squared_hinge', dual=False,
tol=1e-3),
np.logspace(-2.3, -1.3, 10), X_1, y_1),
(LinearSVC(penalty='l2', loss='squared_hinge', dual=True,
tol=1e-4),
np.logspace(-4.5, -2, 10), X_2, y_2)]
colors = ['b', 'g', 'r', 'c']
for fignum, (clf, cs, X, y) in enumerate(clf_sets):
# set up the plot for each regressor
plt.figure(fignum, figsize=(9, 10))
for k, train_size in enumerate(np.linspace(0.3, 0.7, 3)[::-1]):
param_grid = dict(C=cs)
# To get nice curve, we need a large number of iterations to
# reduce the variance
grid = GridSearchCV(clf, refit=False, param_grid=param_grid,
cv=ShuffleSplit(n=n_samples, train_size=train_size,
n_iter=250, random_state=1))
grid.fit(X, y)
scores = [x[1] for x in grid.grid_scores_]
scales = [(1, 'No scaling'),
((n_samples * train_size), '1/n_samples'),
]
for subplotnum, (scaler, name) in enumerate(scales):
plt.subplot(2, 1, subplotnum + 1)
plt.xlabel('C')
plt.ylabel('CV Score')
grid_cs = cs * float(scaler) # scale the C's
plt.semilogx(grid_cs, scores, label="fraction %.2f" %
train_size)
plt.title('scaling=%s, penalty=%s, loss=%s' %
(name, clf.penalty, clf.loss))
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
dhomeier/astropy | astropy/visualization/tests/test_histogram.py | 2 | 2269 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from numpy.testing import assert_allclose
from astropy.utils.compat.optional_deps import HAS_PLT, HAS_SCIPY
if HAS_PLT:
import matplotlib.pyplot as plt
import pytest
import numpy as np
from astropy.visualization import hist
from astropy.stats import histogram
@pytest.mark.skipif('not HAS_PLT')
def test_hist_basic(rseed=0):
rng = np.random.RandomState(rseed)
x = rng.randn(100)
for range in [None, (-2, 2)]:
n1, bins1, patches1 = plt.hist(x, 10, range=range)
n2, bins2, patches2 = hist(x, 10, range=range)
assert_allclose(n1, n2)
assert_allclose(bins1, bins2)
@pytest.mark.skipif('not HAS_PLT')
def test_hist_specify_ax(rseed=0):
rng = np.random.RandomState(rseed)
x = rng.randn(100)
fig, ax = plt.subplots(2)
n1, bins1, patches1 = hist(x, 10, ax=ax[0])
assert patches1[0].axes is ax[0]
n2, bins2, patches2 = hist(x, 10, ax=ax[1])
assert patches2[0].axes is ax[1]
@pytest.mark.skipif('not HAS_PLT')
def test_hist_autobin(rseed=0):
rng = np.random.RandomState(rseed)
x = rng.randn(100)
# 'knuth' bintype depends on scipy that is optional dependency
if HAS_SCIPY:
bintypes = [10, np.arange(-3, 3, 10), 'knuth', 'scott',
'freedman', 'blocks']
else:
bintypes = [10, np.arange(-3, 3, 10), 'scott',
'freedman', 'blocks']
for bintype in bintypes:
for range in [None, (-3, 3)]:
n1, bins1 = histogram(x, bintype, range=range)
n2, bins2, patches = hist(x, bintype, range=range)
assert_allclose(n1, n2)
assert_allclose(bins1, bins2)
def test_histogram_pathological_input():
# Regression test for https://github.com/astropy/astropy/issues/7758
# The key feature of the data below is that one of the points is very,
# very different than the rest. That leads to a large number of bins.
data = [9.99999914e+05, -8.31312483e-03, 6.52755852e-02, 1.43104653e-03,
-2.26311017e-02, 2.82660007e-03, 1.80307521e-02, 9.26294279e-03,
5.06606026e-02, 2.05418011e-03]
with pytest.raises(ValueError):
hist(data, bins='freedman', max_bins=10000)
| bsd-3-clause |
GuLinux/PySpectrum | old/fitscalibration.py | 1 | 3743 | #!/bin/python
import astropy
from astropy.io import fits
import sys
import numpy as np
import matplotlib.pyplot as plt
import time
import scipy.ndimage.interpolation
from matplotlib.widgets import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import QSettings
from calibrate_dialog import *
class Calibrate:
def __init__(self, fits_file):
self.starting_wavelength = fits_file[0].header['CRVAL1'] if 'CRVAL1' in fits_file[0].header else 0
self.dispersion = fits_file[0].header['CDELT1'] if 'CDELT1' in fits_file[0].header else 1
self.config = QSettings('GuLinux', 'PySpectra')
self.fits_file = fits_file
self.__init_plot__()
self.bcalibrate = Button(self.image_plot.figure.add_axes([0, 0.95, 0.1, 0.05]), "Calibrate")
self.bsave = Button(self.image_plot.figure.add_axes([0.1 , 0.95, 0.1, 0.05]), "Save")
self.bcalibrate.on_clicked(lambda ev: self.calibrate() )
self.bsave.on_clicked(lambda ev: self.save() )
self.calibrate_dialog = QDialog()
self.calibrate_dialog_ui = Ui_Calibrate()
self.calibrate_dialog_ui.setupUi(self.calibrate_dialog)
self.calibrate_dialog_ui.first_point_lambda.setRange(0, 50000)
self.calibrate_dialog_ui.second_point_lambda.setRange(0, 50000)
self.calibrate_dialog_ui.first_point_pixel.setRange(0, self.data().size)
self.calibrate_dialog_ui.second_point_pixel.setRange(0, self.data().size)
self.calibrate_dialog.accepted.connect(self.calibrated)
plt.show()
def calibrate(self):
self.calibrating = True
self.calibrate_dialog.show()
def calibrated(self):
self.calibrating = False
self.dispersion = (self.calibrate_dialog_ui.second_point_lambda.value() - self.calibrate_dialog_ui.first_point_lambda.value()) / (self.calibrate_dialog_ui.second_point_pixel.value() - self.calibrate_dialog_ui.first_point_pixel.value())
self.starting_wavelength = self.calibrate_dialog_ui.first_point_lambda.value() - (self.calibrate_dialog_ui.first_point_pixel.value() * self.dispersion)
header = self.fits_file[0].header
header['CRPIX1'] = 1
header['CRVAL1'] = self.starting_wavelength
header['CDELT1'] = self.dispersion
self.draw_plot()
print("calibrate: starting_wavelength={}, dispersion={}".format(self.starting_wavelength, self.dispersion))
def data(self):
return self.fits_file[0].data
def __init_plot__(self):
plt.figure()
self.image_plot = plt.axes()
self.draw_plot()
def draw_plot(self):
self.image_plot.clear()
x_axis = np.arange(0, self.data().size) * self.dispersion + self.starting_wavelength
self.image_plot.plot(x_axis, self.data())
self.image_plot.figure.canvas.draw()
def save(self):
save_file = QFileDialog.getSaveFileName(None, "Save plot...", self.config.value('last_save_dir'), "FITS file (.fit)")
if not save_file[0]:
return
filename = save_file[0]
self.fits_file.writeto(filename, clobber=True)
# TODO: plotting calibrated spectrum; colormap as following:
# plt.cm.nipy_spectral(1.0)
# Main
if len(sys.argv) < 2:
print("Usage: {} image.fit".format(sys.argv[0]), file=sys.stderr)
sys.exit(1)
app = QApplication(sys.argv)
image_file = sys.argv[1]
print("Reading image file: " + image_file)
image_hdu = fits.open(image_file)
print(image_hdu.info())
for hdu in image_hdu:
print(repr(hdu.header))
img_data = image_hdu[0].data
print("Type: {}, size: {}, ndim: {}, dtype: {}, shape: {}".format(type(img_data),img_data.size, img_data.ndim, img_data.dtype, img_data.shape) )
calibrate = Calibrate(image_hdu)
| gpl-3.0 |
AlexRobson/scikit-learn | examples/svm/plot_separating_hyperplane.py | 294 | 1273 | """
=========================================
SVM: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a Support Vector Machine classifier with
linear kernel.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# fit the model
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
b = clf.support_vectors_[0]
yy_down = a * xx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yy_up = a * xx + (b[1] - a * b[0])
# plot the line, the points, and the nearest vectors to the plane
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=80, facecolors='none')
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
| bsd-3-clause |
foxsi/foxsi-smex | pyfoxsi/src/pyfoxsi/response/response.py | 4 | 8272 | """
Response is a module to handle the response of the FOXSI telescopes
"""
from __future__ import absolute_import
import pandas as pd
import numpy as np
import warnings
import os
import matplotlib.pyplot as plt
import astropy.units as u
from scipy import interpolate
import pyfoxsi
import h5py
__all__ = ['Response', 'Material']
class Response(object):
"""An object which provides the FOXSI telescope response
Parameters
----------
shutter_state : int, default 0
A number representing the state of the shutter (0 - no shutter, 1 - thin shutter, 2 - thick shutter)
configuration : int, default 1
Choose the optics configuration
1 : 15 meters
2 : 10 meters 3 modules
3 : 10 meters 2 modules
Examples
--------
>>> from pyfoxsi.response import Response
>>> resp = Response()
>>> resp1 = Response(shutter_state=1)
"""
def __init__(self, shutter_state=0, configuration=1):
path = os.path.dirname(pyfoxsi.__file__)
for i in np.arange(3):
path = os.path.dirname(path)
path = os.path.join(path, 'data/')
filename = 'effective_area_per_module.csv'
effarea_file = os.path.join(path, filename)
optics_effective_area = pd.read_csv(effarea_file, index_col=0, skiprows=4)
optics_effective_area = optics_effective_area[optics_effective_area.columns[configuration-1]]
if configuration == 1:
pyfoxsi.focal_length = 15 * u.m
pyfoxsi.number_of_telescopes = 3
elif configuration == 2:
pyfoxsi.focal_length = 10 * u.m
pyfoxsi.number_of_telescopes = 3
elif configuration == 3:
pyfoxsi.focal_length = 10 * u.m
pyfoxsi.number_of_telescopes = 2
self.optics_effective_area = pd.DataFrame(dict(total=optics_effective_area.copy(),
module=optics_effective_area.copy()))
# find what shells are missing
#shell_numbers = np.array(self._eff_area_per_shell.columns, np.uint)
#missing_shells = np.setdiff1d(shell_numbers, pyfoxsi.shell_ids)
# remove the missing shells
self.__number_of_telescopes = 1
#for missing_shell in missing_shells:
# self._eff_area_per_shell.drop(str(missing_shell), 1, inplace=True)
# now add the effective area of all of the shells together
#self.optics_effective_area = pd.DataFrame({'module': self._eff_area_per_shell.sum(axis=1), 'total': self._eff_area_per_shell.sum(axis=1)})
self.effective_area = pd.DataFrame(dict(total=self.optics_effective_area['total'].copy(), module=self.optics_effective_area['module'].copy()))
self.number_of_telescopes = pyfoxsi.number_of_telescopes
self._set_default_optical_path()
if shutter_state > 0:
self.__optical_path.append(Material('al', pyfoxsi.shutters_thickness[shutter_state]))
self.__shutter_state = shutter_state
self._add_optical_path_to_effective_area()
def plot(self, axes=None):
"""Plot the effective area"""
if axes is None:
axes = plt.gca()
a = self.effective_area.plot(axes=axes)
axes.set_title(pyfoxsi.mission_title + ' ' + str(self.number_of_telescopes) + 'x ' + 'Shutter State ' + str(self.shutter_state))
axes.set_ylabel('Effective area [cm$^2$]')
axes.set_xlabel('Energy [keV]')
def _set_default_optical_path(self):
self.__optical_path = [Material('mylar', pyfoxsi.blanket_thickness),
Material(pyfoxsi.detector_material, pyfoxsi.detector_thickness)]
@property
def number_of_telescopes(self):
"""The total number of telescope modules"""
return self.__number_of_telescopes
@number_of_telescopes.setter
def number_of_telescopes(self, x):
self.optics_effective_area['total'] = self.optics_effective_area['total'] / self.__number_of_telescopes * x
self.__number_of_telescopes = x
@property
def optical_path(self):
"""The materials in the optical path including the detector"""
return self.__optical_path
@optical_path.setter
def optical_path(self, x):
self.optical_path = x
self._add_optical_path_to_effective_area()
@property
def shutter_state(self):
"""The shutter state, allowed values are 0, 1, 2"""
return self.__shutter_state
@shutter_state.setter
def shutter_state(self, x):
raise AttributeError('Cannot change shutter state. Create new object with desired shutter state')
def _add_optical_path_to_effective_area(self):
"""Add the effect of the optical path to the effective area"""
energies = np.array(self.optics_effective_area.index)
# Remove 10% of flux due to spiders
factor = np.ones_like(energies) * 0.9
# Apply all of the materials in the optical path to factor
for material in self.optical_path:
print(material.name)
if material.name == pyfoxsi.detector_material:
                # if it is the detector then we want the absorption
factor *= material.absorption(energies)
else:
factor *= material.transmission(energies)
self.effective_area['factor'] = factor
self.effective_area['total'] = factor * self.optics_effective_area['total']
self.effective_area['module'] = factor * self.optics_effective_area['module']
class Material(object):
"""An object which provides the optical properties of a material in x-rays
Parameters
----------
material : str
A string representing a material (e.g. cdte, be, mylar, si)
thickness : `astropy.units.Quantity`
The thickness of the material in the optical path.
Examples
--------
>>> from pyfoxsi.response import Material
>>> import astropy.units as u
>>> detector = Material('cdte', 500 * u.um)
>>> thermal_blankets = Material('mylar', 0.5 * u.mm)
"""
def __init__(self, material, thickness):
self.name = material
self.thickness = thickness
path = os.path.dirname(pyfoxsi.__file__)
for i in np.arange(3):
path = os.path.dirname(path)
path = os.path.join(path, 'data/')
filename = 'mass_attenuation_coefficient.hdf5'
data_file = os.path.join(path, filename)
h = h5py.File(data_file, 'r')
data = h[self.name]
self._source_data = data
self.density = u.Quantity(self._source_data.attrs['density'], self._source_data.attrs['density unit'])
data_energy_kev = np.log10(self._source_data[0,:] * 1000)
data_attenuation_coeff = np.log10(self._source_data[1,:])
self._f = interpolate.interp1d(data_energy_kev, data_attenuation_coeff, bounds_error=False, fill_value=0.0)
self._mass_attenuation_coefficient_func = lambda x: 10 ** self._f(np.log10(x))
def __repr__(self):
"""Returns a human-readable representation."""
return '<Material ' + str(self.name) + ' ' + str(self.thickness) + '>'
def transmission(self, energy):
"""Provide the transmission fraction (0 to 1).
Parameters
----------
energy : `astropy.units.Quantity`
An array of energies in keV
"""
coefficients = self._mass_attenuation_coefficient_func(energy) * u.cm ** 2 / u.gram
transmission = np.exp(- coefficients * self.density * self.thickness)
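        # This is the Beer-Lambert attenuation law, T = exp(-(mu/rho) * rho * t),
        # where the mass attenuation coefficient (mu/rho) is interpolated in
        # log-log space from the tabulated data loaded in __init__.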
return transmission
def absorption(self, energy):
"""Provides the absorption fraction (0 to 1).
Parameters
----------
energy : `astropy.units.Quantity`
An array of energies in keV.
"""
return 1 - self.transmission(energy)
def plot(self, axes=None):
if axes is None:
axes = plt.gca()
energies = np.arange(1, 60)
axes.plot(energies, self.transmission(energies), label='Transmission')
axes.plot(energies, self.absorption(energies), label='Absorption')
axes.set_ylim(0, 1.2)
axes.legend()
axes.set_title(self.name + ' ' + str(self.thickness))
axes.set_xlabel('Energy [keV]')
| mit |
lionelliang/PairTradingSpark | check-pairtrading-spark-optimised.py | 1 | 13079 | #coding:utf-8
## Spark Application - execute with spark-submit
## Imports
import csv
import os
import time
import itertools
import numpy as np
import pandas as pd
import statsmodels.api as sm
import statsmodels.tsa.stattools as sts
from StringIO import StringIO
from pyspark import SparkConf, SparkContext
import pymongo_spark
from pymongo import MongoClient
pymongo_spark.activate()
## Module Constants
APP_NAME = "ADF Spark Application"
TABLE_STOCKS_BASIC = 'stock_basic_list'
TABLE_STOCKS_PAIRS = 'stock_pairing_list45'
TABLE_WEIGHT = 'stock_linrreg.csv'
DownloadDir = './stockdata/'
weightdict = {} #previous weight dict broadcast
#mongo db config
MONGO_HOST = '127.0.0.1'
MONGO_TABLE_WEIGHT = 'stock.linrreg'
MONGO_TABLE_WEIGHT_SAVED = 'stock.linrregsaved'
MONGO_TABLE_STOCKS_PAIRS = 'stock.pairs'
MONGO_TABLE_STOCKS_PAIRS_ALL = 'stock.pairsall'
MONGO_DB_QUOTATION = 'quotation'
MONGO_TABLE_PREFIX = 'kline_'
## Closure Functions
#date example 2011/10/13
tudateparser = lambda dates: pd.datetime.strptime(dates, '%Y-%m-%d')
def save_stk_pairings():
stock_list = pd.read_csv(TABLE_STOCKS_BASIC + '.csv', dtype=str)
list_code = stock_list['code'].values.tolist()
#list_code = list_code[100:200]
print len(list_code)
list_pool = list(itertools.combinations(list_code, 2))
stockPool = pd.DataFrame(list_pool, columns=['code1','code2'])
print stockPool.head()
stockPool.to_csv(TABLE_STOCKS_PAIRS + '.csv', header=False, index=False)
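    # e.g. three codes ['600000', '600001', '600002'] produce the 3 unordered
    # pairs ('600000','600001'), ('600000','600002'), ('600001','600002');
    # for N codes the pairing table grows as N*(N-1)/2 rows.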
# input: int or string
# output: string
def getSixDigitalStockCode(code):
strZero = ''
for i in range(len(str(code)), 6):
strZero += '0'
return strZero + str(code)
def split(line):
"""
Operator function for splitting a line with csv module
"""
reader = csv.reader(StringIO(line))
return reader.next()
# Purpose: read a dictionary back from a csv file
# Input: file name, data dictionary to fill
def readDictCSV(fileName="", dataDict = {}):
if not os.path.exists(fileName) :
return {}
with open(fileName, "r") as csvFile:
reader = csv.reader(csvFile)
for row in reader:
dataDict[str(row[0])] = [float(row[1]), float(row[2])]
csvFile.close()
return dataDict
# Purpose: write a dictionary to a csv file
# Input: file name, data dictionary
def writeDictCSV(fileName="", dataDict={}):
with open(fileName, "wb") as csvFile:
csvWriter = csv.writer(csvFile)
for k,v in dataDict.iteritems():
csvWriter.writerow([str(k), v[0], v[1]])
csvFile.close()
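    # The csv layout (one row per stock pair) is assumed to be:
    #   <pair key>, <alpha>, <beta>
    # which is what readDictCSV above parses back into {key: [alpha, beta]}.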
def writeRddCSV(fileName, rdd, sqlContext):
df = sqlContext.createDataFrame(rdd)
#print df.first()
#df.write.format("com.databricks.spark.csv").save(fileName)
df.toPandas().to_csv(fileName, header=False, index=False)
'''
with open(fileName, "wb") as csvFile:
csvWriter = csv.writer(csvFile)
rdd.foreach(lambda elem: writeElem(csvWriter, elem))
csvFile.close()
'''
def writeElem(csvWriter, elem):
csvWriter.writerow(elem[0], elem[1][1], elem[1][2])
def toCSVLine(data):
return ','.join(str(d) for d in data)
'''
mongo db operation
'''
def _connect_mongo(host, port):
""" A util for making a connection to mongo """
conn = MongoClient(host, port)
return conn
def get_connection_mongo(host, port):
connMongo = MongoClient(host, port)
return connMongo
def read_mongo(db, collection, query={}, column={}, host=MONGO_HOST, port=27017, username=None, password=None):
""" Read from Mongo and Store into DataFrame """
# Connect to MongoDB
connMongo = _connect_mongo(host, port)
dbMongo = connMongo[db]
# Make a query to the specific DB and Collection
cursor = dbMongo[collection].find(query, column)
# Expand the cursor and construct the DataFrame
df = pd.DataFrame(list(cursor))
connMongo.close()
return df
def readCollectionMongo(collection):
return sc.mongoRDD('mongodb://'+MONGO_HOST+':27017/'+collection)
def writeCollectionMongo(rdd, collection):
rdd.saveToMongoDB('mongodb://'+MONGO_HOST+':27017/'+collection)
'''
linear regression with Stochastic Gradient Descent method
'''
def linregSGD(x, y, a, b):
    # ------------------------- stochastic gradient descent -------------------------
    # two termination conditions
    loop_max = 10000  # maximum number of iterations (guards against an endless loop)
    epsilon = 1e-6
    alpha = 0.001  # step size (too large causes oscillation, too small slows convergence)
    diff = 0.
    errorA = a
    errorB = b
    count = 0  # loop counter
    finish = False  # termination flag
    m = len(x)  # number of training samples
while count < loop_max:
#count += 1
        # sweep through the training set, updating the weights as we go
for i in range(m):
count += 1
            diff = a + b * x[i] - y[i]  # prediction error for this training sample
            # stochastic gradient descent: each weight update uses a single training sample
a = a - alpha * diff
b = b - alpha * diff * x[i]
if ((a-errorA)*(a-errorA) + (b-errorB)*(b-errorB)) < epsilon:
                # termination: the change of the weight vector between checks is sufficiently small
finish = 1
break
else:
errorA = a
errorB = b
        if finish == True:  # break out of the outer loop
break
#print 'loop count = %d' % count, '\tweight:[%f, %f]' % (a, b)
return finish, a, b
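    # Illustrative usage sketch for linregSGD (not part of the original
    # pipeline; the data below are made up). Starting from a = b = 0 the loop
    # above moves the weights towards the intercept/slope of a noisy line:
    #   xs = np.arange(0, 1, 0.01)
    #   ys = 1.0 + 2.0 * xs + 0.01 * np.random.randn(len(xs))
    #   finished, a_hat, b_hat = linregSGD(xs, ys, 0.0, 0.0)
    # How close a_hat/b_hat get depends on the hard-coded alpha and epsilon.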
def adfuller_check_sgd(closeprice_of_1, closeprice_of_2, a, b):
if len(closeprice_of_1) >= 10 and len(closeprice_of_2) >= 10:
# adfuller won't work if data is not enough
finish, alpha, beta = linregSGD(x=closeprice_of_1, y=closeprice_of_2, a=a, b=b)
if not finish:
return False, a, b
spread = closeprice_of_2 - closeprice_of_1*beta - alpha
spread.dropna()
adfstat, pvalue, usedlag, nobs, critvalues, icbest = sts.adfuller(x=spread, maxlag=1)
return adfstat < critvalues['5%'], alpha, beta
else:
return False, 0, 0
'''
print adfstat
for(k, v) in critvalues.items():
print k, v
'''
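# Note (illustrative, not part of the original pipeline): adfuller_check_sgd is
# an Engle-Granger style cointegration check -- fit close_2 ~ alpha + beta *
# close_1 by SGD, then ADF-test the residual spread for stationarity against
# the 5% critical value. Example call on two pandas Series of prices:
#   ok, alpha, beta = adfuller_check_sgd(close_1, close_2, 0.0, 0.0)
# where `ok` is True when the spread looks stationary (i.e. the pair is a
# pair-trading candidate) and alpha/beta are the fitted intercept and hedge ratio.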
def load_process(code1, code2, start_date, end_date):
m = getSixDigitalStockCode(code1)
n = getSixDigitalStockCode(code2)
file1 = DownloadDir + "h_kline_" + m + ".csv"
file2 = DownloadDir + "h_kline_" + n + ".csv"
    if (not os.path.exists(file1)) or (not os.path.exists(file2)):
return {},{}
kline1 = pd.read_csv(file1, parse_dates=['date'], index_col='date', date_parser=tudateparser)
kline2 = pd.read_csv(file2, parse_dates=['date'], index_col='date', date_parser=tudateparser)
#print kline1.head()
price_of_1 = kline1[end_date:start_date]
price_of_2 = kline2[end_date:start_date]
# regroup quotation according to date index
combination = price_of_1.join(price_of_2, how='inner', lsuffix='l', rsuffix='r')
combination.dropna()
closeprice_of_1 = combination['closel'].reset_index(drop=True)
closeprice_of_2 = combination['closer'].reset_index(drop=True)
return closeprice_of_1, closeprice_of_2
def load_process_data_mongo(code1, code2, start_date, end_date):
m = getSixDigitalStockCode(code1)
n = getSixDigitalStockCode(code2)
collection1 = MONGO_TABLE_PREFIX + m
collection2 = MONGO_TABLE_PREFIX + n
query = {"date": {"$gte": start_date, "$lt": end_date}}
column = {"date":1, "close":1, "_id":0}
kline1 = read_mongo(MONGO_DB_QUOTATION, collection1, query, column)
kline2 = read_mongo(MONGO_DB_QUOTATION, collection2, query, column)
if kline1.empty or kline2.empty:
return {},{}
kline1['date'] = pd.to_datetime(kline1['date'], format='%Y-%m-%d')
kline2['date'] = pd.to_datetime(kline2['date'], format='%Y-%m-%d')
kline1.index = kline1['date'].tolist()
kline2.index = kline2['date'].tolist()
#price_of_1 = kline1[end_date:start_date]
#price_of_2 = kline2[end_date:start_date]
price_of_1 = kline1
price_of_2 = kline2
# regroup quotation according to date index
combination = price_of_1.join(price_of_2, how='inner', lsuffix='l', rsuffix='r')
combination = combination.dropna()
closeprice_of_1 = combination['closel'].reset_index(drop=True)
closeprice_of_2 = combination['closer'].reset_index(drop=True)
return closeprice_of_1, closeprice_of_2
def adfuller_check_price_sgd(code1, code2, start_date = '2013-10-10', end_date = '2015-09-30'):
closeprice_of_1, closeprice_of_2 = load_process(code1, code2, start_date, end_date)
    if len(closeprice_of_1)<=1 or len(closeprice_of_2)<=1:
return
# time5 = time.time()
if weightdict.has_key(code1+code2): # get previous weight
a = weightdict[code1+code2][0]
b = weightdict[code1+code2][1]
#print weightdict[code1+code2]
else:
#print "not find w"
np.random.seed(2)
a, b = np.random.randn(2)
result = adfuller_check_sgd(closeprice_of_1, closeprice_of_2, a, b)
# time6 = time.time()
# print "sgdmiddle running time(s): ", time6-time5
weightdict[code1+code2] = [result[1], result[2]] # update weight data
return result[0]
def adfuller_check_sgd_withweight(code1, code2, a, b, start_date = '2013-10-10', end_date = '2015-09-30'):
closeprice_of_1, closeprice_of_2 = load_process_data_mongo(code1, code2, start_date, end_date)
#closeprice_of_1, closeprice_of_2 = load_process(code1, code2, start_date, end_date)
    if len(closeprice_of_1)<=1 or len(closeprice_of_2)<=1:
#print "without data, you shall not pass"
return {"stk1":code1, "stk2":code2, "flag":0, "a":0, "b":0}
if not a or not b or (a==0 and b==0): # get previous weight
#print "not find w"
#np.random.seed(2)
#a, b = np.random.randn(2)
a = 0
b = 0
result = adfuller_check_sgd(closeprice_of_1, closeprice_of_2, a, b)
return {"stk1":code1, "stk2":code2, "flag":np.float64(result[0]).item(), \
"a":np.float64(result[1]).item(), "b":np.float64(result[2]).item()}
def adfuller_check(code1, code2, start_date = '2013-10-10', end_date = '2015-09-30'):
m = getSixDigitalStockCode(code1)
n = getSixDigitalStockCode(code2)
file1 = DownloadDir + "h_kline_" + m + ".csv"
file2 = DownloadDir + "h_kline_" + n + ".csv"
    if not os.path.exists(file1) or not os.path.exists(file2):
return False
kline1 = pd.read_csv(file1, parse_dates=['date'], index_col='date', date_parser=tudateparser)
kline2 = pd.read_csv(file2, parse_dates=['date'], index_col='date', date_parser=tudateparser)
#print kline1.head()
price_of_1 = kline1[end_date:start_date]
price_of_2 = kline2[end_date:start_date]
combination = price_of_1.join(price_of_2, how='inner', lsuffix='l', rsuffix='r')
combination.dropna()
closeprice_of_1 = combination['closel'].reset_index(drop=True)
closeprice_of_2 = combination['closer'].reset_index(drop=True)
if len(closeprice_of_1) != 0 and len(closeprice_of_2) != 0:
X = sm.add_constant(closeprice_of_1)
model = sm.OLS(endog=closeprice_of_2, exog=X)
result = model.fit()
spread = result.resid
stat = sts.adfuller(x=spread)
adf = stat[0]
pvalue = stat[1]
critical_values = stat[4]
pair = m + '+' + n
return adf < critical_values['5%']
def adfuller_check2(row):
#return adfuller_check(row[0], row[1])
#return adfuller_check_price_sgd(row[0], row[1], start_date = '2013-10-10', end_date = '2014-09-30')
return adfuller_check_price_sgd(row[0], row[1], start_date = '2013-10-10', end_date = '2015-09-30')
def adfuller_check4(code1, code2, a, b):
return adfuller_check_sgd_withweight(code1, code2, a, b, start_date = '2013-10-10', end_date = '2015-09-30')
def check_all_dir(sc):
stockPool = readCollectionMongo(MONGO_TABLE_STOCKS_PAIRS) # load weight file
print stockPool.take(2)
print "starting adf checking"
#adfResult = stockPool.map(adfuller_check2)
#adfResult = stockPool.filter(adfuller_check2)
# row seems to be a dict
adfResult = stockPool.map(lambda f: (adfuller_check4(f["stk1"], f["stk2"], f["a"], f["b"])))
#adfResult.collect()
print "%d <<<pairings" % adfResult.count()
print adfResult.first()
print "write to mongo db"
try:
writeCollectionMongo(adfResult, MONGO_TABLE_WEIGHT_SAVED)
except Exception, e:
writeCollectionMongo(adfResult, MONGO_TABLE_WEIGHT_SAVED)
## Main functionality
def main(sc):
time1 = time.time()
#adfuller_check2("601002", "600815")
# check all stock pairing in list book
#save_stk_pairings()
check_all_dir(sc)
time2 = time.time()
print "running time(s): ", time2-time1
if __name__ == "__main__":
# Configure Spark
conf = SparkConf().setAppName(APP_NAME)
conf = conf.setMaster("local[*]")
sc = SparkContext(conf=conf)
# Execute Main functionality
main(sc)
| gpl-2.0 |
necozay/tulip-control | setup.py | 1 | 5141 | #!/usr/bin/env python
import logging
from setuptools import setup
import subprocess
import os
logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger(__name__)
classifiers = [
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Topic :: Scientific/Engineering']
package_data = {
'tulip': ['commit_hash.txt'],
'tulip.transys.export': ['d3.v3.min.js'],
'tulip.spec': ['parsetab.py']}
def retrieve_git_info():
"""Return commit hash of HEAD, or "release", or None if failure.
If the git command fails, then return None.
If HEAD has tag with prefix "tulip-" or "vM" where M is an
integer, then return 'release'.
Tags with such names are regarded as version or release tags.
Otherwise, return the commit hash as str.
"""
# Is Git installed?
try:
subprocess.call(['git', '--version'],
stdout=subprocess.PIPE)
except OSError:
return None
# Decide whether this is a release
p = subprocess.Popen(
['git', 'describe', '--tags', '--candidates=0', 'HEAD'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
p.wait()
if p.returncode == 0:
tag = p.stdout.read()
logger.debug('Most recent tag: ' + tag)
if tag.startswith('tulip-'):
return 'release'
if len(tag) >= 2 and tag.startswith('v'):
try:
int(tag[1])
return 'release'
except ValueError:
pass
# Otherwise, return commit hash
p = subprocess.Popen(
['git', 'log', '-1', '--format=%H'],
stdout=subprocess.PIPE)
p.wait()
sha1 = p.stdout.read()
logger.debug('SHA1: ' + sha1)
return sha1
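    # Example (sketch): on a checkout whose HEAD carries a tag like
    # "tulip-1.2.0" or "v1.2" this returns 'release'; on an untagged commit it
    # returns the SHA1 printed by `git log -1 --format=%H`; if the git
    # executable is missing it returns None.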
def package_jtlv():
if os.path.exists(os.path.join('tulip', 'interfaces', 'jtlv_grgame.jar')):
print('Found optional JTLV-based solver.')
package_data['tulip.interfaces'] = ['jtlv_grgame.jar']
else:
print('The jtlv synthesis tool was not found. '
'Try extern/get-jtlv.sh to get it.\n'
'It is an optional alternative to gr1c, '
'the default GR(1) solver of TuLiP.')
def run_setup():
# Build PLY table, to be installed as tulip package data
try:
import tulip.spec.lexyacc
tabmodule = tulip.spec.lexyacc.TABMODULE.split('.')[-1]
outputdir = 'tulip/spec'
parser = tulip.spec.lexyacc.Parser()
parser.build(tabmodule, outputdir=outputdir,
write_tables=True,
debug=True, debuglog=logger)
plytable_build_failed = False
except Exception as e:
logger.debug('Failed to build PLY tables: {e}'.format(e=e))
plytable_build_failed = True
# If .git directory is present, create commit_hash.txt accordingly
# to indicate version information
if os.path.exists('.git'):
# Provide commit hash or empty file to indicate release
sha1 = retrieve_git_info()
if sha1 is None:
sha1 = 'unknown-commit'
        elif sha1 == 'release':
sha1 = ''
else:
logger.debug('dev sha1: ' + str(sha1))
commit_hash_header = (
'# DO NOT EDIT! '
'This file was automatically generated by setup.py of TuLiP')
with open("tulip/commit_hash.txt", "w") as f:
f.write(commit_hash_header + "\n")
f.write(sha1 + "\n")
# Import tulip/version.py without importing tulip
import imp
version = imp.load_module("version",
*imp.find_module("version", ["tulip"]))
tulip_version = version.version
# setup
package_jtlv()
setup(
name='tulip',
version=tulip_version,
description='Temporal Logic Planning (TuLiP) Toolbox',
author='Caltech Control and Dynamical Systems',
author_email='[email protected]',
url='http://tulip-control.org',
bugtrack_url='http://github.com/tulip-control/tulip-control/issues',
license='BSD',
classifiers=classifiers,
install_requires=[
'ply >= 3.4',
'networkx >= 1.6',
'numpy >= 1.7',
'pydot >= 1.0.28',
'scipy'],
extras_require={
'hybrid': ['cvxopt >= 1.1.7',
'polytope >= 0.1.1']},
tests_require=[
'nose',
'matplotlib'],
packages=[
'tulip', 'tulip.transys', 'tulip.transys.export',
'tulip.abstract', 'tulip.spec',
'tulip.interfaces'],
package_dir={'tulip': 'tulip'},
package_data=package_data)
# ply failed ?
if plytable_build_failed:
print("!"*65)
print(" Failed to build PLY table. Please run setup.py again.")
print("!"*65)
if __name__ == '__main__':
run_setup()
| bsd-3-clause |
AlexanderFabisch/scikit-learn | sklearn/metrics/base.py | 22 | 4802 |
"""
Common code for all metrics
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# License: BSD 3 clause
from __future__ import division
import numpy as np
from ..utils import check_array, check_consistent_length
from ..utils.multiclass import type_of_target
from ..exceptions import UndefinedMetricWarning as UndefinedMetricWarning_
from ..utils import deprecated
class UndefinedMetricWarning(UndefinedMetricWarning_):
pass
UndefinedMetricWarning = deprecated("UndefinedMetricWarning has been moved "
"into the sklearn.exceptions module. "
"It will not be available here from "
"version 0.19")(UndefinedMetricWarning)
def _average_binary_score(binary_metric, y_true, y_score, average,
sample_weight=None):
"""Average a binary metric for multilabel classification
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
binary_metric : callable, returns shape [n_classes]
The binary metric function to use.
Returns
-------
score : float or array of shape [n_classes]
If not ``None``, average the score, else return the score for each
classes.
"""
average_options = (None, 'micro', 'macro', 'weighted', 'samples')
if average not in average_options:
raise ValueError('average has to be one of {0}'
''.format(average_options))
y_type = type_of_target(y_true)
if y_type not in ("binary", "multilabel-indicator"):
raise ValueError("{0} format is not supported".format(y_type))
if y_type == "binary":
return binary_metric(y_true, y_score, sample_weight=sample_weight)
check_consistent_length(y_true, y_score, sample_weight)
y_true = check_array(y_true)
y_score = check_array(y_score)
not_average_axis = 1
score_weight = sample_weight
average_weight = None
if average == "micro":
if score_weight is not None:
score_weight = np.repeat(score_weight, y_true.shape[1])
y_true = y_true.ravel()
y_score = y_score.ravel()
elif average == 'weighted':
if score_weight is not None:
average_weight = np.sum(np.multiply(
y_true, np.reshape(score_weight, (-1, 1))), axis=0)
else:
average_weight = np.sum(y_true, axis=0)
if average_weight.sum() == 0:
return 0
elif average == 'samples':
# swap average_weight <-> score_weight
average_weight = score_weight
score_weight = None
not_average_axis = 0
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_score.ndim == 1:
y_score = y_score.reshape((-1, 1))
n_classes = y_score.shape[not_average_axis]
score = np.zeros((n_classes,))
for c in range(n_classes):
y_true_c = y_true.take([c], axis=not_average_axis).ravel()
y_score_c = y_score.take([c], axis=not_average_axis).ravel()
score[c] = binary_metric(y_true_c, y_score_c,
sample_weight=score_weight)
# Average the results
if average is not None:
return np.average(score, weights=average_weight)
else:
return score
| bsd-3-clause |
frodo4fingers/gimod | core/imagery.py | 1 | 5990 | #!/usr/bin/env python
# encoding: UTF-8
import matplotlib.pyplot as plt
try:
from PyQt5.QtWidgets import QFileDialog
except ImportError:
from PyQt4.QtGui import QFileDialog
try:
    import cv2
found_cv = True
except ImportError:
found_cv = False
class ImageTools():
"""
    Provide the tools for contrast recognition with OpenCV and the algorithms to split the result into multiple paths, or to set the chosen picture as background.
"""
def __init__(self, parent=None):
"""
Initialize the parent widgets of the toolbar.
Todo
----
+ get rid of the dummy flag imageClicked
"""
self.found_cv = found_cv
self.parent = parent
self.statusbar = parent.statusbar
self.threshold1 = parent.toolBar.acn_imageThreshold1.value()
self.threshold2 = parent.toolBar.acn_imageThreshold2.value()
self.imagePolys = parent.toolBar.acn_imagePolys
self.polyDensity = parent.toolBar.acn_imageDensity
self.background = parent.toolBar.acn_imageAsBackground
# self.fname = parent.toolBar.fname
self.figure = parent.plotWidget
self.imageClicked = True
def getContours(self):
"""
        Take the passed image, convert it to black and white and make paths out of the contours.
Todo
----
maybe: http://scikit-learn.org/stable/auto_examples/cluster/plot_face_ward_segmentation.html
"""
# read image
src = cv2.imread(self.fname)
img = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
# basic threshold
th, dst = cv2.threshold(img, float(self.threshold1), float(
self.threshold2), cv2.THRESH_BINARY)
# find Contours
image, contours, hierarchy = cv2.findContours(dst, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
# sort after polygon area and start with largest area
paths = sorted(contours, key=cv2.contourArea)[::-1]
# sort out those structures that are smaller than 6 dots, first one is frame
self.paths = [i for i in paths if len(i) > 5][1:]
self.statusbar.showMessage("{} possible polygons found".format(len(self.paths)))
# adjust the spinbox with number of polygons
# TODO: get this out of here!!
self.imagePolys.setRange(1, len(self.paths))
# draw initially
self.polysFromImage()
def polysFromImage(self):
"""Take the number of polys chosen in the spinbox."""
cut_down = self.paths[:self.imagePolys.value()]
self.contours = []
for path in cut_down:
tuples = []
for tup in path:
tuples.append([float(tup[0][0]), float(tup[0][1])])
self.contours.append(tuples)
self.dotDensityOfPolygons()
def dotDensityOfPolygons(self):
"""Take every n-th tuple specified by imageDensity spinbox."""
self.contoursCutted = []
for p in self.contours:
self.contoursCutted.append([p[i] for i in range(0, len(p), self.polyDensity.value())])
# self.findMinMax()
self.imagePlot()
def imagePlot(self):
"""Scatter plot all found paths from figure."""
self.figure.axis.cla()
for p in self.contoursCutted:
self.figure.axis.scatter(*zip(*p), alpha=0.5, s=2)
# self.figure.axis.set_ylim(self.figure.axis.get_ylim()[::-1])
self.figure.canvas.draw()
def setBackground(self):
"""Set chosen image file as background to draw over."""
self.figure.axis.cla()
img = plt.imread(self.fname)
self.figure.axis.imshow(img, alpha=0.6)
self.figure.canvas.draw()
def imagery(self):
"""Hide and show the widget for image threshold in the toolbar."""
if self.imageClicked is True:
# FIXME: cheeky piece of shit... wont accept given formats
self.fname = QFileDialog.getOpenFileName(None, caption='choose sketch')[0]
if self.fname:
self.parent.toolBar.widgetAction.setVisible(True)
self.parent.toolBar.acn_polygonize.setEnabled(True)
self.parent.toolBar.acn_reset_figure.setEnabled(True)
self.imageClicked = False
# instanciate the imageTools class
# self.imageTools = ImageTools(self)
self.getContours()
else:
self.parent.toolBar.widgetAction.setVisible(False)
self.parent.toolBar.acn_polygonize.setEnabled(False)
self.parent.toolBar.acn_image.setChecked(False)
self.imageClicked = True
def updateImagery(self):
"""
        GET RID OF THIS. but great reminder for how slow things are... this is called every time something changes in the image threshold widget from the toolbar!!!!
Todo
----
storm my brain to find a better and faster solution for this
"""
self.getContours()
def imageryBackground(self):
"""
Set chosen image as background of the figure so the traces can be drawn by hand. If chosen the rest is not needed.
"""
if self.parent.toolBar.acn_imageAsBackground.isChecked() is True:
self.parent.toolBar.acn_imageThreshold1.setEnabled(False)
self.parent.toolBar.acn_imageThreshold2.setEnabled(False)
self.parent.toolBar.acn_imagePolys.setEnabled(False)
self.parent.toolBar.acn_imageDensity.setEnabled(False)
self.parent.toolBar.acn_polygonize.setEnabled(False)
self.setBackground()
else:
self.updateImagery()
self.parent.toolBar.acn_imageThreshold1.setEnabled(True)
self.parent.toolBar.acn_imageThreshold2.setEnabled(True)
self.parent.toolBar.acn_imagePolys.setEnabled(True)
self.parent.toolBar.acn_imageDensity.setEnabled(True)
self.parent.toolBar.acn_polygonize.setEnabled(True)
if __name__ == '__main__':
pass
| gpl-3.0 |
areeda/gwpy | gwpy/plot/plot.py | 2 | 21853 | # -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2014-2020)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""Extension of the basic matplotlib Figure for GWpy
"""
import itertools
import importlib
import warnings
from collections.abc import (KeysView, ValuesView)
from itertools import zip_longest
import numpy
from matplotlib import (figure, get_backend, _pylab_helpers)
from matplotlib.artist import setp
from matplotlib.gridspec import GridSpec
from matplotlib.ticker import LogFormatterSciNotation
from matplotlib.projections import get_projection_class
from . import (colorbar as gcbar, utils)
from .gps import GPS_SCALES
from .log import LogFormatter
from .rc import (rcParams, MPL_RCPARAMS, get_subplot_params)
__all__ = ['Plot']
try:
__IPYTHON__
except NameError:
IPYTHON = False
else:
IPYTHON = True
iterable_types = (list, tuple, KeysView, ValuesView,)
def interactive_backend():
"""Returns `True` if the current backend is interactive
"""
from matplotlib.rcsetup import interactive_bk
return get_backend() in interactive_bk
def get_backend_mod(name=None):
"""Returns the imported module for the given backend name
Parameters
----------
name : `str`, optional
the name of the backend, defaults to the current backend.
Returns
-------
backend_mod: `module`
the module as returned by :func:`importlib.import_module`
Examples
--------
>>> from gwpy.plot.plot import get_backend_mod
>>> print(get_backend_mod('agg'))
<module 'matplotlib.backends.backend_agg' from ... >
"""
if name is None:
name = get_backend()
backend_name = (name[9:] if name.startswith("module://") else
"matplotlib.backends.backend_{}".format(name.lower()))
return importlib.import_module(backend_name)
class Plot(figure.Figure):
"""An extension of the core matplotlib `~matplotlib.figure.Figure`
The `Plot` provides a number of methods to simplify generating
figures from GWpy data objects, and modifying them on-the-fly in
interactive mode.
"""
def __init__(self, *data, **kwargs):
# get default x-axis scale if all axes have the same x-axis units
kwargs.setdefault('xscale', _parse_xscale(
_group_axes_data(data, flat=True)))
# set default size for time-axis figures
if (
kwargs.get('projection', None) == 'segments'
or kwargs.get('xscale') in GPS_SCALES
):
kwargs.setdefault('figsize', (12, 6))
kwargs.setdefault('xscale', 'auto-gps')
# initialise figure
figure_kw = {key: kwargs.pop(key) for key in utils.FIGURE_PARAMS if
key in kwargs}
self._init_figure(**figure_kw)
# initialise axes with data
if data or kwargs.get("geometry"):
self._init_axes(data, **kwargs)
def _init_figure(self, **kwargs):
from matplotlib import pyplot
# add new attributes
self.colorbars = []
self._coloraxes = []
# create Figure
num = kwargs.pop('num', max(pyplot.get_fignums() or {0}) + 1)
self._parse_subplotpars(kwargs)
super().__init__(**kwargs)
self.number = num
# add interactivity (scraped from pyplot.figure())
backend_mod = get_backend_mod()
try:
manager = backend_mod.new_figure_manager_given_figure(num, self)
except AttributeError:
upstream_mod = importlib.import_module(
pyplot.new_figure_manager.__module__)
canvas = upstream_mod.FigureCanvasBase(self)
manager = upstream_mod.FigureManagerBase(canvas, 1)
manager._cidgcf = manager.canvas.mpl_connect(
'button_press_event',
lambda ev: _pylab_helpers.Gcf.set_active(manager))
_pylab_helpers.Gcf.set_active(manager)
pyplot.draw_if_interactive()
def _init_axes(self, data, method='plot',
xscale=None, sharex=False, sharey=False,
geometry=None, separate=None, **kwargs):
"""Populate this figure with data, creating `Axes` as necessary
"""
if isinstance(sharex, bool):
sharex = "all" if sharex else "none"
if isinstance(sharey, bool):
sharey = "all" if sharey else "none"
# parse keywords
axes_kw = {key: kwargs.pop(key) for key in utils.AXES_PARAMS if
key in kwargs}
# handle geometry and group axes
if geometry is not None and geometry[0] * geometry[1] == len(data):
separate = True
axes_groups = _group_axes_data(data, separate=separate)
if geometry is None:
geometry = (len(axes_groups), 1)
nrows, ncols = geometry
if axes_groups and nrows * ncols != len(axes_groups):
# mismatching data and geometry
raise ValueError("cannot group data into {0} axes with a "
"{1}x{2} grid".format(len(axes_groups), nrows,
ncols))
# create grid spec
gs = GridSpec(nrows, ncols)
axarr = numpy.empty((nrows, ncols), dtype=object)
# set default labels
defxlabel = 'xlabel' not in axes_kw
defylabel = 'ylabel' not in axes_kw
flatdata = [s for group in axes_groups for s in group]
for axis in ('x', 'y'):
unit = _common_axis_unit(flatdata, axis=axis)
if unit:
axes_kw.setdefault('{}label'.format(axis),
unit.to_string('latex_inline_dimensional'))
# create axes for each group and draw each data object
for group, (row, col) in zip_longest(
axes_groups, itertools.product(range(nrows), range(ncols)),
fillvalue=[]):
# create Axes
shared_with = {"none": None, "all": axarr[0, 0],
"row": axarr[row, 0], "col": axarr[0, col]}
axes_kw["sharex"] = shared_with[sharex]
axes_kw["sharey"] = shared_with[sharey]
axes_kw['xscale'] = xscale if xscale else _parse_xscale(group)
ax = axarr[row, col] = self.add_subplot(gs[row, col], **axes_kw)
# plot data
plot_func = getattr(ax, method)
if method in ('imshow', 'pcolormesh'):
for obj in group:
plot_func(obj, **kwargs)
elif group:
plot_func(*group, **kwargs)
# set default axis labels
for axis, share, pos, n, def_ in (
(ax.xaxis, sharex, row, nrows, defxlabel),
(ax.yaxis, sharey, col, ncols, defylabel),
):
# hide label if shared axis and not bottom left panel
if share == 'all' and pos < n - 1:
axis.set_label_text('')
# otherwise set default status
else:
axis.isDefault_label = def_
return self.axes
@staticmethod
def _parse_subplotpars(kwargs):
# dynamically set the subplot positions based on the figure size
# -- only if the user hasn't customised the subplot params
figsize = kwargs.get('figsize') or rcParams['figure.figsize']
subplotpars = get_subplot_params(figsize)
use_subplotpars = 'subplotpars' not in kwargs and all([
rcParams['figure.subplot.%s' % pos]
== MPL_RCPARAMS['figure.subplot.%s' % pos]
for pos in ('left', 'bottom', 'right', 'top')
])
if use_subplotpars:
kwargs['subplotpars'] = subplotpars
# -- Plot methods ---------------------------
def refresh(self):
"""Refresh the current figure
"""
for cbar in self.colorbars:
cbar.draw_all()
self.canvas.draw()
def show(self, block=None, warn=True):
"""Display the current figure (if possible).
If blocking, this method replicates the behaviour of
:func:`matplotlib.pyplot.show()`, otherwise it just calls up to
:meth:`~matplotlib.figure.Figure.show`.
This method also supports repeatedly showing the same figure, even
after closing the display window, which isn't supported by
`pyplot.show` (AFAIK).
Parameters
----------
block : `bool`, optional
open the figure and block until the figure is closed, otherwise
open the figure as a detached window, default: `None`.
If `None`, block if using an interactive backend and _not_
inside IPython.
warn : `bool`, optional
print a warning if matplotlib is not running in an interactive
backend and cannot display the figure, default: `True`.
"""
# this method tries to reproduce the functionality of pyplot.show,
# mainly for user convenience. However, as of matplotlib-3.0.0,
# pyplot.show() ends up calling _back_ to Plot.show(),
# so we have to be careful not to end up in a recursive loop
import inspect
try:
callframe = inspect.currentframe().f_back
except AttributeError:
pass
else:
if 'matplotlib' in callframe.f_code.co_filename:
block = False
# render
super().show(warn=warn)
# don't block on ipython with interactive backends
if block is None and interactive_backend():
block = not IPYTHON
# block in GUI loop (stolen from mpl.backend_bases._Backend.show)
if block:
backend_mod = get_backend_mod()
backend_mod.Show().mainloop()
def save(self, *args, **kwargs):
"""Save the figure to disk.
This method is an alias to :meth:`~matplotlib.figure.Figure.savefig`,
all arguments are passed directory to that method.
"""
self.savefig(*args, **kwargs)
def close(self):
"""Close the plot and release its memory.
"""
from matplotlib.pyplot import close
for ax in self.axes[::-1]:
# avoid matplotlib/matplotlib#9970
ax.set_xscale('linear')
ax.set_yscale('linear')
# clear the axes
ax.cla()
# close the figure
close(self)
# -- axes manipulation ----------------------
def get_axes(self, projection=None):
"""Find all `Axes`, optionally matching the given projection
Parameters
----------
projection : `str`
name of axes types to return
Returns
-------
axlist : `list` of `~matplotlib.axes.Axes`
"""
if projection is None:
return self.axes
return [ax for ax in self.axes if ax.name == projection.lower()]
# -- colour bars ----------------------------
def colorbar(self, mappable=None, cax=None, ax=None, fraction=0.,
emit=True, **kwargs):
"""Add a colorbar to the current `Plot`
A colorbar must be associated with an `Axes` on this `Plot`,
and an existing mappable element (e.g. an image).
Parameters
----------
mappable : matplotlib data collection
Collection against which to map the colouring
cax : `~matplotlib.axes.Axes`
Axes on which to draw colorbar
ax : `~matplotlib.axes.Axes`
Axes relative to which to position colorbar
fraction : `float`, optional
Fraction of original axes to use for colorbar, give `fraction=0`
to not resize the original axes at all.
emit : `bool`, optional
If `True` update all mappables on `Axes` to match the same
colouring as the colorbar.
**kwargs
other keyword arguments to be passed to the
:meth:`~matplotlib.figure.Figure.colorbar`
Returns
-------
cbar : `~matplotlib.colorbar.Colorbar`
the newly added `Colorbar`
See also
--------
matplotlib.figure.Figure.colorbar
matplotlib.colorbar.Colorbar
Examples
--------
>>> import numpy
>>> from gwpy.plot import Plot
To plot a simple image and add a colorbar:
>>> plot = Plot()
>>> ax = plot.gca()
>>> ax.imshow(numpy.random.randn(120).reshape((10, 12)))
>>> plot.colorbar(label='Value')
>>> plot.show()
Colorbars can also be generated by directly referencing the parent
axes:
>>> Plot = Plot()
>>> ax = plot.gca()
>>> ax.imshow(numpy.random.randn(120).reshape((10, 12)))
>>> ax.colorbar(label='Value')
>>> plot.show()
"""
# pre-process kwargs
mappable, kwargs = gcbar.process_colorbar_kwargs(
self, mappable, ax, cax=cax, fraction=fraction, **kwargs)
# generate colour bar
cbar = super().colorbar(mappable, **kwargs)
# force the minor ticks to be the same as the major ticks
# in practice, this normally swaps out LogFormatterSciNotation to
# gwpy's LogFormatter; # this is hacky, and would be improved using a
# subclass of Colorbar in the first place, but matplotlib's
# cbar_factory doesn't support that
longaxis = (cbar.ax.yaxis if cbar.orientation == "vertical" else
cbar.ax.xaxis)
if (
isinstance(cbar.formatter, LogFormatter)
and isinstance(
longaxis.get_minor_formatter(),
LogFormatterSciNotation,
)
):
longaxis.set_minor_formatter(type(cbar.formatter)())
# record colorbar in parent object
self.colorbars.append(cbar)
# update mappables for this axis
if emit:
ax = kwargs.pop('ax')
norm = mappable.norm
cmap = mappable.get_cmap()
for map_ in ax.collections + ax.images:
map_.set_norm(norm)
map_.set_cmap(cmap)
return cbar
def add_colorbar(self, *args, **kwargs):
"""DEPRECATED, use `Plot.colorbar` instead
"""
warnings.warn(
"{0}.add_colorbar was renamed {0}.colorbar, this warnings will "
"result in an error in the future".format(type(self).__name__),
DeprecationWarning)
return self.colorbar(*args, **kwargs)
# -- extra methods --------------------------
def add_segments_bar(self, segments, ax=None, height=0.14, pad=0.1,
sharex=True, location='bottom', **plotargs):
"""Add a segment bar `Plot` indicating state information.
By default, segments are displayed in a thin horizontal set of Axes
sitting immediately below the x-axis of the main,
similarly to a colorbar.
Parameters
----------
segments : `~gwpy.segments.DataQualityFlag`
A data-quality flag, or `SegmentList` denoting state segments
about this Plot
ax : `Axes`, optional
Specific `Axes` relative to which to position new `Axes`,
defaults to :func:`~matplotlib.pyplot.gca()`
height : `float, `optional
Height of the new axes, as a fraction of the anchor axes
pad : `float`, optional
Padding between the new axes and the anchor, as a fraction of
the anchor axes dimension
sharex : `True`, `~matplotlib.axes.Axes`, optional
Either `True` to set ``sharex=ax`` for the new segment axes,
or an `Axes` to use directly
location : `str`, optional
Location for new segment axes, defaults to ``'bottom'``,
acceptable values are ``'top'`` or ``'bottom'``.
**plotargs
extra keyword arguments are passed to
:meth:`~gwpy.plot.SegmentAxes.plot`
"""
# get axes to anchor against
if not ax:
ax = self.gca()
# set options for new axes
axes_kw = {
'pad': pad,
'add_to_figure': True,
'sharex': ax if sharex is True else sharex or None,
'axes_class': get_projection_class('segments'),
}
# map X-axis limit from old axes
if axes_kw['sharex'] is ax and not ax.get_autoscalex_on():
axes_kw['xlim'] = ax.get_xlim()
# if axes uses GPS scaling, copy the epoch as well
try:
axes_kw['epoch'] = ax.get_epoch()
except AttributeError:
pass
# add new axes
if ax.get_axes_locator():
divider = ax.get_axes_locator()._axes_divider
else:
from mpl_toolkits.axes_grid1 import make_axes_locatable
divider = make_axes_locatable(ax)
if location not in {'top', 'bottom'}:
raise ValueError("Segments can only be positoned at 'top' or "
"'bottom'.")
segax = divider.append_axes(location, height, **axes_kw)
# update anchor axes
if axes_kw['sharex'] is ax and location == 'bottom':
# map label
segax.set_xlabel(ax.get_xlabel())
segax.xaxis.isDefault_label = ax.xaxis.isDefault_label
ax.set_xlabel("")
# hide ticks on original axes
setp(ax.get_xticklabels(), visible=False)
# plot segments
segax.plot(segments, **plotargs)
segax.grid(b=False, which='both', axis='y')
segax.autoscale(axis='y', tight=True)
return segax
def add_state_segments(self, *args, **kwargs):
"""DEPRECATED: use :meth:`Plot.add_segments_bar`
"""
warnings.warn('add_state_segments() was renamed add_segments_bar(), '
'this warning will result in an error in the future',
DeprecationWarning)
return self.add_segments_bar(*args, **kwargs)
# -- utilities ----------------------------------------------------------------
def _group_axes_data(inputs, separate=None, flat=False):
"""Determine the number of axes from the input args to this `Plot`
Parameters
----------
inputs : `list` of array-like data sets
A list of data arrays, or a list of lists of data sets
sep : `bool`, optional
Plot each set of data on a separate `Axes`
flat : `bool`, optional
Return a flattened list of data objects
Returns
-------
axesdata : `list` of lists of array-like data
A `list` with one element per required `Axes` containing the
array-like data sets for those `Axes`, unless ``flat=True``
is given.
Notes
-----
The logic for this method is as follows:
- if a `list` of data arrays are given, and `separate=False`, use 1 `Axes`
- if a `list` of data arrays are given, and `separate=True`, use N `Axes,
one for each data array
- if a nested `list` of data arrays are given, ignore `sep` and
use one `Axes` for each group of arrays.
Examples
--------
>>> from gwpy.plot import Plot
>>> Plot._group_axes_data([1, 2], separate=False)
[[1, 2]]
>>> Plot._group_axes_data([1, 2], separate=True)
[[1], [2]]
>>> Plot._group_axes_data([[1, 2], 3])
[[1, 2], [3]]
"""
# determine auto-separation
if separate is None and inputs:
# if given a nested list of data, multiple axes are required
if any(isinstance(x, iterable_types + (dict,)) for x in inputs):
separate = True
# if data are of different types, default to separate
elif not all(type(x) is type(inputs[0]) for x in inputs): # noqa: E721
separate = True
# build list of lists
out = []
for x in inputs:
if isinstance(x, dict): # unwrap dict
x = list(x.values())
# new group from iterable, notes:
# the iterable is presumed to be a list of independent data
# structures, unless its a list of scalars in which case we
# should plot them all as one
if (
isinstance(x, (KeysView, ValuesView))
or isinstance(x, (list, tuple)) and (
not x
or not numpy.isscalar(x[0])
)
):
out.append(x)
# dataset starts a new group
elif separate or not out:
out.append([x])
# dataset joins current group
else: # append input to most recent group
out[-1].append(x)
if flat:
return [s for group in out for s in group]
return out
def _common_axis_unit(data, axis='x'):
units = set()
uname = '{}unit'.format(axis)
for x in data:
units.add(getattr(x, uname, None))
if len(units) == 1:
return units.pop()
return None
def _parse_xscale(data):
unit = _common_axis_unit(data, axis='x')
if unit is None:
return None
if unit.physical_type == 'time':
return 'auto-gps'
| gpl-3.0 |
DakotaNelson/SoftwareSystems | hw01/ch01.py | 24 | 2897 | """Modified version of the example code from Janert,
Feedback Control For Computer Systems
This modified version requires pandas, numpy, and matplotlib.
If you use apt:
sudo apt-get install python-pandas python-numpy python-matplotlib
"""
import numpy
import pandas
import random
import matplotlib.pyplot as pyplot
class Buffer:
def __init__( self, max_wip, max_flow ):
"""Initializes the buffer:
max_wip: maximum work in progress
max_flow: maximum work completed per time step
"""
self.queued = 0
self.wip = 0 # work-in-progress ("ready pool")
self.max_wip = max_wip
self.max_flow = max_flow # avg outflow is max_flow/2
def work( self, u ):
# Add to ready pool
u = max( 0, int(round(u)) )
u = min( u, self.max_wip )
self.wip += u
# Transfer from ready pool to queue
r = int( round( random.uniform( 0, self.wip ) ) )
self.wip -= r
self.queued += r
# Release from queue to downstream process
r = int( round( random.uniform( 0, self.max_flow ) ) )
r = min( r, self.queued )
self.queued -= r
return self.queued
class Controller:
def __init__( self, kp, ki ):
"""Initializes the controller.
kp: proportional gain
ki: integral gain
"""
self.kp, self.ki = kp, ki
self.i = 0 # Cumulative error ("integral")
def work( self, e ):
"""Computes the number of jobs to be added to the ready queue.
e: error
returns: float number of jobs
"""
self.i += e
return self.kp*e + self.ki*self.i
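        # Worked example (illustrative): with kp=1.25, ki=0.01 and a constant
        # error e=10, the first call returns 1.25*10 + 0.01*10 = 12.6 and the
        # second 1.25*10 + 0.01*20 = 12.7 -- the integral term keeps growing
        # for as long as the error persists.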
# ============================================================
def closed_loop( c, p, tm=5000 ):
"""Simulates a closed loop control system.
c: Controller object
p: Buffer object
tm: number of time steps
returns: tuple of sequences (times, targets, errors)
"""
def setpoint( t ):
if t < 100: return 0
if t < 300: return 50
return 10
y = 0
res = []
for t in range( tm ):
r = setpoint(t)
e = r - y
u = c.work(e)
y = p.work(u)
#print t, r, e, u, y
res.append((t, r, e, u, y))
return zip(*res)
# ============================================================
c = Controller( 1.25, 0.01 )
p = Buffer( 50, 10 )
# run the simulation
ts, rs, es, us, ys = closed_loop( c, p, 1000 )
print 'RMS error', numpy.sqrt(numpy.mean(numpy.array(es)**2))
# generate the smoothed curve using a rolling mean
# (I think the curves in the book use loess)
ys_smooth = pandas.rolling_mean(numpy.array(ys), 20)
# make the plot
pyplot.plot(ts, rs, color='green', label='target')
pyplot.plot(ts, ys, color='red', label='queue length')
pyplot.plot(ts, ys_smooth, color='blue', label='trend')
pyplot.show()
| gpl-3.0 |
mattions/TimeScales | spineIntegration.py | 1 | 21242 | # Author Michele Mattioni
# Wed Mar 18 17:51:51 GMT 2009
import os
import logging
FORMAT = '%(levelname)s %(name)s %(lineno)s %(message)s'
if os.environ.has_key('DEBUG'):
logging.basicConfig(level=logging.DEBUG, format=FORMAT)
else:
logging.basicConfig(level=logging.INFO, format=FORMAT)
logger = logging.getLogger(__name__)
import numpy as np
import math
import sys
from neuron import h
import matplotlib
backend = 'Agg'
matplotlib.use(backend)
import matplotlib.pyplot as plt
from neuronvisio.manager import Manager
from sumatra.external.NeuroTools import parameters
from neuronControl.nrnManager import NeuronManager
from neuronControl.stimul import Stimul
#import tables
from extref import ExtRef
def iClamptest(delay=10, duration=250, amplititude=0.248):
"""# Quick Iclamp test to check the current dynamics"""
iclamp = h.IClamp(h.MSP_Cell[0].soma(0.5))
iclamp.delay = delay
iclamp.dur = duration
iclamp.amp = amplititude
class Runner():
"""Class to run the two simulator together"""
def __init__(self, param_dict):
"""Read the param and create the manager from neuronvisio"""
self.param = param_dict
# Create Neuronvisio Manager
self.manager = Manager()
self.vecs = {}
def advance_ecell(self, spine, delta_t):
"""
Advance the ecell simulator in `spine` by `delta_t`.
Parameters:
----------
delta_t: amount of simulated time to advance, expressed in seconds
"""
current_time = spine.ecellMan.ses.getCurrentTime()
len_current_time = len (spine.ecellMan.loggers['ca'].getData()[:,0])
logger.debug ("Ecell current time: %s in %s. Advancing of: %s seconds.\
Current time len: %s" %(current_time, spine.id, delta_t, len_current_time))
spine.ecellMan.ses.run(delta_t)
def advance_quickly(self, tmp_tstop, nrnManager):
"""
Advance the two simulators quickly in an independent way. Synapse weight
is synchronized at the end
"""
stimulated_spines = self.param['stimulated_spines']
#Update the weight
for spine_id in stimulated_spines:
spine = nrnManager.spines[spine_id]
self.update_synape_weight(spine)
delta_ecell = tmp_tstop - h.t
delta_ecell_seconds = delta_ecell / 1e3
logger.info ("\nAdvance quickly routine.")
logger.info ("Current Neuron time: %s, aimed tstop[ms]: %s" %(h.t, tmp_tstop))
logger.info ("Delta applied on Ecell simulator [s]: %s\n" % delta_ecell_seconds)
nrnManager.run(tmp_tstop)
for spine_id in stimulated_spines:
spine = nrnManager.spines[spine_id]
self.advance_ecell(spine, delta_ecell_seconds)
self.update_synape_weight(spine)
def build_vecs_to_plot(self, var, secs, anyRefs):
"""Create the dictionary of section->vectors to plot"""
vecs_to_plot = {}
for secName in secs:
for ref in anyRefs:
if secName == ref.sec_name:
if ref.vecs.has_key(var):
key = secName + '_' + var
vecs_to_plot[key] = ref.vecs[var]
return vecs_to_plot
def create_excitatory_inputs(self, nrnManager):
"""
Create the excitatory inputs according to the parameters file.
- Create the NEURON inputs on each synapse according to the parameters
- Record the synaptic vars with a SynVecRef
- Set and initialize the ecell biochemical simulator in the stimulated
spine."""
excitatory_stimuli = []
for spine_id in self.param['stimulated_spines']:
if spine_id in self.param.keys(): # for each spine we list the input
spine = nrnManager.spines[spine_id]
for stim_id in self.param[spine.id]:
stim_dictionary = self.param[stim_id]
if stim_dictionary.has_key('t_stim'):
stim = Stimul((stim_dictionary['t_stim']),
stim_dictionary['numbers'],
stim_dictionary['delay'],
stim_dictionary['type'])
if stim.chan_type == 'ampa':
for syn in spine.synapses:
if syn.chan_type == 'ampa':
syn.stims.append(stim)
elif stim.chan_type == 'nmda':# more than one stim
for syn in spine.synapses:
if syn.chan_type == 'nmda':
syn.stims.append(stim)
stims_time = stim.get_stims_time()
excitatory_stimuli.extend(stims_time)
else:
logger.info("No stim applied to spine: %s" %spine_id)
spine.deploy_stims(self.param['neuron_time_recording_interval'])
if self.param['bio_on']:
spine.setup_bio_sim() # Initializing ecell
excitatory_stimuli = list(set(excitatory_stimuli))
excitatory_stimuli.sort()
return excitatory_stimuli
def create_vectors(self):
"Vectors to store the resutls"
for sec in h.allsec():
for var in self.param['var_to_plot']:
vec = self.manager.create_record_vector(sec, var, None)
sec_name = sec.name()
if self.vecs.has_key(sec_name):
self.vecs[sec_name].append(vec)
else:
self.vecs[sec_name] = [vec]
def get_calcium_flux(self, spine):
"""
Retrieving the calcium in the interval. The end index is always -1 because it is
the last timepoint available; the start index marks where the interval began.
"""
delta_calcium_sampling = self.param['delta_calcium_sampling']
## We use the recorded interval instead of the dt, 'cause we are
## working directly with the time recording
# start_index = -int(delta_calcium_sampling/ self.param['dtNeuron'])
start_index = -int(delta_calcium_sampling/ self.param['neuron_time_recording_interval'])
# Getting the calcium value
vec_spine_head_cai = self.manager.get_vector(spine.head, 'cai')
vec_spine_head_cali = self.manager.get_vector(spine.head, 'cali')
head_cai = vec_spine_head_cai.x[start_index]
head_cali = vec_spine_head_cali.x[start_index]
electrical_ca_start = head_cai + head_cali
head_cai = vec_spine_head_cai.x[-1]
head_cali = vec_spine_head_cali.x[-1]
electrical_ca_end = head_cai + head_cali
electrical_diff = electrical_ca_end - electrical_ca_start
# logger.debug( "Len vecs: %s start_idx: %s" %(len(vec_spine_head_cali), start_index))
# logger.debug( "Electrical calcium start: %s end: %s difference: %s" %(electrical_ca_start,
# electrical_ca_end,
# electrical_diff))
# Calculating the flux
k_calcium_flux = electrical_diff / delta_calcium_sampling
return k_calcium_flux
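# Illustrative numbers (assumed, not from a real run): if cai+cali moves from
# 1.0e-4 mM to 1.6e-4 mM over a delta_calcium_sampling of 0.025 ms, the flux
# constant handed to ecell is (1.6e-4 - 1.0e-4) / 0.025 = 2.4e-3 mM/ms;
# the unit conversion itself happens later in spine.update_calcium().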
def equilibrium(self, nrnManager):
"""Brings both NEURON and Ecell to equilibrium"""
logger.info ("#--#")
logger.info ("Equilibrium started.")
nrnManager.run(self.param['t_equilibrium_neuron'])
for spine_id in self.param['stimulated_spines']:
spine = nrnManager.spines[spine_id]
runner.advance_ecell(spine, self.param['t_equilibrium_ecell'])
spine.set_ampa_equilibrium_baseline()
logger.info ("Equilibrium run finished. Starting normal simulation.")
logger.info ("#--#")
def main(self):
logger.info ("#--#")
logger.info ("Equilibrium run for the two simulators")
# Neuron Setup -----------------------------------------------------------
nrnManager = NeuronManager(self.param['biochemical_filename'],
self.param['big_spine'],
self.param['dtNeuron'],
spines_dist=self.param['spines_dist'],
mod_path='mod',
hoc_path='hoc')
# Easier to debug.
self.nrnManager = nrnManager
nrnManager.set_kir_gkbar(self.param['kir_gkbar'])
excitatory_stims = self.create_excitatory_inputs(nrnManager)
logger.info ("This are the time of the stims: %s" %excitatory_stims)
# Recording -----------------------------------------------
# - Recording and stimul
# - Set the stimuls to the synapses
# - Initialize Ecell in each spine
# Threading it!
nrnManager.enable_threads(self.param['nthreads'], multisplit_on=False)
self.record_vectors(nrnManager)
# Experiment -----------------------------------------------
nrnManager.init() # Initializing neuron
if self.param['bio_on']:
self.equilibrium(nrnManager)
self.run_simulation(nrnManager, excitatory_stims)
else:
# Only Electrical
tstop = self.param['t_equilibrium_neuron'] + self.param['tStop']
self.test_electrical_weight_change()
# Save the Results ------------------------------------
saving_dir = self.manager.create_new_dir(root='Data')
self.save_results(nrnManager, saving_dir)
self.plot_results(nrnManager, saving_dir)
def plot_results(self, nrnManager, saving_dir):
for i, var in enumerate(self.param['var_to_plot']):
secs = self.param['section_to_plot']
vecs_to_plot = self.build_vecs_to_plot(var,
secs,
self.manager.refs['VecRef'])
self.manager.plot_vecs(vecs_to_plot, figure_num=i)
if var == 'v':
plt.ylabel("Voltage [mV]")
plt.xlabel("Time [ms]")
plt.ylim(-90) # Setting the minimum limits
elif var == 'cai' or var == 'cali':
plt.xlabel("Time [ms]")
plt.ylabel("Concentration [mM]")
elif var == 'ica':
plt.xlabel("Time [ms]")
plt.ylabel("Current [nA]")
#
fig_file = 'plot_' + var
plt.savefig(os.path.join(saving_dir, fig_file))
if self.param['bio_on']:
from helpers.plotter import EcellPlotter
ecp = EcellPlotter()
x_start = self.param['t_equilibrium_ecell']
x_stop = x_start + self.param['tStop']/1e3
for stim_spine in self.param['stimulated_spines']:
spine = nrnManager.spines[stim_spine]
ecp.plot_timeCourses(spine.ecellMan.timeCourses, save=True,
dir=saving_dir, name=spine.id,
x_lims= [x_start, x_stop])
ecp.plot_weight(spine.ecellMan.timeCourses, dir=saving_dir)
def record_vectors(self, nrnManager):
"""Add a vecRef to record the vectors"""
t_i_r = self.param['neuron_time_recording_interval']
for spine_id in self.param['stimulated_spines']:
spine = nrnManager.spines[spine_id]
for syn in spine.synapses:
pp = syn.chan
self.manager.create_time_record(time_interval_recording=t_i_r,
point_process=pp)
for var in self.param['var_to_plot']:
for sec_rec in self.param['sec_to_rec']:
if sec_rec == 'all':
self.manager.add_all_vecRef(var,
t_i_r)
break
else:
for sec in h.allsec():
if sec.name() in self.param['sec_to_rec']:
self.manager.add_vecRef(var,
sec,
t_i_r)
# Recording the synapses
for spine_id in self.param['stimulated_spines']:
spine = nrnManager.spines[spine_id]
for syn in spine.synapses:
self.manager.add_synVecRef(syn)
def run_simulation(self, nrnManager, excitatory_stims):
"""
Run the simulation. While excitatory inputs remain, synchronize the two
simulators around each stimulus; once none remain, run each on its own and advance quickly
"""
# Processing the options
tStop_final = self.param['tStop'] + self.param['t_equilibrium_neuron']
# Getting the calcium before the stims
for spine_id in self.param['stimulated_spines']:
spine = nrnManager.spines[spine_id]
self.update_synape_weight(spine)
while h.t < tStop_final:
if excitatory_stims:
t_stim = excitatory_stims.pop(0)
s_log = "Current Neuron time: %s. \
Current t_stim: %s, remaining input: %s" %(h.t,
t_stim,
len(excitatory_stims))
logger.debug( s_log)
if h.t < t_stim:
self.advance_quickly(t_stim, nrnManager)
tmp_tstop = t_stim + self.param['t_buffer']
self.synch_simulators(tmp_tstop, nrnManager)
else:
logger.debug( "No excitatory input remaining. Quickly to the end")
self.advance_quickly(tStop_final, nrnManager)
h.fadvance() # This is to force the latest step and avoid the infinite loop.
# Recording last
for spine_id in self.param['stimulated_spines']:
spine = nrnManager.spines[spine_id]
self.update_synape_weight(spine)
def save_results(self, nrnManager, saving_dir):
"""Saving both results"""
if self.param['bio_on']:
# Add timeseries
extRef = ExtRef()
extRef.add_timeseries(self.manager,
self.param['stimulated_spines'],
nrnManager)
# Saving the weight
extRef.add_weights(self.manager,
self.param['stimulated_spines'],
nrnManager)
# Saving the calcium flux
extRef.add_kflux(self.manager,
self.param['stimulated_spines'],
nrnManager)
logger.info( "Simulation Ended. Saving results")
hdf_name = 'storage.h5'
filename = os.path.join(saving_dir, hdf_name)
logger.info( "Results will be saved in %s" %filename)
# Saving everything
self.manager.save_to_hdf(filename)
def synch_simulators(self, tmp_tstop, nrnManager):
"""
Calculate the synapse weight, using the calcium in the spine_heads
as input.
Synch the two simulators using the following steps:
1. Calculate the calcium concentration in the spines head in
NEURON and set this value in ecell.
2. Advance ecell for the specified_delta
3. Update the electric weight of the synapses in NEURON
"""
logger.info ("Current time: %f Synchronizing sims till [ms] %s" %(h.t, tmp_tstop))
stimulated_spines = self.param['stimulated_spines']
t_sync_start = h.t
while h.t < tmp_tstop:
h.fadvance() # run Neuron for step
# We are updating the calcium according to our
# delta calcium sampling.
# Due to numerical errors we can't use a straight equality comparison,
# so we wrap the check into lower/upper bound conditions.
lower_time = t_sync_start + self.param['delta_calcium_sampling']
upper_time = lower_time + self.param['dtNeuron']
# logger.debug( "Lower time: %.15f h.t: %.15f Upper time: %.15f" %(lower_time,
# h.t,
# upper_time))
if lower_time <= h.t <= upper_time:
for spine_id in stimulated_spines :
spine = nrnManager.spines[spine_id]
self.sync_calcium(spine)
self.advance_ecell(spine, (h.t - t_sync_start) / 1e3)
# Stopping flux from the input.
spine.ecellMan.ca_in['k'] = 0
# Re-enabling pump and leak.
spine.ecellMan.ca_leak['vmax'] = self.param['ca_leak_vmax']
spine.ecellMan.ca_pump['vmax'] = self.param['ca_pump_vmax']
self.update_synape_weight(spine)
t_sync_start = h.t # Resetting the t_start to the new NEURON time.
def sync_calcium(self, spine):
""""
Calculate the flux of the calcium in the spine_head and synch
it with ecell.
"""
if hasattr(spine, 'ecellMan'):
k_ca_flux = self.get_calcium_flux(spine)
# Unit conversion in update_calcium
spine.update_calcium(k_ca_flux)
def update_synape_weight(self, spine):
"""
Update the electrical weight of the synapses. Use the baseline calculated
just after the equilibrium as the reference to estimate the change of the weight.
spine : the spine where the weight should be updated
The baseline is the equilibrium concentration of AMPAR-P, which is used as
the reference and set equal to a weight of one.
"""
# Updating the AMPA synapses
for syn in spine.synapses:
if syn.chan_type == 'ampa':
# Retrieve the value of the weight.
weight = spine.ecellMan.ampar_P['Value']/spine.ampa_equilibrium_conc
syn.netCon.weight[0] = weight
# The weight of the ampa is a double list
# Check the specs in synapse weight for more info.
syn.weight[0].append(h.t)
syn.weight[1].append(weight)
logger.debug( "Updating synapse weight in %s, time [ms]: %s, weight: %s, netCon: %s" %(spine.id,
h.t,
weight,
syn.netCon.weight[0]))
logger.debug( "AMPA syn value g: %s itmp: %s ical: %s i: %s scale: %s voltage: %s" %(syn.chan.g,
syn.chan.itmp,
syn.chan.ical,
syn.chan.i,
syn.chan.scale,
spine.psd.v ))
itmp = syn.chan.scale * syn.chan.g * spine.psd.v
logger.debug( "itmp in NEURON: %s, itmp calculated: %s" %(syn.chan.itmp, itmp))
def test_electrical_weight_change(self):
"""Run the sims till tstop, and then change the weight"""
t_eq = self.param['t_equilibrium_neuron']
runner.nrnManager.run(200) # first input 180
sp1 =runner.nrnManager.spines['spine1']
syn_a = sp1.synapses[0]
syn_a.netCon.weight[0] = 1.5
tStop = self.param['tStop']
tStop += t_eq
runner.nrnManager.run(tStop)
runner.plot_results
if __name__ == "__main__":
if len(sys.argv) != 2:
logger.warning("No parameter file supplied. Abort.")
usage = 'python spineIntegration.py parameters_file.param'
logger.info( usage)
sys.exit()
parameter_file = sys.argv[1]
parameter_dict = parameters.ParameterSet(parameter_file)
runner = Runner(parameter_dict)
runner.main()
| bsd-3-clause |
BonexGu/Blik2D-SDK | Blik2D/addon/tensorflow-1.2.1_for_blik/tensorflow/contrib/learn/python/learn/tests/dataframe/in_memory_source_test.py | 62 | 3960 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests NumpySource and PandasSource."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe.transforms import in_memory_source
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def get_rows(array, row_indices):
rows = [array[i] for i in row_indices]
return np.vstack(rows)
class NumpySourceTestCase(test.TestCase):
def testNumpySource(self):
batch_size = 3
iterations = 1000
array = np.arange(32).reshape([16, 2])
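# 16 rows of 2 values; NumpySource is expected to cycle through these rows,
# wrapping modulo the array length, which is what the index check below asserts.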
numpy_source = in_memory_source.NumpySource(array, batch_size=batch_size)
index_column = numpy_source().index
value_column = numpy_source().value
cache = {}
with ops.Graph().as_default():
value_tensor = value_column.build(cache)
index_tensor = index_column.build(cache)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(iterations):
expected_index = [
j % array.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_value = get_rows(array, expected_index)
actual_index, actual_value = sess.run([index_tensor, value_tensor])
np.testing.assert_array_equal(expected_index, actual_index)
np.testing.assert_array_equal(expected_value, actual_value)
coord.request_stop()
coord.join(threads)
class PandasSourceTestCase(test.TestCase):
def testPandasFeeding(self):
if not HAS_PANDAS:
return
batch_size = 3
iterations = 1000
index = np.arange(100, 132)
a = np.arange(32)
b = np.arange(32, 64)
dataframe = pd.DataFrame({"a": a, "b": b}, index=index)
pandas_source = in_memory_source.PandasSource(
dataframe, batch_size=batch_size)
pandas_columns = pandas_source()
cache = {}
with ops.Graph().as_default():
pandas_tensors = [col.build(cache) for col in pandas_columns]
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(iterations):
indices = [
j % dataframe.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_df_indices = dataframe.index[indices]
expected_rows = dataframe.iloc[indices]
actual_value = sess.run(pandas_tensors)
np.testing.assert_array_equal(expected_df_indices, actual_value[0])
for col_num, col in enumerate(dataframe.columns):
np.testing.assert_array_equal(expected_rows[col].values,
actual_value[col_num + 1])
coord.request_stop()
coord.join(threads)
if __name__ == "__main__":
test.main()
| mit |
ambidextrous/weeBabyBigData | graphActivities.py | 1 | 6909 | import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import matplotlib.patches as patches
import datetime as dt
import csv
import sys
from matplotlib.backends.backend_pdf import PdfPages
MINUTES_IN_DAY = 1440.0
# Graph data using matplotlib visualization
def plotData(data,maxDate,minDate):
colourChoices = ['b','r','g','y']
activityChoices = ['Sleeping','Feeding']
# Set up an invisible background scatterplot to give the graph the correct size
# Make a series of events that are one day apart
x = mpl.dates.drange(minDate,maxDate,dt.timedelta(days=1))
# Offset first event to top of graph to give correct height
x[0] += 0.85
# Extract the time using a modulo 1, and adding an arbitrary base date
# int used so that y-axis starts at midnight
times = x % 1 + int(x[0])
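# e.g. an mpl date float of 735000.75 becomes 0.75 (6pm) plus the integer day
# of the first event, so every sample keeps only its time-of-day component
# while still living on a date-formatted y-axis.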
fig = plt.figure(figsize=(20,10))
fig.set_size_inches(11.69,8.27)
fig.suptitle('Daily Activity Patterns', fontsize=14, fontweight='bold')
ax = fig.add_subplot(111)
# Set background scatterplot to invisible
ax.plot_date(x, times, 'ro', color='w', visible=False)
ax.yaxis_date()
fig.autofmt_xdate()
start, end = ax.get_ylim()
# Fix division sizes and labels to show hours on y-axis
hourDivision = 1.0 / 24.0
ax.yaxis.set_ticks(np.arange(start,end,hourDivision))
ax.set_yticklabels(['Midnight','1am','2am','3am','4am','5am','6am','7am','8am','9am','10am','11am','Midday','1pm','2pm','3pm','4pm','5pm','6pm','7pm','8pm','9pm','10pm','11pm','Midnight'])
spaceBufferSize = dt.timedelta(hours=1)
# Iterate through data
for i in range(0,len(data)):
# If period starts and finishes on different days, split and add to both days
if data[i].startTime > data[i].stopTime:
currentDataItem = data[i]
currentDate = dt.datetime(currentDataItem.year,currentDataItem.month,currentDataItem.day)
currentDate -= dt.timedelta(days=0.5)
tomorrow = currentDate + dt.timedelta(days=1)
plt.axvspan(xmin=currentDate+spaceBufferSize, xmax=tomorrow-spaceBufferSize, ymin=currentDataItem.startTime, ymax=1, facecolor=colourChoices[data[i].activityIndex], alpha=0.5)
theDayAfterTomorrow = tomorrow + dt.timedelta(days=1)
plt.axvspan(xmin=tomorrow+spaceBufferSize, xmax=theDayAfterTomorrow-spaceBufferSize, ymin=0, ymax=currentDataItem.stopTime, facecolor=colourChoices[data[i].activityIndex], alpha=0.5)
# Else, add to given day
else:
currentDataItem = data[i]
currentDate = dt.datetime(currentDataItem.year,currentDataItem.month,currentDataItem.day)
currentDate -= dt.timedelta(days=0.5)
tomorrow = currentDate + dt.timedelta(days=1)
plt.axvspan(xmin=currentDate+spaceBufferSize, xmax=tomorrow-spaceBufferSize, ymin=currentDataItem.startTime, ymax=currentDataItem.stopTime, facecolor=colourChoices[currentDataItem.activityIndex], alpha=0.5)
# Labels x and y axes
ax.set_ylabel('Hours of day',fontweight='bold')
ax.set_xlabel('Days: '+str(minDate.strftime("%A, %d %B %Y"))+' to '+str(maxDate.strftime("%A, %d %B %Y")),fontweight='bold')
ax.grid(True)
# Adds legend
labels = []
for i in range(len(activityChoices)):
labels.append(patches.Patch(color=colourChoices[i], label=activityChoices[i], alpha=0.5))
plt.legend(handles=labels)
# Ensures axis labels not cut off
plt.tight_layout()
# Ensures suptitle doesn't overlap graph
plt.subplots_adjust(top=0.92)
# Saves to file
plt.savefig('activityData.pdf')
plt.savefig('activityData.jpg')
# Shows file onscreen
plt.show()
# Class to store a time period
class timePeriod(object):
def __init__(self,name,begins,ends):
self.name = name
self.begins = begins
self.ends = ends
self.activitiesAndTimePercentages = {}
self.subperiods = {}
self.seconds = (self.ends - self.begins).total_seconds()
def __str__(self):
return "timePeriod name:"+str(self.name)+": starts:"+str(self.begins)+"; ends:"+str(self.ends)+"; seconds;"+str(self.seconds)
# Read data from csv file
def readDataFromFile(dataFile,eventIndex):
f = open(dataFile,'rt')
listOfInputLists = []
try:
reader = csv.reader(f)
for row in reader:
row.append(str(eventIndex))
listOfInputLists.append(row)
finally:
f.close()
return listOfInputLists
# Class to store time and date data read from file
class activityInstance(object):
def __init__(self,listOfInputLists):
self.day = 0
self.month = 0
self.year = 0
self.formatDate(listOfInputLists[0])
self.startTime = self.formatTime(listOfInputLists[1])
self.stopTime = self.formatTime(listOfInputLists[2])
self.activityIndex = int(listOfInputLists[3])
# Extracts date information variables
def formatDate(self,unformattedDate):
date = dt.datetime.strptime(unformattedDate,"%d/%m/%y")
self.day = int(date.strftime("%d"))
self.month = int(date.strftime("%m"))
self.year = int(date.strftime("%Y"))
# Formats time as a decimal fraction of day, for use in graph
def formatTime(self,unformattedTime):
timeSinceMidnight = dt.datetime.strptime(unformattedTime,'%H:%M:%S')
midnight = dt.datetime(1900,1,1)
minutesSinceMidnight = ((timeSinceMidnight - midnight).total_seconds() / 60.0)
fractionOfDay = minutesSinceMidnight / MINUTES_IN_DAY
return fractionOfDay
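# Worked example (hand-computed): '18:30:00' is 1110 minutes after midnight,
# so formatTime returns 1110 / 1440 = 0.7708..., i.e. the event is drawn about
# 77% of the way up the day's 24-hour axis.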
# Formats data read from file as a list of activityInstance objects
def formatDataForPlot(listOfInputLists):
activities = []
for i in range(len(listOfInputLists)):
for j in range(1,len(listOfInputLists[i])):
activities.append(activityInstance(listOfInputLists[i][j]))
return activities
# Extracts earliest (min) and latest (max) dates from data, for use in setting graph limits
def getMaxAndMinDates(plotDataList):
dateTimeList = []
for item in plotDataList:
nextDate = dt.datetime(item.year,item.month,item.day)
dateTimeList.append(nextDate)
maxDate = max(dateTimeList)
minDate = min(dateTimeList)
# Ensure a minimum of three days is displayed
if not maxDate > minDate + dt.timedelta(days=1):
maxDate = minDate + dt.timedelta(days=2)
return maxDate, minDate
# Runs the programme to read the data files and construct a time-use graphic from them
def go():
dataFiles = ['sleepingData.csv','feedingData.csv']
listOfInputLists = []
for i in range(len(dataFiles)):
nextList = readDataFromFile(dataFiles[i],i)
listOfInputLists.append(nextList)
plotDataList = formatDataForPlot(listOfInputLists)
maxDate, minDate = getMaxAndMinDates(plotDataList)
plotData(plotDataList,maxDate,minDate)
go()
| mit |
Sentient07/scikit-learn | sklearn/metrics/__init__.py | 28 | 3604 | """
The :mod:`sklearn.metrics` module includes score functions, performance metrics
and pairwise metrics and distance computations.
"""
from .ranking import auc
from .ranking import average_precision_score
from .ranking import coverage_error
from .ranking import label_ranking_average_precision_score
from .ranking import label_ranking_loss
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import cohen_kappa_score
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .classification import brier_score_loss
from . import cluster
from .cluster import adjusted_mutual_info_score
from .cluster import adjusted_rand_score
from .cluster import completeness_score
from .cluster import consensus_score
from .cluster import homogeneity_completeness_v_measure
from .cluster import homogeneity_score
from .cluster import mutual_info_score
from .cluster import normalized_mutual_info_score
from .cluster import fowlkes_mallows_score
from .cluster import silhouette_samples
from .cluster import silhouette_score
from .cluster import calinski_harabaz_score
from .cluster import v_measure_score
from .pairwise import euclidean_distances
from .pairwise import pairwise_distances
from .pairwise import pairwise_distances_argmin
from .pairwise import pairwise_distances_argmin_min
from .pairwise import pairwise_kernels
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import mean_squared_log_error
from .regression import median_absolute_error
from .regression import r2_score
from .scorer import make_scorer
from .scorer import SCORERS
from .scorer import get_scorer
__all__ = [
'accuracy_score',
'adjusted_mutual_info_score',
'adjusted_rand_score',
'auc',
'average_precision_score',
'classification_report',
'cluster',
'completeness_score',
'confusion_matrix',
'consensus_score',
'coverage_error',
'euclidean_distances',
'explained_variance_score',
'f1_score',
'fbeta_score',
'get_scorer',
'hamming_loss',
'hinge_loss',
'homogeneity_completeness_v_measure',
'homogeneity_score',
'jaccard_similarity_score',
'label_ranking_average_precision_score',
'label_ranking_loss',
'log_loss',
'make_scorer',
'matthews_corrcoef',
'mean_absolute_error',
'mean_squared_error',
'mean_squared_log_error',
'median_absolute_error',
'mutual_info_score',
'normalized_mutual_info_score',
'pairwise_distances',
'pairwise_distances_argmin',
'pairwise_distances_argmin_min',
'pairwise_kernels',
'precision_recall_curve',
'precision_recall_fscore_support',
'precision_score',
'r2_score',
'recall_score',
'roc_auc_score',
'roc_curve',
'SCORERS',
'silhouette_samples',
'silhouette_score',
'v_measure_score',
'zero_one_loss',
'brier_score_loss',
]
| bsd-3-clause |
dpshelio/scikit-image | doc/examples/plot_regionprops.py | 14 | 1296 | """
=========================
Measure region properties
=========================
This example shows how to measure properties of labelled image regions.
"""
import math
import matplotlib.pyplot as plt
import numpy as np
from skimage.draw import ellipse
from skimage.measure import label, regionprops
from skimage.transform import rotate
image = np.zeros((600, 600))
rr, cc = ellipse(300, 350, 100, 220)
image[rr,cc] = 1
image = rotate(image, angle=15, order=0)
label_img = label(image)
regions = regionprops(label_img)
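# Each element of `regions` is a region-properties object; attributes such as
# centroid, orientation and bbox (used below) are computed on demand.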
fig, ax = plt.subplots()
ax.imshow(image, cmap=plt.cm.gray)
for props in regions:
y0, x0 = props.centroid
orientation = props.orientation
x1 = x0 + math.cos(orientation) * 0.5 * props.major_axis_length
y1 = y0 - math.sin(orientation) * 0.5 * props.major_axis_length
x2 = x0 - math.sin(orientation) * 0.5 * props.minor_axis_length
y2 = y0 - math.cos(orientation) * 0.5 * props.minor_axis_length
ax.plot((x0, x1), (y0, y1), '-r', linewidth=2.5)
ax.plot((x0, x2), (y0, y2), '-r', linewidth=2.5)
ax.plot(x0, y0, '.g', markersize=15)
minr, minc, maxr, maxc = props.bbox
bx = (minc, maxc, maxc, minc, minc)
by = (minr, minr, maxr, maxr, minr)
ax.plot(bx, by, '-b', linewidth=2.5)
ax.axis((0, 600, 600, 0))
plt.show()
| bsd-3-clause |